From c0208c92df28acd303c0484dcbfa4669388f42f1 Mon Sep 17 00:00:00 2001 From: Douglas RAILLARD Date: Mon, 10 Dec 2018 14:47:54 +0000 Subject: [PATCH 1/8] analysis: Rename self._trace into self.trace Prepare the ground for reusing some of the analysis infrastructure in test methods and classes (events checking). --- lisa/analysis/base.py | 6 ++-- lisa/analysis/cpus.py | 8 ++--- lisa/analysis/frequency.py | 52 ++++++++++++++--------------- lisa/analysis/functions.py | 4 +-- lisa/analysis/idle.py | 22 ++++++------- lisa/analysis/latency.py | 24 +++++++------- lisa/analysis/load_tracking.py | 48 +++++++++++++-------------- lisa/analysis/status.py | 4 +-- lisa/analysis/tasks.py | 60 +++++++++++++++++----------------- lisa/analysis/thermal.py | 14 ++++---- 10 files changed, 121 insertions(+), 121 deletions(-) diff --git a/lisa/analysis/base.py b/lisa/analysis/base.py index 975993c08..76e3abc24 100644 --- a/lisa/analysis/base.py +++ b/lisa/analysis/base.py @@ -82,7 +82,7 @@ class AnalysisBase(Loggable): """ def __init__(self, trace): - self._trace = trace + self.trace = trace @classmethod def setup_plot(cls, width=16, height=4, ncols=1, nrows=1, **kwargs): @@ -179,7 +179,7 @@ class AnalysisBase(Loggable): module = self.__module__ caller = inspect.stack()[1][3] filepath = os.path.join( - self._trace.plots_dir, + self.trace.plots_dir, "{}.{}.{}".format(module, caller, img_format)) figure.savefig(filepath, format=img_format) @@ -190,7 +190,7 @@ class AnalysisBase(Loggable): :raises: MissingTraceEventError if some events are not available """ - available_events = sorted(set(self._trace.available_events)) + available_events = sorted(set(self.trace.available_events)) missing_events = sorted(set(required_events).difference(available_events)) if missing_events: diff --git a/lisa/analysis/cpus.py b/lisa/analysis/cpus.py index 474730630..fff331822 100644 --- a/lisa/analysis/cpus.py +++ b/lisa/analysis/cpus.py @@ -47,8 +47,8 @@ class CpusAnalysis(AnalysisBase): * A 
``context_switch_cnt`` column (the number of context switch per CPU) """ - sched_df = self._trace.df_events('sched_switch') - cpus = list(range(self._trace.cpus_count)) + sched_df = self.trace.df_events('sched_switch') + cpus = list(range(self.trace.cpus_count)) ctx_sw_df = pd.DataFrame( [len(sched_df[sched_df['__cpu'] == cpu]) for cpu in cpus], index=cpus, @@ -87,8 +87,8 @@ class CpusAnalysis(AnalysisBase): :param cpu: The CPU :type cpu: int """ - if "cpu-capacities" in self._trace.plat_info: - axis.axhline(self._trace.plat_info["cpu-capacities"][cpu], + if "cpu-capacities" in self.trace.plat_info: + axis.axhline(self.trace.plat_info["cpu-capacities"][cpu], color=self.get_next_color(axis), linestyle='--', label="orig_capacity") diff --git a/lisa/analysis/frequency.py b/lisa/analysis/frequency.py index 40e70f8c5..1363803ff 100644 --- a/lisa/analysis/frequency.py +++ b/lisa/analysis/frequency.py @@ -56,12 +56,12 @@ class FrequencyAnalysis(AnalysisBase): * A ``total_time`` column (the total time spent at a frequency) * A ``active_time`` column (the non-idle time spent at a frequency) """ - freq_df = self._trace.df_events('cpu_frequency') + freq_df = self.trace.df_events('cpu_frequency') # Assumption: all CPUs in a cluster run at the same frequency, i.e. the # frequency is scaled per-cluster not per-CPU. Hence, we can limit the # cluster frequencies data to a single CPU. This assumption is verified # by the Trace module when parsing the trace. 
- if len(cpus) > 1 and not self._trace.freq_coherency: + if len(cpus) > 1 and not self.trace.freq_coherency: self.get_logger().warning('Cluster frequency is NOT coherent,' 'cannot compute residency!') return None @@ -69,12 +69,12 @@ class FrequencyAnalysis(AnalysisBase): cluster_freqs = freq_df[freq_df.cpu == cpus[0]] # Compute TOTAL Time - cluster_freqs = self._trace.add_events_deltas( + cluster_freqs = self.trace.add_events_deltas( cluster_freqs, col_name="total_time", inplace=False) time_df = cluster_freqs[["total_time", "frequency"]].groupby(["frequency"]).sum() # Compute ACTIVE Time - cluster_active = self._trace.analysis.idle.signal_cluster_active(cpus) + cluster_active = self.trace.analysis.idle.signal_cluster_active(cpus) # In order to compute the active time spent at each frequency we # multiply 2 square waves: @@ -94,7 +94,7 @@ class FrequencyAnalysis(AnalysisBase): freq_active = cluster_freqs.frequency.apply(lambda x: 1 if x == freq else 0) active_t = cluster_freqs.active * freq_active # Compute total time by integrating the square wave - nonidle_time.append(self._trace.integrate_square_wave(active_t)) + nonidle_time.append(self.trace.integrate_square_wave(active_t)) time_df["active_time"] = pd.DataFrame(index=available_freqs, data=nonidle_time) return time_df @@ -132,7 +132,7 @@ class FrequencyAnalysis(AnalysisBase): * A ``total_time`` column (the total time spent at a frequency) * A ``active_time`` column (the non-idle time spent at a frequency) """ - domains = self._trace.plat_info['freq-domains'] + domains = self.trace.plat_info['freq-domains'] for domain in domains: if cpu in domain: return self._get_frequency_residency(tuple(domain)) @@ -150,7 +150,7 @@ class FrequencyAnalysis(AnalysisBase): * A ``transitions`` column (the number of frequency transitions) """ - freq_df = self._trace.df_events('cpu_frequency') + freq_df = self.trace.df_events('cpu_frequency') cpu_freqs = freq_df[freq_df.cpu == cpu].frequency # Remove possible duplicates (example: 
when devlib sets trace markers @@ -180,7 +180,7 @@ class FrequencyAnalysis(AnalysisBase): return None return transitions.apply( - lambda x: x / (self._trace.x_max - self._trace.x_min) + lambda x: x / (self.trace.x_max - self.trace.x_min) ) @requires_events(['cpu_frequency']) @@ -191,12 +191,12 @@ class FrequencyAnalysis(AnalysisBase): :param cpu: The CPU to analyse :type cpu: int """ - df = self._trace.df_events('cpu_frequency') + df = self.trace.df_events('cpu_frequency') df = df[df.cpu == cpu] # We can't use the pandas average because it's not weighted by # time spent in each frequency, so we have to craft our own. - df = self._trace.add_events_deltas(df, inplace=False) + df = self.trace.add_events_deltas(df, inplace=False) timespan = df.index[-1] - df.index[0] return (df['frequency'] * df['delta']).sum() / timespan @@ -217,7 +217,7 @@ class FrequencyAnalysis(AnalysisBase): :raises: KeyError """ - freq = self._trace.getPeripheralClockEffectiveRate(clk) + freq = self.trace.getPeripheralClockEffectiveRate(clk) if freq is None or freq.empty: self.get_logger().warning('no peripheral clock events found for clock') return @@ -237,7 +237,7 @@ class FrequencyAnalysis(AnalysisBase): if len(set_rate) > 0: rate_axis_lib = set_rate.max() set_rate.plot(style=['b--'], ax=freq_axis, drawstyle='steps-post', alpha=0.4, label="clock_set_rate value") - freq_axis.hlines(set_rate.iloc[-1], set_rate.index[-1], self._trace.x_max, linestyle='--', color='b', alpha=0.4) + freq_axis.hlines(set_rate.iloc[-1], set_rate.index[-1], self.trace.x_max, linestyle='--', color='b', alpha=0.4) else: self.get_logger().warning('No clock_set_rate events to plot') @@ -246,12 +246,12 @@ class FrequencyAnalysis(AnalysisBase): if len(eff_rate) > 0 and eff_rate.max() > 0: rate_axis_lib = max(rate_axis_lib, eff_rate.max()) eff_rate.plot(style=['b-'], ax=freq_axis, drawstyle='steps-post', alpha=1.0, label="Effective rate (with on/off)") - freq_axis.hlines(eff_rate.iloc[-1], eff_rate.index[-1], 
self._trace.x_max, linestyle='-', color='b', alpha=1.0) + freq_axis.hlines(eff_rate.iloc[-1], eff_rate.index[-1], self.trace.x_max, linestyle='-', color='b', alpha=1.0) else: self.get_logger().warning('No effective frequency events to plot') freq_axis.set_ylim(0, rate_axis_lib * 1.1) - freq_axis.set_xlim(self._trace.x_min, self._trace.x_max) + freq_axis.set_xlim(self.trace.x_min, self.trace.x_max) freq_axis.set_xlabel('') freq_axis.grid(True) freq_axis.legend() @@ -272,16 +272,16 @@ class FrequencyAnalysis(AnalysisBase): # Plot time period that the clock state was unknown from the trace indeterminate = pd.concat([on, off]).sort_index() if indeterminate.empty: - indet_range_max = self._trace.x_max + indet_range_max = self.trace.x_max else: indet_range_max = indeterminate.index[0] state_axis.hlines(0, 0, indet_range_max, linewidth = 1.0, label='indeterminate clock state', linestyle='--') state_axis.legend(bbox_to_anchor=(0., 1.02, 1., 0.102), loc=3, ncol=3, mode='expand') state_axis.set_yticks([]) state_axis.set_xlabel('seconds') - state_axis.set_xlim(self._trace.x_min, self._trace.x_max) + state_axis.set_xlim(self.trace.x_min, self.trace.x_max) - figname = os.path.join(self._trace.plots_dir, '{}{}.png'.format(self._trace.plots_prefix, clk)) + figname = os.path.join(self.trace.plots_dir, '{}{}.png'.format(self.trace.plots_prefix, clk)) pl.savefig(figname, bbox_inches='tight') @@ -299,7 +299,7 @@ class FrequencyAnalysis(AnalysisBase): If ``sched_overutilized`` events are available, the plots will also show the intervals of time where the system was overutilized. 
""" - df = self._trace.df_events('cpu_frequency') + df = self.trace.df_events('cpu_frequency') df = df[df.cpu == cpu] local_fig = not axis @@ -307,7 +307,7 @@ class FrequencyAnalysis(AnalysisBase): if local_fig: fig, axis = self.setup_plot() - frequencies = self._trace.plat_info['freqs'][cpu] + frequencies = self.trace.plat_info['freqs'][cpu] avg = self.get_average_cpu_frequency(cpu) self.get_logger().info( @@ -320,12 +320,12 @@ class FrequencyAnalysis(AnalysisBase): axis.axhline(avg, color=self.get_next_color(axis), linestyle='--', label="average") - plot_overutilized = self._trace.analysis.status.plot_overutilized - if self._trace.hasEvents(plot_overutilized.required_events): + plot_overutilized = self.trace.analysis.status.plot_overutilized + if self.trace.hasEvents(plot_overutilized.required_events): plot_overutilized(axis=axis) axis.set_ylim(frequencies[0] * 0.9, frequencies[-1] * 1.1) - axis.set_xlim(self._trace.x_min, self._trace.x_max) + axis.set_xlim(self.trace.x_min, self.trace.x_max) axis.set_ylabel('Frequency (Hz)') axis.set_xlabel('Time') @@ -347,7 +347,7 @@ class FrequencyAnalysis(AnalysisBase): If ``sched_overutilized`` events are available, the plots will also show the intervals of time where the cluster was overutilized. 
""" - domains = self._trace.plat_info['freq-domains'] + domains = self.trace.plat_info['freq-domains'] fig, axes = self.setup_plot(nrows=len(domains), sharex=True) for idx, domain in enumerate(domains): @@ -356,7 +356,7 @@ class FrequencyAnalysis(AnalysisBase): self.plot_cpu_frequencies(domain[0], filepath, axis) axis.set_title('Frequencies of CPUS {}'.format(domain)) - axis.set_xlim(self._trace.x_min, self._trace.x_max) + axis.set_xlim(self.trace.x_min, self.trace.x_max) self.save_plot(fig, filepath) @@ -419,7 +419,7 @@ class FrequencyAnalysis(AnalysisBase): :param pct: Plot residencies in percentage :type pct: bool """ - domains = self._trace.plat_info['freq-domains'] + domains = self.trace.plat_info['freq-domains'] fig, axes = self.setup_plot(nrows=2*len(domains), sharex=True) for idx, domain in enumerate(domains): @@ -480,7 +480,7 @@ class FrequencyAnalysis(AnalysisBase): :param pct: Plot frequency transitions in percentage :type pct: bool """ - domains = self._trace.plat_info['freq-domains'] + domains = self.trace.plat_info['freq-domains'] fig, axes = self.setup_plot(nrows=len(domains)) diff --git a/lisa/analysis/functions.py b/lisa/analysis/functions.py index 64ecb3e9a..434d30c0b 100644 --- a/lisa/analysis/functions.py +++ b/lisa/analysis/functions.py @@ -51,12 +51,12 @@ class FunctionsAnalysis(AnalysisBase): time - total execution time :type metrics: srt or list(str) """ - if not hasattr(self._trace, '_functions_stats_df'): + if not hasattr(self.trace, '_functions_stats_df'): self.get_logger().warning('Functions stats data not available') return metrics = listify(metrics) - df = self._trace.df_functions_stats(functions) + df = self.trace.df_functions_stats(functions) # Check that all the required metrics are acutally availabe available_metrics = df.columns.tolist() diff --git a/lisa/analysis/idle.py b/lisa/analysis/idle.py index e467ccd5c..669b94151 100644 --- a/lisa/analysis/idle.py +++ b/lisa/analysis/idle.py @@ -55,7 +55,7 @@ class 
IdleAnalysis(AnalysisBase): :returns: A :class:`pandas.Series` that equals 1 at timestamps where the CPU is reported to be non-idle, 0 otherwise """ - idle_df = self._trace.df_events('cpu_idle') + idle_df = self.trace.df_events('cpu_idle') cpu_df = idle_df[idle_df.cpu_id == cpu] cpu_active = cpu_df.state.apply( @@ -63,8 +63,8 @@ class IdleAnalysis(AnalysisBase): ) start_time = 0.0 - if not self._trace.ftrace.normalized_time: - start_time = self._trace.ftrace.basetime + if not self.trace.ftrace.normalized_time: + start_time = self.trace.ftrace.basetime if cpu_active.empty: cpu_active = pd.Series([0], index=[start_time]) @@ -121,11 +121,11 @@ class IdleAnalysis(AnalysisBase): * A ``cpu`` column (the CPU that woke up at the row index) """ - cpus = list(range(self._trace.cpus_count)) + cpus = list(range(self.trace.cpus_count)) sr = pd.Series() for cpu in cpus: - cpu_sr = self._trace.getCPUActiveSignal(cpu) + cpu_sr = self.trace.getCPUActiveSignal(cpu) cpu_sr = cpu_sr[cpu_sr == 1] cpu_sr = cpu_sr.replace(1, cpu) sr = sr.append(cpu_sr) @@ -145,7 +145,7 @@ class IdleAnalysis(AnalysisBase): * Idle states as index * A ``time`` column (The time spent in the idle state) """ - idle_df = self._trace.df_events('cpu_idle') + idle_df = self.trace.df_events('cpu_idle') cpu_idle = idle_df[idle_df.cpu_id == cpu] cpu_is_idle = self.signal_cpu_active(cpu) ^ 1 @@ -165,7 +165,7 @@ class IdleAnalysis(AnalysisBase): # Extend the last cpu_idle event to the end of the time window under # consideration - final_entry = pd.DataFrame([cpu_idle.iloc[-1]], index=[self._trace.x_max]) + final_entry = pd.DataFrame([cpu_idle.iloc[-1]], index=[self.trace.x_max]) cpu_idle = cpu_idle.append(final_entry) idle_time = [] @@ -175,7 +175,7 @@ class IdleAnalysis(AnalysisBase): ) idle_t = cpu_idle.is_idle * idle_state # Compute total time by integrating the square wave - idle_time.append(self._trace.integrate_square_wave(idle_t)) + idle_time.append(self.trace.integrate_square_wave(idle_t)) idle_time_df = 
pd.DataFrame({'time' : idle_time}, index=available_idles) idle_time_df.index.name = 'idle_state' @@ -194,7 +194,7 @@ class IdleAnalysis(AnalysisBase): * Idle states as index * A ``time`` column (The time spent in the idle state) """ - idle_df = self._trace.df_events('cpu_idle') + idle_df = self.trace.df_events('cpu_idle') # Each core in a cluster can be in a different idle state, but the # cluster lies in the idle state with lowest ID, that is the shallowest # idle state among the idle states of its CPUs @@ -233,7 +233,7 @@ class IdleAnalysis(AnalysisBase): ) idle_t = cl_idle.is_idle * idle_state # Compute total time by integrating the square wave - idle_time.append(self._trace.integrate_square_wave(idle_t)) + idle_time.append(self.trace.integrate_square_wave(idle_t)) idle_time_df = pd.DataFrame({'time' : idle_time}, index=available_idles) idle_time_df.index.name = 'idle_state' @@ -309,7 +309,7 @@ class IdleAnalysis(AnalysisBase): .. note:: This assumes clusters == frequency domains, which may not hold true... """ - clusters = self._trace.plat_info['freq-domains'] + clusters = self.trace.plat_info['freq-domains'] fig, axes = self.setup_plot(nrows=len(clusters), sharex=True) diff --git a/lisa/analysis/latency.py b/lisa/analysis/latency.py index 70f6252f5..d64ca6872 100644 --- a/lisa/analysis/latency.py +++ b/lisa/analysis/latency.py @@ -55,7 +55,7 @@ class LatencyAnalysis(AnalysisBase): * A ``wakeup_latency`` column (the wakeup latency at that timestamp). """ - df = self._trace.analysis.tasks.df_task_states(task) + df = self.trace.analysis.tasks.df_task_states(task) df = df[(df.curr_state == TaskState.TASK_WAKING.char) & (df.next_state == TaskState.TASK_ACTIVE.char)][["delta"]] @@ -75,7 +75,7 @@ class LatencyAnalysis(AnalysisBase): * A ``preempt_latency`` column (the preemption latency at that timestamp). 
""" - df = self._trace.analysis.tasks.df_task_states(task) + df = self.trace.analysis.tasks.df_task_states(task) df = df[(df.curr_state.str.contains(TaskState.TASK_RUNNING.char)) & (df.next_state == TaskState.TASK_ACTIVE.char)][["delta"]] @@ -95,7 +95,7 @@ class LatencyAnalysis(AnalysisBase): * An ``activation_interval`` column (the time since the last activation). """ - wkp_df = self._trace.analysis.tasks.df_task_states(task) + wkp_df = self.trace.analysis.tasks.df_task_states(task) wkp_df = wkp_df[wkp_df.curr_state == TaskState.TASK_WAKING.char] index = wkp_df.index.to_frame() @@ -119,7 +119,7 @@ class LatencyAnalysis(AnalysisBase): * A ``running_time`` column (the cumulated running time since the last activation). """ - df = self._trace.analysis.tasks.df_task_states(task) + df = self.trace.analysis.tasks.df_task_states(task) runtimes = [] spurious_wkp = False @@ -214,7 +214,7 @@ class LatencyAnalysis(AnalysisBase): axis.set_title("Latencies of task \"{}\"".format(task)) axis.set_ylabel("Latency (s)") axis.legend() - axis.set_xlim(self._trace.x_min, self._trace.x_max) + axis.set_xlim(self.trace.x_min, self.trace.x_max) self.save_plot(fig, filepath) return axis @@ -359,7 +359,7 @@ class LatencyAnalysis(AnalysisBase): plot_bands(wkl_df, "wakeup_latency", "Wakeup latencies") plot_bands(prt_df, "preempt_latency", "Preemption latencies") axis.legend() - axis.set_xlim(self._trace.x_min, self._trace.x_max) + axis.set_xlim(self.trace.x_min, self.trace.x_max) if local_fig: self.save_plot(fig, filepath) @@ -380,13 +380,13 @@ class LatencyAnalysis(AnalysisBase): wkp_df.plot(style='+', logy=False, ax=axis) - plot_overutilized = self._trace.analysis.status.plot_overutilized - if self._trace.hasEvents(plot_overutilized.required_events): + plot_overutilized = self.trace.analysis.status.plot_overutilized + if self.trace.hasEvents(plot_overutilized.required_events): plot_overutilized(axis=axis) axis.set_title("Activation intervals of task \"{}\"".format(task)) - 
axis.set_xlim(self._trace.x_min, self._trace.x_max) + axis.set_xlim(self.trace.x_min, self.trace.x_max) self.save_plot(fig, filepath) return axis @@ -405,13 +405,13 @@ class LatencyAnalysis(AnalysisBase): df.plot(style='+', ax=axis) - plot_overutilized = self._trace.analysis.status.plot_overutilized - if self._trace.hasEvents(plot_overutilized.required_events): + plot_overutilized = self.trace.analysis.status.plot_overutilized + if self.trace.hasEvents(plot_overutilized.required_events): plot_overutilized(axis=axis) axis.set_title("Per-activation runtimes of task \"{}\"".format(task)) - axis.set_xlim(self._trace.x_min, self._trace.x_max) + axis.set_xlim(self.trace.x_min, self.trace.x_max) self.save_plot(fig, filepath) return axis diff --git a/lisa/analysis/load_tracking.py b/lisa/analysis/load_tracking.py index 6e2f59b54..0595d7e54 100644 --- a/lisa/analysis/load_tracking.py +++ b/lisa/analysis/load_tracking.py @@ -62,7 +62,7 @@ class LoadTrackingAnalysis(AnalysisBase): return [] def _df_uniformized_signal(self, event): - df = self._trace.df_events(event) + df = self.trace.df_events(event) df = df.rename(columns=self._columns_renaming(event)) @@ -80,7 +80,7 @@ class LoadTrackingAnalysis(AnalysisBase): def _df_either_event(self, events): for event in events: - if event not in self._trace.available_events: + if event not in self.trace.available_events: continue return self._df_uniformized_signal(event) @@ -126,10 +126,10 @@ class LoadTrackingAnalysis(AnalysisBase): """ df = self._df_either_event(['sched_load_se', 'sched_load_avg_task']) - if "cpu-capacities" in self._trace.plat_info: + if "cpu-capacities" in self.trace.plat_info: # Add a column which represents the max capacity of the smallest # CPU which can accomodate the task utilization - capacities = sorted(self._trace.plat_info["cpu-capacities"].values()) + capacities = sorted(self.trace.plat_info["cpu-capacities"].values()) def fits_capacity(util): for capacity in capacities: @@ -167,7 +167,7 @@ class 
LoadTrackingAnalysis(AnalysisBase): samples = samples.sort_values(ascending=False) top_df = pd.DataFrame(samples).rename(columns={"util" : "samples"}) - top_df["comm"] = top_df.index.map(self._trace.get_task_by_pid) + top_df["comm"] = top_df.index.map(self.trace.get_task_by_pid) return top_df @@ -178,7 +178,7 @@ class LoadTrackingAnalysis(AnalysisBase): :param cpus: list of CPUs to be plotted :type cpus: list(int) """ - cpus = cpus or list(range(self._trace.cpus_count)) + cpus = cpus or list(range(self.trace.cpus_count)) fig, axes = self.setup_plot(nrows=len(cpus), sharex=True) cpus_df = self.df_cpus_signals() @@ -193,11 +193,11 @@ class LoadTrackingAnalysis(AnalysisBase): df[['util']].plot(ax=axis, drawstyle='steps-post', alpha=0.4) df[['load']].plot(ax=axis, drawstyle='steps-post', alpha=0.4) - self._trace.analysis.cpus.plot_orig_capacity(axis, cpu) + self.trace.analysis.cpus.plot_orig_capacity(axis, cpu) # Add capacities data if available - if self._trace.hasEvents('cpu_capacity'): - df = self._trace.df_events('cpu_capacity') + if self.trace.hasEvents('cpu_capacity'): + df = self.trace.df_events('cpu_capacity') df = df[df["__cpu"] == cpu] if len(df): data = df[['capacity', 'tip_capacity']] @@ -205,12 +205,12 @@ class LoadTrackingAnalysis(AnalysisBase): drawstyle='steps-post') # Add overutilized signal to the plot - plot_overutilized = self._trace.analysis.status.plot_overutilized - if self._trace.hasEvents(plot_overutilized.required_events): + plot_overutilized = self.trace.analysis.status.plot_overutilized + if self.trace.hasEvents(plot_overutilized.required_events): plot_overutilized(axis=axis) axis.set_ylim(0, 1100) - axis.set_xlim(self._trace.x_min, self._trace.x_max) + axis.set_xlim(self.trace.x_min, self.trace.x_max) axis.legend() self.save_plot(fig, filepath) @@ -227,20 +227,20 @@ class LoadTrackingAnalysis(AnalysisBase): df = self.df_tasks_signals() - pid = self._trace.get_task_pid(task) + pid = self.trace.get_task_pid(task) df = df[df.pid == pid] 
df[['util']].plot(ax=axis, drawstyle='steps-post', alpha=0.4) df[['load']].plot(ax=axis, drawstyle='steps-post', alpha=0.4) - plot_overutilized = self._trace.analysis.status.plot_overutilized - if self._trace.hasEvents(plot_overutilized.required_events): + plot_overutilized = self.trace.analysis.status.plot_overutilized + if self.trace.hasEvents(plot_overutilized.required_events): plot_overutilized(axis=axis) axis.set_title('Load-tracking signals of task "{}"'.format(task)) axis.legend() axis.grid(True) - axis.set_xlim(self._trace.x_min, self._trace.x_max) + axis.set_xlim(self.trace.x_min, self.trace.x_max) self.save_plot(fig, filepath) return axis @@ -260,13 +260,13 @@ class LoadTrackingAnalysis(AnalysisBase): if local_fig: fig, axis = self.setup_plot(height=8) - pid = self._trace.get_task_pid(task) + pid = self.trace.get_task_pid(task) df = self.df_tasks_signals() df = df[df.pid == pid] # Build task names (there could be multiple, during the task lifetime) - task_name = 'Task ({}:{})'.format(pid, self._trace.get_task_by_pid(pid)) + task_name = 'Task ({}:{})'.format(pid, self.trace.get_task_by_pid(pid)) df["required_capacity"].plot( drawstyle='steps-post', @@ -278,7 +278,7 @@ class LoadTrackingAnalysis(AnalysisBase): if local_fig: axis.set_title(task_name) axis.set_ylim(0, 1100) - axis.set_xlim(self._trace.x_min, self._trace.x_max) + axis.set_xlim(self.trace.x_min, self.trace.x_max) axis.set_ylabel('Utilization') axis.set_xlabel('Time (s)') @@ -298,10 +298,10 @@ class LoadTrackingAnalysis(AnalysisBase): # Get all utilization update events df = self.df_tasks_signals() - pid = self._trace.get_task_pid(task) + pid = self.trace.get_task_pid(task) df = df[df.pid == pid] - cpu_capacities = self._trace.plat_info["cpu-capacities"] + cpu_capacities = self.trace.plat_info["cpu-capacities"] def evaluate_placement(cpu, required_capacity): capacity = cpu_capacities[cpu] @@ -321,13 +321,13 @@ class LoadTrackingAnalysis(AnalysisBase): for stat in df["placement"].unique(): 
df[df.placement == stat]["__cpu"].plot(ax=axis, style="+", label=stat) - plot_overutilized = self._trace.analysis.status.plot_overutilized - if self._trace.hasEvents(plot_overutilized.required_events): + plot_overutilized = self.trace.analysis.status.plot_overutilized + if self.trace.hasEvents(plot_overutilized.required_events): plot_overutilized(axis=axis) axis.set_title("Utilization vs placement of task \"{}\"".format(task)) - axis.set_xlim(self._trace.x_min, self._trace.x_max) + axis.set_xlim(self.trace.x_min, self.trace.x_max) axis.grid(True) axis.legend() diff --git a/lisa/analysis/status.py b/lisa/analysis/status.py index 858b28e26..c72ebafe1 100644 --- a/lisa/analysis/status.py +++ b/lisa/analysis/status.py @@ -50,7 +50,7 @@ class StatusAnalysis(AnalysisBase): * A ``len`` column (the time spent in that overutilized status) """ # Build sequence of overutilization "bands" - df = self._trace.df_events('sched_overutilized') + df = self.trace.df_events('sched_overutilized') # Remove duplicated index events, keep only last event which is the # only one with a non null length @@ -101,7 +101,7 @@ class StatusAnalysis(AnalysisBase): if local_fig: axis.set_title("System-wide overutilized status") - axis.set_xlim(self._trace.x_min, self._trace.x_max) + axis.set_xlim(self.trace.x_min, self.trace.x_max) self.save_plot(fig, filepath) # vim :set tabstop=4 shiftwidth=4 expandtab textwidth=80 diff --git a/lisa/analysis/tasks.py b/lisa/analysis/tasks.py index fd04080d9..d03df0e90 100644 --- a/lisa/analysis/tasks.py +++ b/lisa/analysis/tasks.py @@ -135,11 +135,11 @@ class TasksAnalysis(AnalysisBase): * Task PIDs as index * A ``wakeups`` column (The number of wakeups) """ - df = self._trace.df_events('sched_wakeup') + df = self.trace.df_events('sched_wakeup') wakeups = df.groupby('pid').count()["comm"] df = pd.DataFrame(wakeups).rename(columns={"comm" : "wakeups"}) - df["comm"] = df.index.map(self._trace.get_task_by_pid) + df["comm"] = df.index.map(self.trace.get_task_by_pid) 
return df @@ -178,7 +178,7 @@ class TasksAnalysis(AnalysisBase): * A ``prio`` column (The priority of the task) * A ``comm`` column (The name of the task) """ - df = self._trace.df_events('sched_switch') + df = self.trace.df_events('sched_switch') # Filters tasks which have a priority bigger than threshold df = df[df.next_prio <= min_prio] @@ -194,7 +194,7 @@ class TasksAnalysis(AnalysisBase): columns={'next_pid': 'pid', 'next_prio': 'prio'}, inplace=True) rt_tasks.set_index('pid', inplace=True) - rt_tasks['comm'] = rt_tasks.index.map(self._trace.get_task_by_pid) + rt_tasks['comm'] = rt_tasks.index.map(self.trace.get_task_by_pid) return rt_tasks @@ -215,13 +215,13 @@ class TasksAnalysis(AnalysisBase): * A ``delta`` column (the duration for which the task will remain in this state) """ - pid = self._trace.get_task_pid(task) + pid = self.trace.get_task_pid(task) - wk_df = self._trace.df_events('sched_wakeup') - sw_df = self._trace.df_events('sched_switch') + wk_df = self.trace.df_events('sched_wakeup') + sw_df = self.trace.df_events('sched_switch') - if "sched_wakeup_new" in self._trace.events: - wkn_df = self._trace.df_events('sched_wakeup_new') + if "sched_wakeup_new" in self.trace.events: + wkn_df = self.trace.df_events('sched_wakeup_new') wk_df = pd.concat([wk_df, wkn_df]).sort_index() task_wakeup = wk_df[wk_df.pid == pid][['target_cpu', '__cpu']] @@ -256,7 +256,7 @@ class TasksAnalysis(AnalysisBase): task_state_df.rename(columns={'__cpu' : 'cpu'}, inplace=True) task_state_df = task_state_df[['target_cpu', 'cpu', 'curr_state']] task_state_df['next_state'] = task_state_df.curr_state.shift(-1) - self._trace.add_events_deltas(task_state_df, inplace=True) + self.trace.add_events_deltas(task_state_df, inplace=True) return task_state_df @@ -273,7 +273,7 @@ class TasksAnalysis(AnalysisBase): * CPU IDs as index * A ``runtime`` column (the time the task spent being active) """ - cpus = set(range(self._trace.plat_info['cpus-count'])) + cpus = 
set(range(self.trace.plat_info['cpus-count'])) df = self.df_task_states(task) df = df[df.curr_state == TaskState.TASK_ACTIVE.char] @@ -303,14 +303,14 @@ class TasksAnalysis(AnalysisBase): """ fig, axis = self.setup_plot() - pid = self._trace.get_task_pid(task) + pid = self.trace.get_task_pid(task) - sw_df = self._trace.df_events("sched_switch") + sw_df = self.trace.df_events("sched_switch") sw_df = sw_df[sw_df.next_pid == pid] - if "freq-domains" in self._trace.plat_info: + if "freq-domains" in self.trace.plat_info: # If we are aware of frequency domains, use one color per domain - for domain in self._trace.plat_info["freq-domains"]: + for domain in self.trace.plat_info["freq-domains"]: df = sw_df[sw_df["__cpu"].isin(domain)]["__cpu"] print(domain) @@ -326,18 +326,18 @@ class TasksAnalysis(AnalysisBase): else: sw_df["__cpu"].plot(ax=axis, style='+') - plot_overutilized = self._trace.analysis.status.plot_overutilized - if self._trace.hasEvents(plot_overutilized.required_events): + plot_overutilized = self.trace.analysis.status.plot_overutilized + if self.trace.hasEvents(plot_overutilized.required_events): plot_overutilized(axis=axis) # Add an extra CPU lane to make room for the legend - axis.set_ylim(-0.95, self._trace.cpus_count - 0.05) + axis.set_ylim(-0.95, self.trace.cpus_count - 0.05) axis.set_title("CPU residency of task \"{}\"".format(task)) axis.set_ylabel('CPUs') axis.grid(True) axis.legend() - axis.set_xlim(self._trace.x_min, self._trace.x_max) + axis.set_xlim(self.trace.x_min, self.trace.x_max) self.save_plot(fig, filepath) @@ -369,10 +369,10 @@ class TasksAnalysis(AnalysisBase): """ Discrete the contents of ``series`` in ``time_delta`` buckets """ - left = self._trace.x_min + left = self.trace.x_min data = [] index = [] - for right in np.arange(left + time_delta, self._trace.x_max, time_delta): + for right in np.arange(left + time_delta, self.trace.x_max, time_delta): index.append(left) data.append(series[left:right].count()) left = right @@ -383,7 
+383,7 @@ class TasksAnalysis(AnalysisBase): """ Plot some data in a heatmap-style 2d histogram """ - nr_cpus = self._trace.cpus_count + nr_cpus = self.trace.cpus_count fig, axis = self.setup_plot(height=min(4, nr_cpus // 2), width=20) _, _, _, img = axis.hist2d(x, y, bins=[xbins, nr_cpus], **kwargs) @@ -405,7 +405,7 @@ class TasksAnalysis(AnalysisBase): """ fig, axis = self.setup_plot() - df = self._trace.df_events("sched_wakeup") + df = self.trace.df_events("sched_wakeup") if target_cpus: df = df[df.target_cpu.isin(target_cpus)] @@ -414,7 +414,7 @@ class TasksAnalysis(AnalysisBase): df.plot(ax=axis, legend=False) axis.set_title("Number of task wakeups within {}s windows".format(time_delta)) - axis.set_xlim(self._trace.x_min, self._trace.x_max) + axis.set_xlim(self.trace.x_min, self.trace.x_max) self.save_plot(fig, filepath) @@ -432,13 +432,13 @@ class TasksAnalysis(AnalysisBase): :type colormap: str or matplotlib.colors.Colormap """ - df = self._trace.df_events("sched_wakeup") + df = self.trace.df_events("sched_wakeup") fig, axis = self._plot_cpu_heatmap( df.index, df.target_cpu, xbins, "Number of wakeups", cmap=colormap) axis.set_title("Tasks wakeups over time") - axis.set_xlim(self._trace.x_min, self._trace.x_max) + axis.set_xlim(self.trace.x_min, self.trace.x_max) self.save_plot(fig, filepath) @@ -458,7 +458,7 @@ class TasksAnalysis(AnalysisBase): """ fig, axis = self.setup_plot() - df = self._trace.df_events("sched_wakeup_new") + df = self.trace.df_events("sched_wakeup_new") if target_cpus: df = df[df.target_cpu.isin(target_cpus)] @@ -467,7 +467,7 @@ class TasksAnalysis(AnalysisBase): df.plot(ax=axis, legend=False) axis.set_title("Number of task forks within {}s windows".format(time_delta)) - axis.set_xlim(self._trace.x_min, self._trace.x_max) + axis.set_xlim(self.trace.x_min, self.trace.x_max) self.save_plot(fig, filepath) @@ -485,13 +485,13 @@ class TasksAnalysis(AnalysisBase): :type colormap: str or matplotlib.colors.Colormap """ - df = 
self._trace.df_events("sched_wakeup_new") + df = self.trace.df_events("sched_wakeup_new") fig, axis = self._plot_cpu_heatmap( df.index, df.target_cpu, xbins, "Number of forks", cmap=colormap) axis.set_title("Tasks forks over time") - axis.set_xlim(self._trace.x_min, self._trace.x_max) + axis.set_xlim(self.trace.x_min, self.trace.x_max) self.save_plot(fig, filepath) diff --git a/lisa/analysis/thermal.py b/lisa/analysis/thermal.py index 011e5e0ad..9dca51712 100644 --- a/lisa/analysis/thermal.py +++ b/lisa/analysis/thermal.py @@ -44,7 +44,7 @@ class ThermalAnalysis(AnalysisBase): * A ``thermal_zone`` column (The thermal zone name) * A ``temp`` column (The reported temperature) """ - df = self._trace.df_events("thermal") + df = self.trace.df_events("thermal") df = df[['id', 'thermal_zone', 'temp']] return df @@ -64,7 +64,7 @@ class ThermalAnalysis(AnalysisBase): * A ``cdev_state`` column (The cooling device state index) """ - df = self._trace.df_events("cpu_out_power") + df = self.trace.df_events("cpu_out_power") df = df[['cpus', 'freq', 'cdev_state']] if cpus is not None: @@ -89,7 +89,7 @@ class ThermalAnalysis(AnalysisBase): * A ``freq`` column (The frequency limit) * A ``cdev_state`` column (The cooling device state index) """ - df = self._trace.df_events("devfreq_out_power") + df = self.trace.df_events("devfreq_out_power") df = df[['type', 'freq', 'cdev_state']] if devices is not None: @@ -159,7 +159,7 @@ class ThermalAnalysis(AnalysisBase): axis.grid(True) axis.set_title("Temperature evolution") axis.set_ylabel("Temperature (°C.10e3)") - axis.set_xlim(self._trace.x_min, self._trace.x_max) + axis.set_xlim(self.trace.x_min, self.trace.x_max) self.save_plot(fig, filepath) return axis @@ -192,7 +192,7 @@ class ThermalAnalysis(AnalysisBase): axis.set_title("cpufreq cooling devices status") axis.yaxis.set_major_locator(MaxNLocator(integer=True)) axis.grid(axis='y') - axis.set_xlim(self._trace.x_min, self._trace.x_max) + axis.set_xlim(self.trace.x_min, self.trace.x_max) 
self.save_plot(fig, filepath) return axis @@ -221,7 +221,7 @@ class ThermalAnalysis(AnalysisBase): axis.set_title("devfreq cooling devices status") axis.yaxis.set_major_locator(MaxNLocator(integer=True)) axis.grid(axis='y') - axis.set_xlim(self._trace.x_min, self._trace.x_max) + axis.set_xlim(self.trace.x_min, self.trace.x_max) self.save_plot(fig, filepath) return axis @@ -231,7 +231,7 @@ class ThermalAnalysis(AnalysisBase): ############################################################################### def _matching_masks(self, cpus): - df = self._trace.df_events('thermal_power_cpu_limit') + df = self.trace.df_events('thermal_power_cpu_limit') global_mask = list_to_mask(cpus) cpumasks = df['cpus'].unique().tolist() -- GitLab From dec94335f603536a20695f7c40fe77f4c384361b Mon Sep 17 00:00:00 2001 From: Douglas RAILLARD Date: Tue, 11 Dec 2018 15:04:01 +0000 Subject: [PATCH 2/8] trace: Remove camel case and name mangling Align case with PEP-8 and the rest of LISA. Also remove names with double leading underscore that would be mangled, which would be confusing when debugging and will break subclassing for no good reason. 
--- lisa/analysis/frequency.py | 4 +- lisa/analysis/idle.py | 2 +- lisa/analysis/latency.py | 4 +- lisa/analysis/load_tracking.py | 8 +-- lisa/analysis/tasks.py | 2 +- lisa/energy_model.py | 2 +- lisa/tests/kernel/scheduler/misfit.py | 2 +- lisa/tests/lisa/test_trace.py | 2 +- .../eas/heavy_load.py | 2 +- lisa/trace.py | 58 +++++++++---------- lisa/wa_results_collector.py | 8 +-- 11 files changed, 47 insertions(+), 47 deletions(-) diff --git a/lisa/analysis/frequency.py b/lisa/analysis/frequency.py index 1363803ff..10f697dd7 100644 --- a/lisa/analysis/frequency.py +++ b/lisa/analysis/frequency.py @@ -217,7 +217,7 @@ class FrequencyAnalysis(AnalysisBase): :raises: KeyError """ - freq = self.trace.getPeripheralClockEffectiveRate(clk) + freq = self.trace.get_peripheral_clock_effective_rate(clk) if freq is None or freq.empty: self.get_logger().warning('no peripheral clock events found for clock') return @@ -321,7 +321,7 @@ class FrequencyAnalysis(AnalysisBase): label="average") plot_overutilized = self.trace.analysis.status.plot_overutilized - if self.trace.hasEvents(plot_overutilized.required_events): + if self.trace.has_events(plot_overutilized.required_events): plot_overutilized(axis=axis) axis.set_ylim(frequencies[0] * 0.9, frequencies[-1] * 1.1) diff --git a/lisa/analysis/idle.py b/lisa/analysis/idle.py index 669b94151..667cbb844 100644 --- a/lisa/analysis/idle.py +++ b/lisa/analysis/idle.py @@ -125,7 +125,7 @@ class IdleAnalysis(AnalysisBase): sr = pd.Series() for cpu in cpus: - cpu_sr = self.trace.getCPUActiveSignal(cpu) + cpu_sr = self.trace.get_cpu_active_signal(cpu) cpu_sr = cpu_sr[cpu_sr == 1] cpu_sr = cpu_sr.replace(1, cpu) sr = sr.append(cpu_sr) diff --git a/lisa/analysis/latency.py b/lisa/analysis/latency.py index d64ca6872..272dd9784 100644 --- a/lisa/analysis/latency.py +++ b/lisa/analysis/latency.py @@ -381,7 +381,7 @@ class LatencyAnalysis(AnalysisBase): wkp_df.plot(style='+', logy=False, ax=axis) plot_overutilized = 
self.trace.analysis.status.plot_overutilized - if self.trace.hasEvents(plot_overutilized.required_events): + if self.trace.has_events(plot_overutilized.required_events): plot_overutilized(axis=axis) axis.set_title("Activation intervals of task \"{}\"".format(task)) @@ -406,7 +406,7 @@ class LatencyAnalysis(AnalysisBase): df.plot(style='+', ax=axis) plot_overutilized = self.trace.analysis.status.plot_overutilized - if self.trace.hasEvents(plot_overutilized.required_events): + if self.trace.has_events(plot_overutilized.required_events): plot_overutilized(axis=axis) axis.set_title("Per-activation runtimes of task \"{}\"".format(task)) diff --git a/lisa/analysis/load_tracking.py b/lisa/analysis/load_tracking.py index 0595d7e54..ad2af0524 100644 --- a/lisa/analysis/load_tracking.py +++ b/lisa/analysis/load_tracking.py @@ -196,7 +196,7 @@ class LoadTrackingAnalysis(AnalysisBase): self.trace.analysis.cpus.plot_orig_capacity(axis, cpu) # Add capacities data if available - if self.trace.hasEvents('cpu_capacity'): + if self.trace.has_events('cpu_capacity'): df = self.trace.df_events('cpu_capacity') df = df[df["__cpu"] == cpu] if len(df): @@ -206,7 +206,7 @@ class LoadTrackingAnalysis(AnalysisBase): # Add overutilized signal to the plot plot_overutilized = self.trace.analysis.status.plot_overutilized - if self.trace.hasEvents(plot_overutilized.required_events): + if self.trace.has_events(plot_overutilized.required_events): plot_overutilized(axis=axis) axis.set_ylim(0, 1100) @@ -234,7 +234,7 @@ class LoadTrackingAnalysis(AnalysisBase): df[['load']].plot(ax=axis, drawstyle='steps-post', alpha=0.4) plot_overutilized = self.trace.analysis.status.plot_overutilized - if self.trace.hasEvents(plot_overutilized.required_events): + if self.trace.has_events(plot_overutilized.required_events): plot_overutilized(axis=axis) axis.set_title('Load-tracking signals of task "{}"'.format(task)) @@ -322,7 +322,7 @@ class LoadTrackingAnalysis(AnalysisBase): df[df.placement == 
stat]["__cpu"].plot(ax=axis, style="+", label=stat) plot_overutilized = self.trace.analysis.status.plot_overutilized - if self.trace.hasEvents(plot_overutilized.required_events): + if self.trace.has_events(plot_overutilized.required_events): plot_overutilized(axis=axis) axis.set_title("Utilization vs placement of task \"{}\"".format(task)) diff --git a/lisa/analysis/tasks.py b/lisa/analysis/tasks.py index d03df0e90..aeda5cd2a 100644 --- a/lisa/analysis/tasks.py +++ b/lisa/analysis/tasks.py @@ -327,7 +327,7 @@ class TasksAnalysis(AnalysisBase): sw_df["__cpu"].plot(ax=axis, style='+') plot_overutilized = self.trace.analysis.status.plot_overutilized - if self.trace.hasEvents(plot_overutilized.required_events): + if self.trace.has_events(plot_overutilized.required_events): plot_overutilized(axis=axis) # Add an extra CPU lane to make room for the legend diff --git a/lisa/energy_model.py b/lisa/energy_model.py index 328885005..aece2ca65 100644 --- a/lisa/energy_model.py +++ b/lisa/energy_model.py @@ -1110,7 +1110,7 @@ class EnergyModel(Serializable, Loggable): the returned DataFrame to get a Series that shows overall estimated power usage over time. 
""" - if not trace.hasEvents('cpu_idle') or not trace.hasEvents('cpu_frequency'): + if not trace.has_events('cpu_idle') or not trace.has_events('cpu_frequency'): raise ValueError('Requires cpu_idle and cpu_frequency trace events') idle = Parser(trace.ftrace).solve('cpu_idle:state') diff --git a/lisa/tests/kernel/scheduler/misfit.py b/lisa/tests/kernel/scheduler/misfit.py index 3effb1b2d..cc13d78b7 100644 --- a/lisa/tests/kernel/scheduler/misfit.py +++ b/lisa/tests/kernel/scheduler/misfit.py @@ -255,7 +255,7 @@ class StaggeredFinishes(MisfitMigrationBase): """ :returns: A dataframe that describes the idle status (on/off) of 'cpu' """ - active_df = pd.DataFrame(self.trace.getCPUActiveSignal(cpu), columns=['state']) + active_df = pd.DataFrame(self.trace.get_cpu_active_signal(cpu), columns=['state']) self.trace.add_events_deltas(active_df) return active_df diff --git a/lisa/tests/lisa/test_trace.py b/lisa/tests/lisa/test_trace.py index 4cea9bb79..61f05ad55 100644 --- a/lisa/tests/lisa/test_trace.py +++ b/lisa/tests/lisa/test_trace.py @@ -331,7 +331,7 @@ class TestTrace(StorageTestCase): -0 [000] 380335000000: clock_disable: bus_clk state=0 cpu_id=0 -0 [004] 380339000000: cpu_idle: state=1 cpu_id=4 """) - df = trace.getPeripheralClockEffectiveRate(clk_name='bus_clk') + df = trace.get_peripheral_clock_effective_rate(clk_name='bus_clk') exp_effective_rate=[ float('NaN'), 750000000, 0.0, 750000000, 100000000, 0.0] effective_rate = df['effective_rate'].tolist() self.assertEqual(len(exp_effective_rate), len(effective_rate)) diff --git a/lisa/tests/tests_TO_DISPATCH_IN_kernel/eas/heavy_load.py b/lisa/tests/tests_TO_DISPATCH_IN_kernel/eas/heavy_load.py index b75c5f879..dc9f5a77a 100644 --- a/lisa/tests/tests_TO_DISPATCH_IN_kernel/eas/heavy_load.py +++ b/lisa/tests/tests_TO_DISPATCH_IN_kernel/eas/heavy_load.py @@ -78,7 +78,7 @@ class HeavyLoadTest(LisaTest): total_cpu_time = 0 active_proportions = [] for cpu, _ in enumerate(self.target.core_names): - cpu_active = 
trace.getCPUActiveSignal(cpu) + cpu_active = trace.get_cpu_active_signal(cpu) if cpu_active is None: raise RuntimeError( "Couldn't get CPU-active signal. " diff --git a/lisa/trace.py b/lisa/trace.py index 86a08cc8b..7c1dc3c68 100644 --- a/lisa/trace.py +++ b/lisa/trace.py @@ -133,8 +133,8 @@ class Trace(Loggable): self.plots_prefix = plots_prefix - self.__registerTraceEvents(events) - self.__parseTrace(self.data_dir, window, trace_format) + self._register_trace_events(events) + self._parse_trace(self.data_dir, window, trace_format) self.analysis = AnalysisProxy(self) @@ -150,7 +150,7 @@ class Trace(Loggable): for e in self.available_events) return max_cpu + 1 - def setXTimeRange(self, t_min=None, t_max=None): + def set_x_time_range(self, t_min=None, t_max=None): """ Set x axis time range to the specified values. @@ -166,7 +166,7 @@ class Trace(Loggable): self.get_logger().debug('Set plots time range to (%.6f, %.6f)[s]', self.x_min, self.x_max) - def __registerTraceEvents(self, events): + def _register_trace_events(self, events): """ Save a copy of the parsed events. @@ -187,7 +187,7 @@ class Trace(Loggable): if 'cpu_frequency' in events: self.events.append('cpu_frequency_devlib') - def __parseTrace(self, path, window, trace_format): + def _parse_trace(self, path, window, trace_format): """ Internal method in charge of performing the actual parsing of the trace. 
@@ -233,7 +233,7 @@ class Trace(Loggable): has_function_stats = self._loadFunctionsStats(path) # Check for events available on the parsed trace - self.__checkAvailableEvents() + self._check_available_events() if len(self.available_events) == 0: if has_function_stats: logger.info('Trace contains only functions stats') @@ -242,9 +242,9 @@ class Trace(Loggable): 'nor function stats') # Index PIDs and Task names - self.__loadTasksNames() + self._load_tasks_names() - self.__computeTimeSpan() + self._compute_timespan() # Setup internal data reference to interesting events/dataframes self._sanitize_SchedLoadAvgCpu() @@ -257,7 +257,7 @@ class Trace(Loggable): self._sanitize_CpuFrequency() self._sanitize_ThermalPowerCpu() - def __checkAvailableEvents(self, key=""): + def _check_available_events(self, key=""): """ Internal method used to build a list of available events. @@ -273,13 +273,13 @@ class Trace(Loggable): for evt in self.available_events: logger.debug(' - %s', evt) - def __loadTasksNames(self): + def _load_tasks_names(self): """ Try to load tasks names using one of the supported events. """ def load(event, name_key, pid_key): df = self.df_events(event) - self._scanTasks(df, name_key=name_key, pid_key=pid_key) + self._scan_tasks(df, name_key=name_key, pid_key=pid_key) if 'sched_switch' in self.available_events: load('sched_switch', 'prev_comm', 'prev_pid') @@ -291,7 +291,7 @@ class Trace(Loggable): self.get_logger().warning('Failed to load tasks names from trace events') - def hasEvents(self, dataset): + def has_events(self, dataset): """ Returns True if the specified event is present in the parsed trace, False otherwise. @@ -304,7 +304,7 @@ class Trace(Loggable): return set(dataset).issubset(set(self.available_events)) - def __computeTimeSpan(self): + def _compute_timespan(self): """ Compute time axis range, considering all the parsed events. 
""" @@ -313,9 +313,9 @@ class Trace(Loggable): self.get_logger().debug('Collected events spans a %.3f [s] time interval', self.time_range) - self.setXTimeRange(max(self.start_time, self.window[0]), self.window[1]) + self.set_x_time_range(max(self.start_time, self.window[0]), self.window[1]) - def _scanTasks(self, df, name_key='comm', pid_key='pid'): + def _scan_tasks(self, df, name_key='comm', pid_key='pid'): """ Extract tasks names and PIDs from the input data frame. The data frame should contain a task name column and PID column. @@ -495,7 +495,7 @@ class Trace(Loggable): Add more columns to cpu_capacity data frame if the energy model is available and the platform is big.LITTLE. """ - if not self.hasEvents('cpu_capacity') \ + if not self.has_events('cpu_capacity') \ or 'nrg-model' not in self.plat_info \ or not self.has_big_little: return @@ -520,7 +520,7 @@ class Trace(Loggable): """ If necessary, rename certain signal names from v5.0 to v5.1 format. """ - if not self.hasEvents('sched_load_avg_cpu'): + if not self.has_events('sched_load_avg_cpu'): return df = self.df_events('sched_load_avg_cpu') if 'utilization' in df: @@ -531,7 +531,7 @@ class Trace(Loggable): """ If necessary, rename certain signal names from v5.0 to v5.1 format. """ - if not self.hasEvents('sched_load_avg_task'): + if not self.has_events('sched_load_avg_task'): return df = self.df_events('sched_load_avg_task') if 'utilization' in df: @@ -548,7 +548,7 @@ class Trace(Loggable): Also, if necessary, rename certain signal names from v5.0 to v5.1 format. """ - if not self.hasEvents('sched_boost_cpu'): + if not self.has_events('sched_boost_cpu'): return df = self.df_events('sched_boost_cpu') if 'usage' in df: @@ -562,7 +562,7 @@ class Trace(Loggable): Also, if necessary, rename certain signal names from v5.0 to v5.1 format. 
""" - if not self.hasEvents('sched_boost_task'): + if not self.has_events('sched_boost_task'): return df = self.df_events('sched_boost_task') if 'utilization' in df: @@ -578,7 +578,7 @@ class Trace(Loggable): Also convert between existing field name formats for sched_energy_diff """ logger = self.get_logger() - if not self.hasEvents('sched_energy_diff') \ + if not self.has_events('sched_energy_diff') \ or 'nrg-model' not in self.plat_info \ or not self.has_big_little: return @@ -621,7 +621,7 @@ class Trace(Loggable): def _sanitize_SchedOverutilized(self): """ Add a column with overutilized status duration. """ - if not self.hasEvents('sched_overutilized'): + if not self.has_events('sched_overutilized'): return df = self.df_events('sched_overutilized') @@ -643,7 +643,7 @@ class Trace(Loggable): return int(mask.replace(',', ''), 16) def _sanitize_ThermalPowerCpuGetPower(self): - if not self.hasEvents('thermal_power_cpu_get_power'): + if not self.has_events('thermal_power_cpu_get_power'): return df = self.df_events('thermal_power_cpu_get_power') @@ -653,7 +653,7 @@ class Trace(Loggable): ) def _sanitize_ThermalPowerCpuLimit(self): - if not self.hasEvents('thermal_power_cpu_limit'): + if not self.has_events('thermal_power_cpu_limit'): return df = self.df_events('thermal_power_cpu_limit') @@ -681,7 +681,7 @@ class Trace(Loggable): frequency scaling is performed at a cluster level). """ logger = self.get_logger() - if not self.hasEvents('cpu_frequency_devlib') \ + if not self.has_events('cpu_frequency_devlib') \ or 'freq-domains' not in self.plat_info: return @@ -812,7 +812,7 @@ class Trace(Loggable): return len(self._functions_stats_df) > 0 @memoized - def getCPUActiveSignal(self, cpu): + def get_cpu_active_signal(self, cpu): """ Build a square wave representing the active (i.e. 
non-idle) CPU time, i.e.: @@ -827,7 +827,7 @@ class Trace(Loggable): :returns: A :class:`pandas.Series` or ``None`` if the trace contains no "cpu_idle" events """ - if not self.hasEvents('cpu_idle'): + if not self.has_events('cpu_idle'): self.get_logger().warning('Events [cpu_idle] not found, ' 'cannot compute CPU active signal!') return None @@ -853,12 +853,12 @@ class Trace(Loggable): return handle_duplicate_index(cpu_active) @memoized - def getPeripheralClockEffectiveRate(self, clk_name): + def get_peripheral_clock_effective_rate(self, clk_name): logger = self.get_logger() if clk_name is None: logger.warning('no specified clk_name in computing peripheral clock, returning None') return - if not self.hasEvents('clock_set_rate'): + if not self.has_events('clock_set_rate'): logger.warning('Events [clock_set_rate] not found, returning None!') return rate_df = self.df_events('clock_set_rate') diff --git a/lisa/wa_results_collector.py b/lisa/wa_results_collector.py index ccc5521cc..e98150955 100644 --- a/lisa/wa_results_collector.py +++ b/lisa/wa_results_collector.py @@ -412,7 +412,7 @@ class WaResultsCollector(Loggable): # Helper to get area under curve of multiple CPU active signals def get_cpu_time(trace, cpus): - df = pd.DataFrame([trace.getCPUActiveSignal(cpu) for cpu in cpus]) + df = pd.DataFrame([trace.get_cpu_active_signal(cpu) for cpu in cpus]) return df.sum(axis=1).sum(axis=0) clusters = trace.platform.get('clusters') @@ -446,11 +446,11 @@ class WaResultsCollector(Loggable): 'cpu-seconds')) event = None - if trace.hasEvents('sched_load_cfs_rq'): + if trace.has_events('sched_load_cfs_rq'): event = 'sched_load_cfs_rq' row_filter = lambda r: r.path == '/' column = 'util' - elif trace.hasEvents('sched_load_avg_cpu'): + elif trace.has_events('sched_load_avg_cpu'): event = 'sched_load_avg_cpu' row_filter = lambda r: True column = 'util_avg' @@ -461,7 +461,7 @@ class WaResultsCollector(Loggable): avg_util_sum = area_under_curve(util_sum) / (util_sum.index[-1] - 
util_sum.index[0]) metrics.append(('avg_util_sum', avg_util_sum, None)) - if trace.hasEvents('thermal_temperature'): + if trace.has_events('thermal_temperature'): df = trace.df_events('thermal_temperature') for zone, zone_df in df.groupby('thermal_zone'): metrics.append(('tz_{}_start_temp'.format(zone), -- GitLab From 0b9ac6b18c205e02a50b17223c3bb9c709b5adab Mon Sep 17 00:00:00 2001 From: Douglas RAILLARD Date: Mon, 10 Dec 2018 15:34:49 +0000 Subject: [PATCH 3/8] analysis: Factorize-out events checking Move events checking feature in trace.py, so it can be reused for tests. Allow checking arbitrary boolean combination of events in traces, instead than just a list of mandatory events. The event checker object can now be used as a decorator directly. --- doc/conf.py | 6 +- lisa/analysis/base.py | 46 +------ lisa/analysis/cpus.py | 5 +- lisa/analysis/frequency.py | 19 +-- lisa/analysis/idle.py | 11 +- lisa/analysis/latency.py | 24 ++-- lisa/analysis/status.py | 5 +- lisa/analysis/tasks.py | 9 +- lisa/analysis/thermal.py | 13 +- lisa/trace.py | 257 +++++++++++++++++++++++++++++++++++-- lisa/utils.py | 22 ++++ 11 files changed, 317 insertions(+), 100 deletions(-) diff --git a/doc/conf.py b/doc/conf.py index 3c376a957..7b204842d 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -391,11 +391,7 @@ def autodoc_process_analysis_events(app, what, name, obj, options, lines): if what != 'method' or not hasattr(obj, "required_events"): return - events = obj.required_events - - events_doc = "\n:Required trace events:\n\n{}\n\n".format( - "\n".join([" * ``{}``".format(event) for event in events])) - + events_doc = "\n:Required trace events:\n\n{}\n\n".format(obj.required_events.doc_str()) lines.extend(events_doc.splitlines()) def setup(app): diff --git a/lisa/analysis/base.py b/lisa/analysis/base.py index 76e3abc24..531fe870c 100644 --- a/lisa/analysis/base.py +++ b/lisa/analysis/base.py @@ -32,39 +32,6 @@ COLOR_CYCLES = [ plt.rcParams['axes.prop_cycle'] = cycler(color=COLOR_CYCLES) 
-class MissingTraceEventError(RuntimeError): - """ - :param missing_events: The missing trace events - :type missing_events: list(str) - """ - def __init__(self, missing_events): - super().__init__( - "Trace is missing the following required events: {}".format(missing_events)) - - self.missing_events = missing_events - -def requires_events(events): - """ - Decorator for methods that require some given trace events - - :param events: The list of required events - :type events: list(str) - - The decorate method must inherit from :class:`AnalysisBase` - """ - def decorator(f): - @functools.wraps(f) - def wrapper(self, *args, **kwargs): - self.check_events(events) - return f(self, *args, **kwargs) - - # Set an attribute on the wrapper itself, so it can be e.g. added - # to the method documentation - wrapper.required_events = sorted(set(events)) - return wrapper - - return decorator - class AnalysisBase(Loggable): """ Base class for Analysis modules. @@ -75,7 +42,7 @@ class AnalysisBase(Loggable): :Design notes: Method depending on certain trace events *must* be decorated with - :meth:`lisa.analysis.base.requires_events` + :meth:`lisa.trace.requires_events` Plotting methods *must* return the :class:`matplotlib.axes.Axes` instance used by the plotting method. This lets users further modify them. 
@@ -184,16 +151,5 @@ class AnalysisBase(Loggable): figure.savefig(filepath, format=img_format) - def check_events(self, required_events): - """ - Check that certain trace events are available in the trace - - :raises: MissingTraceEventError if some events are not available - """ - available_events = sorted(set(self.trace.available_events)) - missing_events = sorted(set(required_events).difference(available_events)) - - if missing_events: - raise MissingTraceEventError(missing_events) # vim :set tabstop=4 shiftwidth=4 expandtab textwidth=80 diff --git a/lisa/analysis/cpus.py b/lisa/analysis/cpus.py index fff331822..04377ff3a 100644 --- a/lisa/analysis/cpus.py +++ b/lisa/analysis/cpus.py @@ -20,7 +20,8 @@ import pandas as pd from lisa.utils import memoized -from lisa.analysis.base import AnalysisBase, requires_events +from lisa.analysis.base import AnalysisBase +from lisa.trace import requires_events class CpusAnalysis(AnalysisBase): @@ -62,7 +63,7 @@ class CpusAnalysis(AnalysisBase): # Plotting Methods ############################################################################### - @requires_events(df_context_switches.required_events) + @df_context_switches.required_events def plot_context_switches(self, filepath=None): """ Plot histogram of context switches on each CPU. 
diff --git a/lisa/analysis/frequency.py b/lisa/analysis/frequency.py index 10f697dd7..3a2e13e43 100644 --- a/lisa/analysis/frequency.py +++ b/lisa/analysis/frequency.py @@ -25,8 +25,9 @@ from matplotlib.ticker import FuncFormatter import pandas as pd import pylab as pl -from lisa.analysis.base import AnalysisBase, requires_events +from lisa.analysis.base import AnalysisBase from lisa.utils import memoized +from lisa.trace import requires_events class FrequencyAnalysis(AnalysisBase): """ @@ -99,7 +100,7 @@ class FrequencyAnalysis(AnalysisBase): time_df["active_time"] = pd.DataFrame(index=available_freqs, data=nonidle_time) return time_df - @requires_events(_get_frequency_residency.required_events) + @_get_frequency_residency.required_events def df_cpu_frequency_residency(self, cpu): """ Get per-CPU frequency residency, i.e. amount of @@ -118,7 +119,7 @@ class FrequencyAnalysis(AnalysisBase): return self._get_frequency_residency((cpu,)) - @requires_events(_get_frequency_residency.required_events) + @_get_frequency_residency.required_events def df_domain_frequency_residency(self, cpu): """ Get per-frequency-domain frequency residency, i.e. amount of time each @@ -163,7 +164,7 @@ class FrequencyAnalysis(AnalysisBase): return pd.DataFrame(transitions) - @requires_events(df_cpu_frequency_transitions.required_events) + @df_cpu_frequency_transitions.required_events def df_cpu_frequency_transition_rate(self, cpu): """ Compute frequency transition rate of a given CPU. @@ -339,7 +340,7 @@ class FrequencyAnalysis(AnalysisBase): return axis - @requires_events(plot_cpu_frequencies.required_events) + @plot_cpu_frequencies.required_events def plot_domain_frequencies(self, filepath=None): """ Plot frequency trend for all frequency domains. 
@@ -362,7 +363,7 @@ class FrequencyAnalysis(AnalysisBase): return axes - @requires_events(df_cpu_frequency_residency.required_events) + @df_cpu_frequency_residency.required_events def plot_cpu_frequency_residency(self, cpu, filepath=None, pct=False, axes=None): """ Plot per-CPU frequency residency. @@ -411,7 +412,7 @@ class FrequencyAnalysis(AnalysisBase): return axes - @requires_events(plot_cpu_frequency_residency.required_events) + @plot_cpu_frequency_residency.required_events def plot_domain_frequency_residency(self, filepath=None, pct=False): """ Plot the frequency residency for all frequency domains. @@ -434,7 +435,7 @@ class FrequencyAnalysis(AnalysisBase): return axes - @requires_events(df_cpu_frequency_transitions.required_events) + @df_cpu_frequency_transitions.required_events def plot_cpu_frequency_transitions(self, cpu, filepath=None, pct=False, axis=None): """ Plot frequency transitions count of the specified CPU @@ -472,7 +473,7 @@ class FrequencyAnalysis(AnalysisBase): return axis - @requires_events(plot_cpu_frequency_transitions.required_events) + @plot_cpu_frequency_transitions.required_events def plot_domain_frequency_transitions(self, filepath=None, pct=False): """ Plot frequency transitions count for all frequency domains diff --git a/lisa/analysis/idle.py b/lisa/analysis/idle.py index 667cbb844..5660e0fe2 100644 --- a/lisa/analysis/idle.py +++ b/lisa/analysis/idle.py @@ -23,7 +23,8 @@ import pandas as pd from trappy.utils import handle_duplicate_index from lisa.utils import memoized -from lisa.analysis.base import AnalysisBase, requires_events +from lisa.analysis.base import AnalysisBase +from lisa.trace import requires_events class IdleAnalysis(AnalysisBase): @@ -75,7 +76,7 @@ class IdleAnalysis(AnalysisBase): # Fix sequences of wakeup/sleep events reported with the same index return handle_duplicate_index(cpu_active) - @requires_events(signal_cpu_active.required_events) + @signal_cpu_active.required_events def signal_cluster_active(self, 
cluster): """ Build a square wave representing the active (i.e. non-idle) cluster time @@ -244,7 +245,7 @@ class IdleAnalysis(AnalysisBase): # Plotting Methods ############################################################################### - @requires_events(df_cpu_idle_state_residency.required_events) + @df_cpu_idle_state_residency.required_events def plot_cpu_idle_state_residency(self, cpu, filepath=None, pct=False): """ Plot the idle state residency of a CPU @@ -267,7 +268,7 @@ class IdleAnalysis(AnalysisBase): return axis - @requires_events(df_cluster_idle_state_residency.required_events) + @df_cluster_idle_state_residency.required_events def plot_cluster_idle_state_residency(self, cluster, filepath=None, pct=False, axis=None): """ @@ -298,7 +299,7 @@ class IdleAnalysis(AnalysisBase): return axis - @requires_events(plot_cluster_idle_state_residency.required_events) + @plot_cluster_idle_state_residency.required_events def plot_clusters_idle_state_residency(self, filepath=None, pct=False): """ Plot the idle state residency of all clusters diff --git a/lisa/analysis/latency.py b/lisa/analysis/latency.py index 272dd9784..f4e8e8656 100644 --- a/lisa/analysis/latency.py +++ b/lisa/analysis/latency.py @@ -18,7 +18,7 @@ import pandas as pd import numpy as np -from lisa.analysis.base import AnalysisBase, requires_events, COLOR_CYCLES +from lisa.analysis.base import AnalysisBase, COLOR_CYCLES from lisa.analysis.tasks import TaskState, TasksAnalysis from lisa.utils import memoized @@ -42,7 +42,7 @@ class LatencyAnalysis(AnalysisBase): # DataFrame Getter Methods ############################################################################### - @requires_events(TasksAnalysis.df_task_states.required_events) + @TasksAnalysis.df_task_states.required_events def df_latency_wakeup(self, task): """ DataFrame of a task's wakeup latencies @@ -63,7 +63,7 @@ class LatencyAnalysis(AnalysisBase): df.rename(columns={'delta' : 'wakeup_latency'}, inplace=True) return df - 
@requires_events(TasksAnalysis.df_task_states.required_events) + @TasksAnalysis.df_task_states.required_events def df_latency_preemption(self, task): """ DataFrame of a task's preemption latencies @@ -83,7 +83,7 @@ class LatencyAnalysis(AnalysisBase): df.rename(columns={'delta' : 'preempt_latency'}, inplace=True) return df - @requires_events(TasksAnalysis.df_task_states.required_events) + @TasksAnalysis.df_task_states.required_events def df_activations(self, task): """ DataFrame of a task's activations @@ -103,7 +103,7 @@ class LatencyAnalysis(AnalysisBase): return wkp_df[["activation_interval"]] - @requires_events(TasksAnalysis.df_task_states.required_events) + @TasksAnalysis.df_task_states.required_events def df_runtimes(self, task): """ DataFrame of task's runtime each time the task blocks @@ -173,7 +173,7 @@ class LatencyAnalysis(AnalysisBase): # Plotting Methods ############################################################################### - @requires_events(df_latency_wakeup.required_events) + @df_latency_wakeup.required_events def plot_latencies(self, task, wakeup=True, preempt=True, threshold_ms=1, filepath=None): """ @@ -233,7 +233,7 @@ class LatencyAnalysis(AnalysisBase): above = 1 - below return df, above, below - @requires_events(df_latency_wakeup.required_events) + @df_latency_wakeup.required_events def _get_latencies_df(self, task, wakeup, preempt): wkp_df = None prt_df = None @@ -253,7 +253,7 @@ class LatencyAnalysis(AnalysisBase): return df - @requires_events(_get_latencies_df.required_events) + @_get_latencies_df.required_events def plot_latencies_cdf(self, task, wakeup=True, preempt=True, threshold_ms=1, filepath=None): """ @@ -292,7 +292,7 @@ class LatencyAnalysis(AnalysisBase): self.save_plot(fig, filepath) return axis - @requires_events(_get_latencies_df.required_events) + @_get_latencies_df.required_events def plot_latencies_histogram(self, task, wakeup=True, preempt=True, threshold_ms=1, bins=64, filepath=None): """ @@ -326,7 +326,7 @@ class 
LatencyAnalysis(AnalysisBase): self.save_plot(fig, filepath) return axis - @requires_events(df_latency_wakeup.required_events) + @df_latency_wakeup.required_events def plot_latency_bands(self, task, filepath=None, axis=None): """ Draw the task wakeup/preemption latencies as colored bands @@ -366,7 +366,7 @@ class LatencyAnalysis(AnalysisBase): return axis - @requires_events(df_activations.required_events) + @df_activations.required_events def plot_activations(self, task, filepath=None): """ Plot the :meth:`lisa.analysis.latency.LatencyAnalysis.df_activations` of a task @@ -391,7 +391,7 @@ class LatencyAnalysis(AnalysisBase): self.save_plot(fig, filepath) return axis - @requires_events(df_runtimes.required_events) + @df_runtimes.required_events def plot_runtimes(self, task, filepath=None): """ Plot the :meth:`lisa.analysis.latency.LatencyAnalysis.df_runtimes` of a task diff --git a/lisa/analysis/status.py b/lisa/analysis/status.py index c72ebafe1..dd16cb444 100644 --- a/lisa/analysis/status.py +++ b/lisa/analysis/status.py @@ -19,7 +19,8 @@ """ System Status Analaysis Module """ -from lisa.analysis.base import AnalysisBase, requires_events +from lisa.analysis.base import AnalysisBase +from lisa.trace import requires_events class StatusAnalysis(AnalysisBase): """ @@ -67,7 +68,7 @@ class StatusAnalysis(AnalysisBase): # Plotting Methods ############################################################################### - @requires_events(df_overutilized.required_events) + @df_overutilized.required_events def plot_overutilized(self, filepath=None, axis=None): """ Draw the system's overutilized status as colored bands diff --git a/lisa/analysis/tasks.py b/lisa/analysis/tasks.py index aeda5cd2a..0f8fcd4b5 100644 --- a/lisa/analysis/tasks.py +++ b/lisa/analysis/tasks.py @@ -20,8 +20,9 @@ from enum import Enum import numpy as np import pandas as pd -from lisa.analysis.base import AnalysisBase, requires_events +from lisa.analysis.base import AnalysisBase from lisa.utils import 
memoized +from lisa.trace import requires_events class StateInt(int): """ @@ -143,7 +144,7 @@ class TasksAnalysis(AnalysisBase): return df - @requires_events(df_tasks_wakeups.required_events) + @df_tasks_wakeups.required_events def df_top_wakeup(self, min_wakeups=100): """ Tasks which wakeup more frequently than a specified threshold. @@ -260,7 +261,7 @@ class TasksAnalysis(AnalysisBase): return task_state_df - @requires_events(df_task_states.required_events) + @df_task_states.required_events def df_task_total_residency(self, task): """ DataFrame of a task's execution time on each CPU @@ -343,7 +344,7 @@ class TasksAnalysis(AnalysisBase): return axis - @requires_events(df_task_total_residency.required_events) + @df_task_total_residency.required_events def plot_task_total_residency(self, task, filepath=None): """ Plot a task's total time spent on each CPU diff --git a/lisa/analysis/thermal.py b/lisa/analysis/thermal.py index 9dca51712..be750f001 100644 --- a/lisa/analysis/thermal.py +++ b/lisa/analysis/thermal.py @@ -19,8 +19,9 @@ from matplotlib.ticker import MaxNLocator from devlib.utils.misc import list_to_mask, mask_to_list -from lisa.analysis.base import AnalysisBase, requires_events +from lisa.analysis.base import AnalysisBase from lisa.utils import memoized +from lisa.trace import requires_events class ThermalAnalysis(AnalysisBase): @@ -99,7 +100,7 @@ class ThermalAnalysis(AnalysisBase): @property @memoized - @requires_events(df_thermal_zones_temperature.required_events) + @df_thermal_zones_temperature.required_events def thermal_zones(self): """ Get thermal zone ids that appear in the trace @@ -109,7 +110,7 @@ class ThermalAnalysis(AnalysisBase): @property @memoized - @requires_events(df_cpufreq_cooling_state.required_events) + @df_cpufreq_cooling_state.required_events def cpufreq_cdevs(self): """ Get cpufreq cooling devices that appear in the trace @@ -120,7 +121,7 @@ class ThermalAnalysis(AnalysisBase): @property @memoized - 
@requires_events(df_devfreq_cooling_state.required_events) + @df_devfreq_cooling_state.required_events def devfreq_cdevs(self): """ Get devfreq cooling devices that appear in the trace @@ -132,7 +133,7 @@ class ThermalAnalysis(AnalysisBase): # Plotting Methods ############################################################################### - @requires_events(df_thermal_zones_temperature.required_events) + @df_thermal_zones_temperature.required_events def plot_thermal_zone_temperature(self, thermal_zone_id, filepath=None, axis=None): """ Plot temperature of thermal zones (all by default) @@ -164,7 +165,7 @@ class ThermalAnalysis(AnalysisBase): return axis - @requires_events(df_cpufreq_cooling_state.required_events) + @df_cpufreq_cooling_state.required_events def plot_cpu_cooling_states(self, cpu, filepath=None, axis=None): """ Plot the state evolution of a cpufreq cooling device diff --git a/lisa/trace.py b/lisa/trace.py index 7c1dc3c68..f2bead652 100644 --- a/lisa/trace.py +++ b/lisa/trace.py @@ -17,6 +17,7 @@ """ Trace Parser Module """ +import abc import numpy as np import os import os.path @@ -28,10 +29,9 @@ import warnings import operator import logging import webbrowser -from functools import reduce +from functools import reduce, wraps -from lisa.analysis.proxy import AnalysisProxy -from lisa.utils import Loggable, memoized +from lisa.utils import Loggable, memoized, deduplicate from lisa.platforms.platinfo import PlatformInfo from devlib.target import KernelVersion from trappy.utils import listify, handle_duplicate_index @@ -136,6 +136,9 @@ class Trace(Loggable): self._register_trace_events(events) self._parse_trace(self.data_dir, window, trace_format) + # Import here to avoid a circular dependency issue at import time + # with lisa.analysis.base + from lisa.analysis.proxy import AnalysisProxy self.analysis = AnalysisProxy(self) @property @@ -291,18 +294,25 @@ class Trace(Loggable): self.get_logger().warning('Failed to load tasks names from trace events') - 
def has_events(self, dataset): + def has_events(self, events): """ Returns True if the specified event is present in the parsed trace, False otherwise. - :param dataset: trace event name or list of trace events - :type dataset: str or list(str) + :param events: trace event name or list of trace events + :type events: str or list(str) or TraceEventCheckerBase """ - if isinstance(dataset, str): - return dataset in self.available_events - - return set(dataset).issubset(set(self.available_events)) + if isinstance(events, str): + return events in self.available_events + elif isinstance(events, TraceEventCheckerBase): + try: + events.check_events(self.available_events) + except MissingTraceEventError: + return False + else: + return True + else: + return set(events).issubset(set(self.available_events)) def _compute_timespan(self): """ @@ -1023,4 +1033,231 @@ class Trace(Loggable): return res_df +class TraceEventCheckerBase(abc.ABC, Loggable): + """ + ABC for events checker classes. + + Event checking can be achieved using a boolean expression on expected + events. + """ + @abc.abstractmethod + def check_events(self, event_set): + """ + Check that certain trace events are available in the given set of + events. + + :raises: MissingTraceEventError if some events are not available + """ + pass + + def __call__(self, f): + """ + Decorator for methods that require some given trace events + + :param events: The list of required events + :type events: list(str or TraceEventCheckerBase) + + The decorated method must operate on instances that have a ``self.trace`` + attribute. + """ + checker = self + + @wraps(f) + def wrapper(self, *args, **kwargs): + available_events = set(self.trace.available_events) + checker.check_events(available_events) + return f(self, *args, **kwargs) + + # Set an attribute on the wrapper itself, so it can be e.g. 
added + # to the method documentation + wrapper.required_events = checker + return wrapper + + @abc.abstractmethod + def _str_internal(self, style=None, wrapped=True): + """ + Format the boolean expression that this checker represents. + + :param style: When 'rst', a reStructuredText output is expected + :type style: str + + :param wrapped: When True, the expression should be wrapped with + parenthesis so it can be composed with other expressions. + :type wrapped: bool + """ + + pass + + def doc_str(self): + """ + Top-level function called by Sphinx's autodoc extension to augment + docstrings of the functions. + """ + return '\n * {}'.format(self._str_internal(style='rst', wrapped=False)) + + def __str__(self): + return self._str_internal() + +class TraceEventChecker(TraceEventCheckerBase): + """ + Check for one single event. + """ + def __init__(self, event): + self.event = event + + def check_events(self, event_set): + if self.event not in event_set: + raise MissingTraceEventError(self) + + def _str_internal(self, style=None, wrapped=True): + template = '``{}``' if style == 'rst' else '{}' + return template.format(self.event) + +class AssociativeTraceEventChecker(TraceEventCheckerBase): + """ + Base class for associative operators like `and` and `or` + """ + def __init__(self, op_str, event_checkers): + checker_list = [] + for checker in event_checkers: + # "unwrap" checkers of the same type, to avoid useless levels of + # nesting. This is valid since the operator is known to be + # associative. We don't use isinstance to avoid merging checkers + # that may have different semantics. 
+ if type(checker) is type(self): + checker_list.extend(checker.checkers) + else: + checker_list.append(checker) + + # Avoid having the same event twice at the same level + def key(checker): + if isinstance(checker, TraceEventChecker): + return checker.event + else: + return checker + checker_list = deduplicate(checker_list, key=key) + + self.checkers = checker_list + self.op_str = op_str + + @classmethod + def from_events(cls, events): + """ + Build an instance of the class, converting ``str`` to + ``TraceEventChecker``. + + :param events: Sequence of events + :type events: list(str or TraceEventCheckerBase) + """ + return cls({ + e if isinstance(e, TraceEventCheckerBase) else TraceEventChecker(e) + for e in events + }) + + def _str_internal(self, style=None, wrapped=True): + op_str = ' {} '.format(self.op_str) + # Sort for stable output + checker_list = sorted(self.checkers, key=lambda c: str(c)) + unwrapped_str = op_str.join( + c._str_internal(style=style, wrapped=True) + for c in checker_list + ) + + template = '({})' if len(self.checkers) > 1 and wrapped else '{}' + return template.format(unwrapped_str) + +class OrTraceEventChecker(AssociativeTraceEventChecker): + """ + Check that one of the given event checkers is satisfied. + + :param event_checkers: Event checkers to check for + :type event_checkers: list(TraceEventCheckerBase) + """ + def __init__(self, event_checkers): + super().__init__('or', event_checkers) + + def check_events(self, event_set): + if not self.checkers: + return + + failed_checker_set = set() + for checker in self.checkers: + try: + checker.check_events(event_set) + except MissingTraceEventError as e: + failed_checker_set.add(e.missing_events) + else: + break + else: + cls = type(self) + raise MissingTraceEventError( + cls(failed_checker_set) + ) + +class AndTraceEventChecker(AssociativeTraceEventChecker): + """ + Check that all the given event checkers are satisfied. 
+ + :param event_checkers: Event checkers to check for + :type event_checkers: list(TraceEventCheckerBase) + """ + def __init__(self, event_checkers): + super().__init__('and', event_checkers) + + def check_events(self, event_set): + if not self.checkers: + return + + failed_checker_set = set() + for checker in self.checkers: + try: + checker.check_events(event_set) + except MissingTraceEventError as e: + failed_checker_set.add(e.missing_events) + + if failed_checker_set: + cls = type(self) + raise MissingTraceEventError( + cls(failed_checker_set) + ) + + def doc_str(self): + joiner = '\n' + ' ' + rst = joiner + joiner.join( + '* {}'.format(c._str_internal(style='rst', wrapped=False)) + # Sort for stable output + for c in sorted(self.checkers, key=lambda c: str(c)) + ) + return rst + +def requires_events(events): + """ + Decorator for methods that require some given trace events. + + :param events: The list of required events + :type events: list(str or TraceEventCheckerBase) + + The decorated method must operate on instances that have a + ``self.trace`` attribute. + """ + return AndTraceEventChecker.from_events(events) + +def requires_one_event_of(*events): + """ + Same as :func:``required_events`` with logical `OR` semantic. 
+ """ + return OrTraceEventChecker.from_events(events) + +class MissingTraceEventError(RuntimeError): + """ + :param missing_events: The missing trace events + :type missing_events: TraceEventCheckerBase + """ + def __init__(self, missing_events): + super().__init__( + "Trace is missing the following required events: {}".format(missing_events)) + + self.missing_events = missing_events + + # vim :set tabstop=4 shiftwidth=4 expandtab textwidth=80 diff --git a/lisa/utils.py b/lisa/utils.py index 183478b9d..c2e62e950 100644 --- a/lisa/utils.py +++ b/lisa/utils.py @@ -487,4 +487,26 @@ def groupby(iterable, key=None): iterable = sorted(iterable, key=key) return itertools.groupby(iterable, key=key) +def deduplicate(seq, keep_last=True, key=lambda x: x): + """ + Deduplicate items in the given sequence and return a list. + :param seq: Sequence to deduplicate + :type Seq: collections.abc.Sequence + + :param key: Key function that will be used to determine duplication. It + takes one item at a time, returning a hashable key value + :type key: collections.abc.Callable + + :param keep_last: If True, will keep the last occurence of each duplicated + items. Otherwise, keep the first occurence. 
+ :type keep_last: bool + """ + reorder = (lambda seq: seq) if keep_last else reversed + # Use an OrderedDict to keep original ordering of the sequence + dedup = OrderedDict( + (key(x), x) + for x in reorder(seq) + ) + return list(reorder(dedup.values())) + # vim :set tabstop=4 shiftwidth=4 textwidth=80 expandtab -- GitLab From f9bcfa29d7b24c6cca353ffcd67ff4cf63b9a7e4 Mon Sep 17 00:00:00 2001 From: Douglas RAILLARD Date: Tue, 11 Dec 2018 16:53:25 +0000 Subject: [PATCH 4/8] tests: lisa: Add tests for trace.TraceEventChecker --- lisa/tests/lisa/test_events_checkers.py | 86 +++++++++++++++++++++++++ lisa/tests/lisa/utils.py | 14 ++++ 2 files changed, 100 insertions(+) create mode 100644 lisa/tests/lisa/test_events_checkers.py diff --git a/lisa/tests/lisa/test_events_checkers.py b/lisa/tests/lisa/test_events_checkers.py new file mode 100644 index 000000000..d6255ba15 --- /dev/null +++ b/lisa/tests/lisa/test_events_checkers.py @@ -0,0 +1,86 @@ +# SPDX-License-Identifier: Apache-2.0 +# +# Copyright (C) 2018, ARM Limited and contributors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from unittest import TestCase +from lisa.tests.lisa.utils import nullcontext + +from lisa.trace import TraceEventChecker, AndTraceEventChecker, OrTraceEventChecker, MissingTraceEventError + +""" A test suite for event checking infrastructure.""" + + +class TestEventCheckerBase: + """ + A test class that verifies checkers work as expected + """ + EVENTS_SET = {'foo', 'bar', 'baz'} + expected_success = True + + def test_check_events(self): + if self.expected_success: + cm = nullcontext() + else: + cm = self.assertRaises(MissingTraceEventError) + + with cm: + print('Checking: {}'.format(self.checker)) + self.checker.check_events(self.EVENTS_SET) + +class TestEventChecker_and1(TestEventCheckerBase, TestCase): + checker = AndTraceEventChecker.from_events(['foo', 'bar']) + +class TestEventChecker_and2(TestEventCheckerBase, TestCase): + checker = AndTraceEventChecker.from_events(['foo', 'lancelot']) + expected_success = False + +class TestEventChecker_or1(TestEventCheckerBase, TestCase): + checker = OrTraceEventChecker.from_events(['foo', 'bar']) + +class TestEventChecker_or2(TestEventCheckerBase, TestCase): + checker = OrTraceEventChecker.from_events(['foo', 'lancelot']) + +class TestEventChecker_or3(TestEventCheckerBase, TestCase): + checker = OrTraceEventChecker.from_events(['arthur', 'lancelot']) + expected_success = False + +class TestEventChecker_single1(TestEventCheckerBase, TestCase): + checker = TraceEventChecker('bar') + +class TestEventChecker_single2(TestEventCheckerBase, TestCase): + checker = TraceEventChecker('non-existing-event') + expected_success = False + +class TestEventChecker_and3(TestEventCheckerBase, TestCase): + checker = AndTraceEventChecker.from_events([ + TestEventChecker_and1.checker, + TestEventChecker_or1.checker, + ]) + +class TestEventChecker_and4(TestEventCheckerBase, TestCase): + checker = AndTraceEventChecker.from_events([ + TestEventChecker_and1.checker, + TestEventChecker_or2.checker, + ]) + +class 
TestEventChecker_and5(TestEventCheckerBase, TestCase): + checker = AndTraceEventChecker.from_events([ + TestEventChecker_and1.checker, + TestEventChecker_and2.checker, + ]) + expected_success = False + +# vim :set tabstop=4 shiftwidth=4 textwidth=80 expandtab diff --git a/lisa/tests/lisa/utils.py b/lisa/tests/lisa/utils.py index 45cc78070..4da17aa45 100644 --- a/lisa/tests/lisa/utils.py +++ b/lisa/tests/lisa/utils.py @@ -55,3 +55,17 @@ class StorageTestCase(TestCase): def tearDown(self): shutil.rmtree(self.res_dir) + +def nullcontext(enter_result=None): + """ + Backport of Python 3.7 contextlib.nullcontext + """ + + class CM: + def __enter__(self): + return enter_result + + def __exit__(self, *args, **kwargs): + return + + return CM() -- GitLab From 6d5f068fa45ddfe268cc5693b950a54f1764c3ec Mon Sep 17 00:00:00 2001 From: Douglas RAILLARD Date: Mon, 10 Dec 2018 17:48:42 +0000 Subject: [PATCH 5/8] trace: Make trace.requires_events invocation lighter Since there is a lower-level API if more control is needed, we can use all parameters requires_events to specify the events and remove the need to build a list first. --- lisa/analysis/cpus.py | 2 +- lisa/analysis/frequency.py | 8 ++++---- lisa/analysis/idle.py | 8 ++++---- lisa/analysis/status.py | 2 +- lisa/analysis/tasks.py | 16 ++++++++-------- lisa/analysis/thermal.py | 6 +++--- lisa/trace.py | 2 +- 7 files changed, 22 insertions(+), 22 deletions(-) diff --git a/lisa/analysis/cpus.py b/lisa/analysis/cpus.py index 04377ff3a..335e7db00 100644 --- a/lisa/analysis/cpus.py +++ b/lisa/analysis/cpus.py @@ -39,7 +39,7 @@ class CpusAnalysis(AnalysisBase): # DataFrame Getter Methods ############################################################################### - @requires_events(['sched_switch']) + @requires_events('sched_switch') def df_context_switches(self): """ Compute number of context switches on each CPU. 
diff --git a/lisa/analysis/frequency.py b/lisa/analysis/frequency.py index 3a2e13e43..cd8ee122a 100644 --- a/lisa/analysis/frequency.py +++ b/lisa/analysis/frequency.py @@ -43,7 +43,7 @@ class FrequencyAnalysis(AnalysisBase): super(FrequencyAnalysis, self).__init__(trace) @memoized - @requires_events(['cpu_frequency', 'cpu_idle']) + @requires_events('cpu_frequency', 'cpu_idle') def _get_frequency_residency(self, cpus): """ Get a DataFrame with per cluster frequency residency, i.e. amount of @@ -138,7 +138,7 @@ class FrequencyAnalysis(AnalysisBase): if cpu in domain: return self._get_frequency_residency(tuple(domain)) - @requires_events(['cpu_frequency']) + @requires_events('cpu_frequency') def df_cpu_frequency_transitions(self, cpu): """ Compute number of frequency transitions of a given CPU. @@ -184,7 +184,7 @@ class FrequencyAnalysis(AnalysisBase): lambda x: x / (self.trace.x_max - self.trace.x_min) ) - @requires_events(['cpu_frequency']) + @requires_events('cpu_frequency') def get_average_cpu_frequency(self, cpu): """ Get the average frequency for a given CPU @@ -286,7 +286,7 @@ class FrequencyAnalysis(AnalysisBase): pl.savefig(figname, bbox_inches='tight') - @requires_events(['cpu_frequency']) + @requires_events('cpu_frequency') def plot_cpu_frequencies(self, cpu, filepath=None, axis=None): """ Plot frequency for the specified CPU diff --git a/lisa/analysis/idle.py b/lisa/analysis/idle.py index 5660e0fe2..295a2c4a2 100644 --- a/lisa/analysis/idle.py +++ b/lisa/analysis/idle.py @@ -45,7 +45,7 @@ class IdleAnalysis(AnalysisBase): ############################################################################### @memoized - @requires_events(['cpu_idle']) + @requires_events('cpu_idle') def signal_cpu_active(self, cpu): """ Build a square wave representing the active (i.e. 
non-idle) CPU time @@ -110,7 +110,7 @@ class IdleAnalysis(AnalysisBase): return cluster_active - @requires_events(['cpu_idle']) + @requires_events('cpu_idle') def df_cpus_wakeups(self): """" Get a DataFrame showing when CPUs have woken from idle @@ -133,7 +133,7 @@ class IdleAnalysis(AnalysisBase): return pd.DataFrame({'cpu': sr}).sort_index() - @requires_events(["cpu_idle"]) + @requires_events("cpu_idle") def df_cpu_idle_state_residency(self, cpu): """ Compute time spent by a given CPU in each idle state. @@ -182,7 +182,7 @@ class IdleAnalysis(AnalysisBase): idle_time_df.index.name = 'idle_state' return idle_time_df - @requires_events(['cpu_idle']) + @requires_events('cpu_idle') def df_cluster_idle_state_residency(self, cluster): """ Compute time spent by a given cluster in each idle state. diff --git a/lisa/analysis/status.py b/lisa/analysis/status.py index dd16cb444..92afc01f1 100644 --- a/lisa/analysis/status.py +++ b/lisa/analysis/status.py @@ -40,7 +40,7 @@ class StatusAnalysis(AnalysisBase): # DataFrame Getter Methods ############################################################################### - @requires_events(["sched_overutilized"]) + @requires_events("sched_overutilized") def df_overutilized(self): """ Get overutilized events diff --git a/lisa/analysis/tasks.py b/lisa/analysis/tasks.py index 0f8fcd4b5..a03b5f496 100644 --- a/lisa/analysis/tasks.py +++ b/lisa/analysis/tasks.py @@ -126,7 +126,7 @@ class TasksAnalysis(AnalysisBase): # DataFrame Getter Methods ############################################################################### - @requires_events(['sched_wakeup']) + @requires_events('sched_wakeup') def df_tasks_wakeups(self): """ The number of wakeups per task @@ -160,7 +160,7 @@ class TasksAnalysis(AnalysisBase): return df - @requires_events(['sched_switch']) + @requires_events('sched_switch') def df_rt_tasks(self, min_prio=100): """ Tasks with RT priority @@ -199,7 +199,7 @@ class TasksAnalysis(AnalysisBase): return rt_tasks - 
@requires_events(['sched_switch', 'sched_wakeup']) + @requires_events('sched_switch', 'sched_wakeup') def df_task_states(self, task): """ DataFrame of task's state updates events @@ -295,7 +295,7 @@ class TasksAnalysis(AnalysisBase): # Plotting Methods ############################################################################### - @requires_events(['sched_switch']) + @requires_events('sched_switch') def plot_task_residency(self, task, filepath=None): """ Plot on which CPUs the task ran on over time @@ -392,7 +392,7 @@ class TasksAnalysis(AnalysisBase): return fig, axis - @requires_events(["sched_wakeup"]) + @requires_events("sched_wakeup") def plot_tasks_wakeups(self, target_cpus=None, time_delta=0.01, filepath=None): """ Plot task wakeups over time @@ -421,7 +421,7 @@ class TasksAnalysis(AnalysisBase): return axis - @requires_events(["sched_wakeup"]) + @requires_events("sched_wakeup") def plot_tasks_wakeups_heatmap(self, xbins=100, colormap=None, filepath=None): """ :param xbins: Number of x-axis bins, i.e. in how many slices should @@ -445,7 +445,7 @@ class TasksAnalysis(AnalysisBase): return axis - @requires_events(["sched_wakeup_new"]) + @requires_events("sched_wakeup_new") def plot_tasks_forks(self, target_cpus=None, time_delta=0.01, filepath=None): """ Plot task forks over time @@ -474,7 +474,7 @@ class TasksAnalysis(AnalysisBase): return axis - @requires_events(["sched_wakeup_new"]) + @requires_events("sched_wakeup_new") def plot_tasks_forks_heatmap(self, xbins=100, colormap=None, filepath=None): """ :param xbins: Number of x-axis bins, i.e. 
in how many slices should diff --git a/lisa/analysis/thermal.py b/lisa/analysis/thermal.py index be750f001..ce4f61d26 100644 --- a/lisa/analysis/thermal.py +++ b/lisa/analysis/thermal.py @@ -34,7 +34,7 @@ class ThermalAnalysis(AnalysisBase): name = 'thermal' - @requires_events(["thermal_temperature"]) + @requires_events("thermal_temperature") def df_thermal_zones_temperature(self): """ Get the temperature of the thermal zones @@ -50,7 +50,7 @@ class ThermalAnalysis(AnalysisBase): return df - @requires_events(["thermal_power_cpu_limit"]) + @requires_events("thermal_power_cpu_limit") def df_cpufreq_cooling_state(self, cpus=None): """ Get cpufreq cooling device states @@ -76,7 +76,7 @@ class ThermalAnalysis(AnalysisBase): return df - @requires_events(["thermal_power_devfreq_limit"]) + @requires_events("thermal_power_devfreq_limit") def df_devfreq_cooling_state(self, devices=None): """ Get devfreq cooling device states diff --git a/lisa/trace.py b/lisa/trace.py index f2bead652..18ed1261d 100644 --- a/lisa/trace.py +++ b/lisa/trace.py @@ -1230,7 +1230,7 @@ class AndTraceEventChecker(AssociativeTraceEventChecker): ) return rst -def requires_events(events): +def requires_events(*events): """ Decorator for methods that require some given trace events. 
-- GitLab From b0ea4ed6452ca45bc79f50543f5d3465ccdd93ed Mon Sep 17 00:00:00 2001 From: Douglas RAILLARD Date: Tue, 11 Dec 2018 14:51:31 +0000 Subject: [PATCH 6/8] trace: Rename required_events into used_events That better reflects the new way of checking events, and leaves the door open to optional events as well --- doc/conf.py | 4 ++-- lisa/analysis/cpus.py | 2 +- lisa/analysis/frequency.py | 18 +++++++++--------- lisa/analysis/idle.py | 8 ++++---- lisa/analysis/latency.py | 26 +++++++++++++------------- lisa/analysis/load_tracking.py | 6 +++--- lisa/analysis/status.py | 2 +- lisa/analysis/tasks.py | 8 ++++---- lisa/analysis/thermal.py | 10 +++++----- lisa/trace.py | 4 ++-- 10 files changed, 44 insertions(+), 44 deletions(-) diff --git a/doc/conf.py b/doc/conf.py index 7b204842d..21f36231a 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -388,10 +388,10 @@ def autodoc_process_test_method(app, what, name, obj, options, lines): def autodoc_process_analysis_events(app, what, name, obj, options, lines): # Append the list of required trace events - if what != 'method' or not hasattr(obj, "required_events"): + if what != 'method' or not hasattr(obj, "used_events"): return - events_doc = "\n:Required trace events:\n\n{}\n\n".format(obj.required_events.doc_str()) + events_doc = "\n:Required trace events:\n\n{}\n\n".format(obj.used_events.doc_str()) lines.extend(events_doc.splitlines()) def setup(app): diff --git a/lisa/analysis/cpus.py b/lisa/analysis/cpus.py index 335e7db00..ce20b4615 100644 --- a/lisa/analysis/cpus.py +++ b/lisa/analysis/cpus.py @@ -63,7 +63,7 @@ class CpusAnalysis(AnalysisBase): # Plotting Methods ############################################################################### - @df_context_switches.required_events + @df_context_switches.used_events def plot_context_switches(self, filepath=None): """ Plot histogram of context switches on each CPU. 
diff --git a/lisa/analysis/frequency.py b/lisa/analysis/frequency.py index cd8ee122a..21dab7ccf 100644 --- a/lisa/analysis/frequency.py +++ b/lisa/analysis/frequency.py @@ -100,7 +100,7 @@ class FrequencyAnalysis(AnalysisBase): time_df["active_time"] = pd.DataFrame(index=available_freqs, data=nonidle_time) return time_df - @_get_frequency_residency.required_events + @_get_frequency_residency.used_events def df_cpu_frequency_residency(self, cpu): """ Get per-CPU frequency residency, i.e. amount of @@ -119,7 +119,7 @@ class FrequencyAnalysis(AnalysisBase): return self._get_frequency_residency((cpu,)) - @_get_frequency_residency.required_events + @_get_frequency_residency.used_events def df_domain_frequency_residency(self, cpu): """ Get per-frequency-domain frequency residency, i.e. amount of time each @@ -164,7 +164,7 @@ class FrequencyAnalysis(AnalysisBase): return pd.DataFrame(transitions) - @df_cpu_frequency_transitions.required_events + @df_cpu_frequency_transitions.used_events def df_cpu_frequency_transition_rate(self, cpu): """ Compute frequency transition rate of a given CPU. @@ -322,7 +322,7 @@ class FrequencyAnalysis(AnalysisBase): label="average") plot_overutilized = self.trace.analysis.status.plot_overutilized - if self.trace.has_events(plot_overutilized.required_events): + if self.trace.has_events(plot_overutilized.used_events): plot_overutilized(axis=axis) axis.set_ylim(frequencies[0] * 0.9, frequencies[-1] * 1.1) @@ -340,7 +340,7 @@ class FrequencyAnalysis(AnalysisBase): return axis - @plot_cpu_frequencies.required_events + @plot_cpu_frequencies.used_events def plot_domain_frequencies(self, filepath=None): """ Plot frequency trend for all frequency domains. @@ -363,7 +363,7 @@ class FrequencyAnalysis(AnalysisBase): return axes - @df_cpu_frequency_residency.required_events + @df_cpu_frequency_residency.used_events def plot_cpu_frequency_residency(self, cpu, filepath=None, pct=False, axes=None): """ Plot per-CPU frequency residency. 
@@ -412,7 +412,7 @@ class FrequencyAnalysis(AnalysisBase): return axes - @plot_cpu_frequency_residency.required_events + @plot_cpu_frequency_residency.used_events def plot_domain_frequency_residency(self, filepath=None, pct=False): """ Plot the frequency residency for all frequency domains. @@ -435,7 +435,7 @@ class FrequencyAnalysis(AnalysisBase): return axes - @df_cpu_frequency_transitions.required_events + @df_cpu_frequency_transitions.used_events def plot_cpu_frequency_transitions(self, cpu, filepath=None, pct=False, axis=None): """ Plot frequency transitions count of the specified CPU @@ -473,7 +473,7 @@ class FrequencyAnalysis(AnalysisBase): return axis - @plot_cpu_frequency_transitions.required_events + @plot_cpu_frequency_transitions.used_events def plot_domain_frequency_transitions(self, filepath=None, pct=False): """ Plot frequency transitions count for all frequency domains diff --git a/lisa/analysis/idle.py b/lisa/analysis/idle.py index 295a2c4a2..3aaafcf22 100644 --- a/lisa/analysis/idle.py +++ b/lisa/analysis/idle.py @@ -76,7 +76,7 @@ class IdleAnalysis(AnalysisBase): # Fix sequences of wakeup/sleep events reported with the same index return handle_duplicate_index(cpu_active) - @signal_cpu_active.required_events + @signal_cpu_active.used_events def signal_cluster_active(self, cluster): """ Build a square wave representing the active (i.e. 
non-idle) cluster time @@ -245,7 +245,7 @@ class IdleAnalysis(AnalysisBase): # Plotting Methods ############################################################################### - @df_cpu_idle_state_residency.required_events + @df_cpu_idle_state_residency.used_events def plot_cpu_idle_state_residency(self, cpu, filepath=None, pct=False): """ Plot the idle state residency of a CPU @@ -268,7 +268,7 @@ class IdleAnalysis(AnalysisBase): return axis - @df_cluster_idle_state_residency.required_events + @df_cluster_idle_state_residency.used_events def plot_cluster_idle_state_residency(self, cluster, filepath=None, pct=False, axis=None): """ @@ -299,7 +299,7 @@ class IdleAnalysis(AnalysisBase): return axis - @plot_cluster_idle_state_residency.required_events + @plot_cluster_idle_state_residency.used_events def plot_clusters_idle_state_residency(self, filepath=None, pct=False): """ Plot the idle state residency of all clusters diff --git a/lisa/analysis/latency.py b/lisa/analysis/latency.py index f4e8e8656..d3d9ca16f 100644 --- a/lisa/analysis/latency.py +++ b/lisa/analysis/latency.py @@ -42,7 +42,7 @@ class LatencyAnalysis(AnalysisBase): # DataFrame Getter Methods ############################################################################### - @TasksAnalysis.df_task_states.required_events + @TasksAnalysis.df_task_states.used_events def df_latency_wakeup(self, task): """ DataFrame of a task's wakeup latencies @@ -63,7 +63,7 @@ class LatencyAnalysis(AnalysisBase): df.rename(columns={'delta' : 'wakeup_latency'}, inplace=True) return df - @TasksAnalysis.df_task_states.required_events + @TasksAnalysis.df_task_states.used_events def df_latency_preemption(self, task): """ DataFrame of a task's preemption latencies @@ -83,7 +83,7 @@ class LatencyAnalysis(AnalysisBase): df.rename(columns={'delta' : 'preempt_latency'}, inplace=True) return df - @TasksAnalysis.df_task_states.required_events + @TasksAnalysis.df_task_states.used_events def df_activations(self, task): """ DataFrame of a 
task's activations @@ -103,7 +103,7 @@ class LatencyAnalysis(AnalysisBase): return wkp_df[["activation_interval"]] - @TasksAnalysis.df_task_states.required_events + @TasksAnalysis.df_task_states.used_events def df_runtimes(self, task): """ DataFrame of task's runtime each time the task blocks @@ -173,7 +173,7 @@ class LatencyAnalysis(AnalysisBase): # Plotting Methods ############################################################################### - @df_latency_wakeup.required_events + @df_latency_wakeup.used_events def plot_latencies(self, task, wakeup=True, preempt=True, threshold_ms=1, filepath=None): """ @@ -233,7 +233,7 @@ class LatencyAnalysis(AnalysisBase): above = 1 - below return df, above, below - @df_latency_wakeup.required_events + @df_latency_wakeup.used_events def _get_latencies_df(self, task, wakeup, preempt): wkp_df = None prt_df = None @@ -253,7 +253,7 @@ class LatencyAnalysis(AnalysisBase): return df - @_get_latencies_df.required_events + @_get_latencies_df.used_events def plot_latencies_cdf(self, task, wakeup=True, preempt=True, threshold_ms=1, filepath=None): """ @@ -292,7 +292,7 @@ class LatencyAnalysis(AnalysisBase): self.save_plot(fig, filepath) return axis - @_get_latencies_df.required_events + @_get_latencies_df.used_events def plot_latencies_histogram(self, task, wakeup=True, preempt=True, threshold_ms=1, bins=64, filepath=None): """ @@ -326,7 +326,7 @@ class LatencyAnalysis(AnalysisBase): self.save_plot(fig, filepath) return axis - @df_latency_wakeup.required_events + @df_latency_wakeup.used_events def plot_latency_bands(self, task, filepath=None, axis=None): """ Draw the task wakeup/preemption latencies as colored bands @@ -366,7 +366,7 @@ class LatencyAnalysis(AnalysisBase): return axis - @df_activations.required_events + @df_activations.used_events def plot_activations(self, task, filepath=None): """ Plot the :meth:`lisa.analysis.latency.LatencyAnalysis.df_activations` of a task @@ -381,7 +381,7 @@ class LatencyAnalysis(AnalysisBase): 
wkp_df.plot(style='+', logy=False, ax=axis) plot_overutilized = self.trace.analysis.status.plot_overutilized - if self.trace.has_events(plot_overutilized.required_events): + if self.trace.has_events(plot_overutilized.used_events): plot_overutilized(axis=axis) axis.set_title("Activation intervals of task \"{}\"".format(task)) @@ -391,7 +391,7 @@ class LatencyAnalysis(AnalysisBase): self.save_plot(fig, filepath) return axis - @df_runtimes.required_events + @df_runtimes.used_events def plot_runtimes(self, task, filepath=None): """ Plot the :meth:`lisa.analysis.latency.LatencyAnalysis.df_runtimes` of a task @@ -406,7 +406,7 @@ class LatencyAnalysis(AnalysisBase): df.plot(style='+', ax=axis) plot_overutilized = self.trace.analysis.status.plot_overutilized - if self.trace.has_events(plot_overutilized.required_events): + if self.trace.has_events(plot_overutilized.used_events): plot_overutilized(axis=axis) axis.set_title("Per-activation runtimes of task \"{}\"".format(task)) diff --git a/lisa/analysis/load_tracking.py b/lisa/analysis/load_tracking.py index ad2af0524..4f0d6e883 100644 --- a/lisa/analysis/load_tracking.py +++ b/lisa/analysis/load_tracking.py @@ -206,7 +206,7 @@ class LoadTrackingAnalysis(AnalysisBase): # Add overutilized signal to the plot plot_overutilized = self.trace.analysis.status.plot_overutilized - if self.trace.has_events(plot_overutilized.required_events): + if self.trace.has_events(plot_overutilized.used_events): plot_overutilized(axis=axis) axis.set_ylim(0, 1100) @@ -234,7 +234,7 @@ class LoadTrackingAnalysis(AnalysisBase): df[['load']].plot(ax=axis, drawstyle='steps-post', alpha=0.4) plot_overutilized = self.trace.analysis.status.plot_overutilized - if self.trace.has_events(plot_overutilized.required_events): + if self.trace.has_events(plot_overutilized.used_events): plot_overutilized(axis=axis) axis.set_title('Load-tracking signals of task "{}"'.format(task)) @@ -322,7 +322,7 @@ class LoadTrackingAnalysis(AnalysisBase): df[df.placement == 
stat]["__cpu"].plot(ax=axis, style="+", label=stat) plot_overutilized = self.trace.analysis.status.plot_overutilized - if self.trace.has_events(plot_overutilized.required_events): + if self.trace.has_events(plot_overutilized.used_events): plot_overutilized(axis=axis) axis.set_title("Utilization vs placement of task \"{}\"".format(task)) diff --git a/lisa/analysis/status.py b/lisa/analysis/status.py index 92afc01f1..aa9d665e0 100644 --- a/lisa/analysis/status.py +++ b/lisa/analysis/status.py @@ -68,7 +68,7 @@ class StatusAnalysis(AnalysisBase): # Plotting Methods ############################################################################### - @df_overutilized.required_events + @df_overutilized.used_events def plot_overutilized(self, filepath=None, axis=None): """ Draw the system's overutilized status as colored bands diff --git a/lisa/analysis/tasks.py b/lisa/analysis/tasks.py index a03b5f496..ba828d30d 100644 --- a/lisa/analysis/tasks.py +++ b/lisa/analysis/tasks.py @@ -144,7 +144,7 @@ class TasksAnalysis(AnalysisBase): return df - @df_tasks_wakeups.required_events + @df_tasks_wakeups.used_events def df_top_wakeup(self, min_wakeups=100): """ Tasks which wakeup more frequently than a specified threshold. 
@@ -261,7 +261,7 @@ class TasksAnalysis(AnalysisBase): return task_state_df - @df_task_states.required_events + @df_task_states.used_events def df_task_total_residency(self, task): """ DataFrame of a task's execution time on each CPU @@ -328,7 +328,7 @@ class TasksAnalysis(AnalysisBase): sw_df["__cpu"].plot(ax=axis, style='+') plot_overutilized = self.trace.analysis.status.plot_overutilized - if self.trace.has_events(plot_overutilized.required_events): + if self.trace.has_events(plot_overutilized.used_events): plot_overutilized(axis=axis) # Add an extra CPU lane to make room for the legend @@ -344,7 +344,7 @@ class TasksAnalysis(AnalysisBase): return axis - @df_task_total_residency.required_events + @df_task_total_residency.used_events def plot_task_total_residency(self, task, filepath=None): """ Plot a task's total time spent on each CPU diff --git a/lisa/analysis/thermal.py b/lisa/analysis/thermal.py index ce4f61d26..0b49964b6 100644 --- a/lisa/analysis/thermal.py +++ b/lisa/analysis/thermal.py @@ -100,7 +100,7 @@ class ThermalAnalysis(AnalysisBase): @property @memoized - @df_thermal_zones_temperature.required_events + @df_thermal_zones_temperature.used_events def thermal_zones(self): """ Get thermal zone ids that appear in the trace @@ -110,7 +110,7 @@ class ThermalAnalysis(AnalysisBase): @property @memoized - @df_cpufreq_cooling_state.required_events + @df_cpufreq_cooling_state.used_events def cpufreq_cdevs(self): """ Get cpufreq cooling devices that appear in the trace @@ -121,7 +121,7 @@ class ThermalAnalysis(AnalysisBase): @property @memoized - @df_devfreq_cooling_state.required_events + @df_devfreq_cooling_state.used_events def devfreq_cdevs(self): """ Get devfreq cooling devices that appear in the trace @@ -133,7 +133,7 @@ class ThermalAnalysis(AnalysisBase): # Plotting Methods ############################################################################### - @df_thermal_zones_temperature.required_events + @df_thermal_zones_temperature.used_events def 
plot_thermal_zone_temperature(self, thermal_zone_id, filepath=None, axis=None): """ Plot temperature of thermal zones (all by default) @@ -165,7 +165,7 @@ class ThermalAnalysis(AnalysisBase): return axis - @df_cpufreq_cooling_state.required_events + @df_cpufreq_cooling_state.used_events def plot_cpu_cooling_states(self, cpu, filepath=None, axis=None): """ Plot the state evolution of a cpufreq cooling device diff --git a/lisa/trace.py b/lisa/trace.py index 18ed1261d..393eed141 100644 --- a/lisa/trace.py +++ b/lisa/trace.py @@ -1070,7 +1070,7 @@ class TraceEventCheckerBase(abc.ABC, Loggable): # Set an attribute on the wrapper itself, so it can be e.g. added # to the method documentation - wrapper.required_events = checker + wrapper.used_events = checker return wrapper @abc.abstractmethod @@ -1244,7 +1244,7 @@ def requires_events(*events): def requires_one_event_of(*events): """ - Same as :func:``required_events`` with logical `OR` semantic. + Same as :func:`requires_events` with logical `OR` semantic. 
""" return OrTraceEventChecker.from_events(events) -- GitLab From fc8a27a1bc9d42340adb544b3182f41634bc9ce5 Mon Sep 17 00:00:00 2001 From: Douglas RAILLARD Date: Mon, 10 Dec 2018 18:13:28 +0000 Subject: [PATCH 7/8] tests: eas_behaviour: Check required events in trace Check the required events are available in the trace before running the test method --- lisa/tests/kernel/scheduler/eas_behaviour.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/lisa/tests/kernel/scheduler/eas_behaviour.py b/lisa/tests/kernel/scheduler/eas_behaviour.py index 7a11128ef..b8969839d 100644 --- a/lisa/tests/kernel/scheduler/eas_behaviour.py +++ b/lisa/tests/kernel/scheduler/eas_behaviour.py @@ -32,6 +32,7 @@ from lisa.tests.kernel.test_bundle import ResultBundle, CannotCreateError, RTATe from lisa.env import TestEnv from lisa.utils import ArtifactPath from lisa.energy_model import EnergyModel +from lisa.trace import requires_events class EASBehaviour(RTATestBundle, abc.ABC): """ @@ -335,6 +336,7 @@ class EASBehaviour(RTATestBundle, abc.ABC): return self._sort_power_df_columns(df.apply(est_power, axis=1), nrg_model) + @requires_events('sched_switch') def test_task_placement(self, energy_est_threshold_pct=5, nrg_model:EnergyModel=None) -> ResultBundle: """ Test that task placement was energy-efficient @@ -414,6 +416,7 @@ class ThreeSmallTasks(EASBehaviour): """ task_prefix = "small" + @EASBehaviour.test_task_placement.used_events def test_task_placement(self, energy_est_threshold_pct=20, nrg_model:EnergyModel=None) -> ResultBundle: """ Same as :meth:`EASBehaviour.test_task_placement` but with a higher @@ -534,6 +537,7 @@ class RampUp(EASBehaviour): """ task_name = "ramp_up" + @EASBehaviour.test_task_placement.used_events def test_task_placement(self, energy_est_threshold_pct=15, nrg_model:EnergyModel=None) -> ResultBundle: """ Same as :meth:`EASBehaviour.test_task_placement` but with a higher @@ -570,6 +574,7 @@ class RampDown(EASBehaviour): """ task_name = "ramp_down" + 
@EASBehaviour.test_task_placement.used_events def test_task_placement(self, energy_est_threshold_pct=18, nrg_model:EnergyModel=None) -> ResultBundle: """ Same as :meth:`EASBehaviour.test_task_placement` but with a higher -- GitLab From 2630266c0da952e1fa85a87d45808fa45bab59df Mon Sep 17 00:00:00 2001 From: Douglas RAILLARD Date: Tue, 11 Dec 2018 15:41:39 +0000 Subject: [PATCH 8/8] misfit: Add required events decorators --- lisa/tests/kernel/scheduler/misfit.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/lisa/tests/kernel/scheduler/misfit.py b/lisa/tests/kernel/scheduler/misfit.py index cc13d78b7..ab8e844bb 100644 --- a/lisa/tests/kernel/scheduler/misfit.py +++ b/lisa/tests/kernel/scheduler/misfit.py @@ -20,11 +20,11 @@ import pandas as pd from devlib.module.sched import SchedDomain, SchedDomainFlag from lisa.utils import memoized, ArtifactPath -from lisa.trace import Trace +from lisa.trace import Trace, requires_events from lisa.wlgen.rta import Periodic from lisa.tests.kernel.test_bundle import RTATestBundle, Result, ResultBundle, CannotCreateError, TestMetric from lisa.env import TestEnv -from lisa.analysis.tasks import TaskState +from lisa.analysis.tasks import TasksAnalysis, TaskState class MisfitMigrationBase(RTATestBundle): """ @@ -210,6 +210,7 @@ class StaggeredFinishes(MisfitMigrationBase): return Trace.squash_df(state_df, self.start_time, state_df.index[-1] + state_df.delta.values[-1], "delta") + @requires_events('sched_switch', TasksAnalysis.df_task_states.used_events) def test_preempt_time(self, allowed_preempt_pct=1) -> ResultBundle: """ Test that tasks are not being preempted too much @@ -305,6 +306,7 @@ class StaggeredFinishes(MisfitMigrationBase): return res + @requires_events('sched_switch') def test_migration_delay(self, allowed_delay_s=0.001) -> ResultBundle: """ Test that big CPUs pull tasks ASAP @@ -337,6 +339,7 @@ class StaggeredFinishes(MisfitMigrationBase): return self._test_cpus_busy(task_state_dfs, 
self.dst_cpus, allowed_delay_s) + @requires_events('sched_switch') def test_throughput(self, allowed_idle_time_s=0.001) -> ResultBundle: """ Test that big CPUs are not idle when there are misfit tasks to upmigrate -- GitLab