From a6fe17c00b9a677ae4bd3ec0c8728b7cca97d6ee Mon Sep 17 00:00:00 2001
From: Patrick Bellasi
Date: Wed, 23 May 2018 15:16:19 +0100
Subject: [PATCH 1/6] test_env: allow function profiling without events

This fixes:

   a73ed46f3899 env.py: Add debug_events key to target.config

which added a check to avoid generating an FtraceCollector when the
user does not specify any events. However, the functions profiler was
not considered, and we also have example notebooks where we want to be
able to profile kernel functions without necessarily specifying trace
events.

N.B. The FtraceCollector already provides the required logic to add at
least one event to properly enable the function profiler. Look at that
devlib module for details.

Signed-off-by: Patrick Bellasi
---
 libs/utils/env.py | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/libs/utils/env.py b/libs/utils/env.py
index 438bebe62..6cd8b6ce1 100644
--- a/libs/utils/env.py
+++ b/libs/utils/env.py
@@ -716,7 +716,7 @@ class TestEnv(ShareState):
 
     def _init_ftrace(self, force=False, conf=None):
 
         if not force and self.ftrace is not None:
-            return self.ftrace
+            return
 
         ftrace = conf or self.conf.get('ftrace')
         if ftrace is None:
@@ -726,8 +726,9 @@ class TestEnv(ShareState):
         functions = ftrace.get('functions', None)
         buffsize = ftrace.get('buffsize', FTRACE_BUFSIZE_DEFAULT)
 
-        # If no events are specified, do not create the FtraceCollector
-        if not events:
+        # If no events or functions have been specified:
+        # do not create the FtraceCollector
+        if not (events or functions):
             return
 
         self.ftrace = devlib.FtraceCollector(
@@ -748,7 +749,7 @@ class TestEnv(ShareState):
             for function in functions:
                 self._log.info('   %s', function)
 
-        return self.ftrace
+        return
 
     def _init_energy(self, force):
--
GitLab

From 9c55ada034720dd8537366d726dfa8d6cd6636d0 Mon Sep 17 00:00:00 2001
From: Patrick Bellasi
Date: Fri, 8 Jun 2018 15:35:59 +0100
Subject: [PATCH 2/6] utils/trace: make platform an optional parameter

Since:

   commit 074127b ("trace: Allow platform=None")

we allow parsing a Trace even when platform data is not available.
However, that fix did not change the order of the parameters, in order
to preserve backward compatibility. This ended up in an (even more)
ugly API where the first mandatory parameter can be None.

Let's do the full cleanup and make platform, as well as events, two
fully optional parameters, thus reducing the trace parsing API to
something which in its basic form can be just:

   trace = Trace('/path/to/trace/file.dat')

Backward compatibility is not guaranteed, but this adds a sanity check
to detect usages of the old API and warn the user about the need to
update the call to the new convention.

Existing usages of the Trace() API are fixed by this patch by properly
re-ordering or removing the affected parameters.
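
For reference, a sketch of the conversion under the new convention (the
paths and event lists below are made-up examples, not taken from the
tree):

   # Old API: platform was the first, mandatory parameter
   trace = Trace(platform, '/tmp/res_dir', events=['sched_switch'])

   # New API: only the trace location is mandatory
   trace = Trace('/tmp/res_dir', ['sched_switch'], platform)
   trace = Trace('/tmp/res_dir')  # parse all events, no platform data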
Signed-off-by: Patrick Bellasi
---
 .../benchmarks/Android_Jankbench.ipynb        |  2 +-
 .../android/benchmarks/Android_UiBench.ipynb  |  2 +-
 .../android/workloads/Android_Gmaps.ipynb     |  2 +-
 .../workloads/Android_Recents_Fling.ipynb     |  5 ++-
 .../android/workloads/Android_Viewer.ipynb    |  2 +-
 .../android/workloads/Android_YouTube.ipynb   |  2 +-
 .../TraceAnalysis_FunctionsProfiling.ipynb    |  2 +-
 .../TraceAnalysis_IdleStates.ipynb            |  2 +-
 .../TraceAnalysis_TasksLatencies.ipynb        |  2 +-
 .../trappy/custom_events_example.ipynb        |  2 +-
 ipynb/examples/utils/gem5_example.ipynb       |  2 +-
 ipynb/releases/ReleaseNotes_v16.10.ipynb      |  2 +-
 ipynb/releases/ReleaseNotes_v16.12.ipynb      |  5 +--
 ipynb/sched_dvfs/smoke_test.ipynb             |  5 ++-
 ipynb/sched_tune/stune_juno_rampL.ipynb       |  2 +-
 .../stune_juno_taskonly_rampL.ipynb           |  2 +-
 ipynb/sched_tune/stune_oak_rampL.ipynb        |  2 +-
 ipynb/tutorial/00_LisaInANutshell.ipynb       |  6 +--
 ipynb/tutorial/06_TraceAnalysis.ipynb         |  2 +-
 .../UseCaseExamples_SchedTuneAnalysis.ipynb   |  7 ++--
 libs/utils/test.py                            |  2 +-
 libs/utils/trace.py                           | 41 ++++++++++++++-----
 libs/utils/wa_results_collector.py            |  2 +-
 tests/eas/load_tracking.py                    |  2 +-
 tests/lisa/test_energy_model.py               |  2 +-
 tests/lisa/test_trace.py                      | 21 ++++++----
 tools/plots.py                                |  2 +-
 27 files changed, 79 insertions(+), 51 deletions(-)

diff --git a/ipynb/examples/android/benchmarks/Android_Jankbench.ipynb b/ipynb/examples/android/benchmarks/Android_Jankbench.ipynb
index 88347cca0..da122f121 100644
--- a/ipynb/examples/android/benchmarks/Android_Jankbench.ipynb
+++ b/ipynb/examples/android/benchmarks/Android_Jankbench.ipynb
@@ -637,7 +637,7 @@
     "with open(platform_file, 'r') as fh:\n",
     "    platform = json.load(fh)\n",
     "trace_file = os.path.join(te.res_dir, 'trace.dat')\n",
-    "trace = Trace(platform, trace_file, events=my_conf['ftrace']['events'])\n",
+    "trace = Trace(trace_file, my_conf['ftrace']['events'], platform)\n",
     "\n",
     "trappy.plotter.plot_trace(trace.ftrace)"
   ]
diff --git a/ipynb/examples/android/benchmarks/Android_UiBench.ipynb b/ipynb/examples/android/benchmarks/Android_UiBench.ipynb
index 6e5f3cb38..fa02979cd 100644
--- a/ipynb/examples/android/benchmarks/Android_UiBench.ipynb
+++ b/ipynb/examples/android/benchmarks/Android_UiBench.ipynb
@@ -459,7 +459,7 @@
     "with open(platform_file, 'r') as fh:\n",
     "    platform = json.load(fh)\n",
     "trace_file = os.path.join(te.res_dir, 'trace.html')\n",
-    "trace = Trace(platform, trace_file, events=my_conf['ftrace']['events'])\n",
+    "trace = Trace(trace_file, my_conf['ftrace']['events'], platform)\n",
     "\n",
     "trappy.plotter.plot_trace(trace.ftrace)"
   ]
diff --git a/ipynb/examples/android/workloads/Android_Gmaps.ipynb b/ipynb/examples/android/workloads/Android_Gmaps.ipynb
index da41fedee..c318e3114 100644
--- a/ipynb/examples/android/workloads/Android_Gmaps.ipynb
+++ b/ipynb/examples/android/workloads/Android_Gmaps.ipynb
@@ -363,7 +363,7 @@
     "    platform = json.load(fh)\n",
     "\n",
     "trace_file = os.path.join(te.res_dir, 'trace.dat')\n",
-    "trace = Trace(platform, trace_file, events=my_conf['ftrace']['events'], normalize_time=False)"
+    "trace = Trace(trace_file, my_conf['ftrace']['events'], platform, normalize_time=False)"
   ]
  },
 {
diff --git a/ipynb/examples/android/workloads/Android_Recents_Fling.ipynb b/ipynb/examples/android/workloads/Android_Recents_Fling.ipynb
index 97dbf7b9f..911aa8e02 100644
--- a/ipynb/examples/android/workloads/Android_Recents_Fling.ipynb
+++ b/ipynb/examples/android/workloads/Android_Recents_Fling.ipynb
@@ -433,8 +433,9 @@
     "    te.ftrace.get_trace(trace_file)\n",
     "    \n",
     "    # Parse trace\n",
-    "    tr = Trace(te.platform, exp_dir,\n",
-    "               events=my_conf['ftrace']['events'])\n",
+    "    tr = Trace(exp_dir,\n",
+    "               my_conf['ftrace']['events'],\n",
+    "               te.platform)\n",
     "    \n",
     "    # return all the experiment data\n",
     "    return {\n",
diff --git a/ipynb/examples/android/workloads/Android_Viewer.ipynb b/ipynb/examples/android/workloads/Android_Viewer.ipynb
index a3bd270d1..37893d877 100644
--- a/ipynb/examples/android/workloads/Android_Viewer.ipynb
+++ b/ipynb/examples/android/workloads/Android_Viewer.ipynb
@@ -269,7 +269,7 @@
     "    platform = json.load(fh)\n",
     "\n",
     "trace_file = os.path.join(te.res_dir, 'trace.dat')\n",
-    "trace = Trace(platform, trace_file, events=my_conf['ftrace']['events'], normalize_time=False)"
+    "trace = Trace(trace_file, my_conf['ftrace']['events'], platform, normalize_time=False)"
   ]
  },
 {
diff --git a/ipynb/examples/android/workloads/Android_YouTube.ipynb b/ipynb/examples/android/workloads/Android_YouTube.ipynb
index 7f4a576d7..b602bcc23 100644
--- a/ipynb/examples/android/workloads/Android_YouTube.ipynb
+++ b/ipynb/examples/android/workloads/Android_YouTube.ipynb
@@ -332,7 +332,7 @@
     "with open(platform_file, 'r') as fh:\n",
     "    platform = json.load(fh)\n",
     "trace_file = os.path.join(te.res_dir, 'trace.dat')\n",
-    "trace = Trace(platform, trace_file, events=my_conf['ftrace']['events'])\n",
+    "trace = Trace(trace_file, my_conf['ftrace']['events'], platform)\n",
     "\n",
     "trappy.plotter.plot_trace(trace.ftrace)"
   ]
diff --git a/ipynb/examples/trace_analysis/TraceAnalysis_FunctionsProfiling.ipynb b/ipynb/examples/trace_analysis/TraceAnalysis_FunctionsProfiling.ipynb
index ba6a0721e..3f44f410c 100644
--- a/ipynb/examples/trace_analysis/TraceAnalysis_FunctionsProfiling.ipynb
+++ b/ipynb/examples/trace_analysis/TraceAnalysis_FunctionsProfiling.ipynb
@@ -440,7 +440,7 @@
    }
   ],
   "source": [
-    "trace = Trace(platform, res_dir, events=[])"
+    "trace = Trace(res_dir, platform=platform)"
   ]
  },
 {
diff --git a/ipynb/examples/trace_analysis/TraceAnalysis_IdleStates.ipynb b/ipynb/examples/trace_analysis/TraceAnalysis_IdleStates.ipynb
index 3659e6f76..fddd19178 100644
--- a/ipynb/examples/trace_analysis/TraceAnalysis_IdleStates.ipynb
+++ b/ipynb/examples/trace_analysis/TraceAnalysis_IdleStates.ipynb
@@ -435,7 +435,7 @@
  },
  "outputs": [],
  "source": [
-    "trace = Trace(te.platform, res_dir, events=my_conf['ftrace']['events'])"
+    "trace = Trace(res_dir, my_conf['ftrace']['events'], te.platform)"
  ]
 },
 {
diff --git a/ipynb/examples/trace_analysis/TraceAnalysis_TasksLatencies.ipynb b/ipynb/examples/trace_analysis/TraceAnalysis_TasksLatencies.ipynb
index 27023db7e..ccce2a54e 100644
--- a/ipynb/examples/trace_analysis/TraceAnalysis_TasksLatencies.ipynb
+++ b/ipynb/examples/trace_analysis/TraceAnalysis_TasksLatencies.ipynb
@@ -417,7 +417,7 @@
 "outputs": [],
 "source": [
     "trace_file = os.path.join(res_dir, 'trace.dat')\n",
-    "trace = Trace(platform, trace_file, events=my_conf['ftrace']['events'])"
+    "trace = Trace(trace_file, my_conf['ftrace']['events'], platform)"
  ]
 },
 {
diff --git a/ipynb/examples/trappy/custom_events_example.ipynb b/ipynb/examples/trappy/custom_events_example.ipynb
index d6b4ef246..2d3391676 100644
--- a/ipynb/examples/trappy/custom_events_example.ipynb
+++ b/ipynb/examples/trappy/custom_events_example.ipynb
@@ -324,7 +324,7 @@
 "source": [
     "# Parse trace\n",
     "events_to_parse = my_tests_conf['ftrace']['events'] + my_tests_conf['ftrace']['custom']\n",
-    "trace = Trace(te.platform, '/tmp', events_to_parse)"
+    "trace = Trace('/tmp', events_to_parse, te.platform)"
  ]
 },
 {
diff --git a/ipynb/examples/utils/gem5_example.ipynb b/ipynb/examples/utils/gem5_example.ipynb
index e6d3df9e4..391756cac 100644
--- a/ipynb/examples/utils/gem5_example.ipynb
+++ b/ipynb/examples/utils/gem5_example.ipynb
@@ -441,7 +441,7 @@
     "\n",
     "trace_file = os.path.join(te.res_dir, 'trace.dat')\n",
     "te.ftrace.get_trace(trace_file)\n",
-    "trace = Trace(platform, trace_file, events=conf['ftrace']['events'], normalize_time=False)"
+    "trace = Trace(trace_file, conf['ftrace']['events'], platform, normalize_time=False)"
  ]
 },
 {
diff --git a/ipynb/releases/ReleaseNotes_v16.10.ipynb b/ipynb/releases/ReleaseNotes_v16.10.ipynb
index 3e3ece946..dcb79744d 100644
--- a/ipynb/releases/ReleaseNotes_v16.10.ipynb
+++ b/ipynb/releases/ReleaseNotes_v16.10.ipynb
@@ -1824,7 +1824,7 @@
     "events_to_parse = my_conf['ftrace']['events']\n",
     "events_to_parse += ['my_marker'] \n",
     "\n",
-    "trace = Trace(te.platform, trace_file, events=events_to_parse)"
  ]
 },
 {
diff --git a/ipynb/releases/ReleaseNotes_v16.12.ipynb b/ipynb/releases/ReleaseNotes_v16.12.ipynb
index b51b30085..81b3c8d4d 100644
--- a/ipynb/releases/ReleaseNotes_v16.12.ipynb
+++ b/ipynb/releases/ReleaseNotes_v16.12.ipynb
@@ -335,9 +335,8 @@
     "with open('/home/patbel01/Code/lisa/results/LisaInANutshell_Backup/platform.json', 'r') as fh:\n",
     "    platform = json.load(fh)\n",
     "\n",
-    "trace = Trace(platform,\n",
-    "              '/home/patbel01/Code/lisa/results/LisaInANutshell_Backup/trace.dat',\n",
-    "              events=['sched_switch']\n",
+    "trace = Trace('/home/patbel01/Code/lisa/results/LisaInANutshell_Backup/trace.dat',\n",
+    "              ['sched_switch'], platform\n",
     ")"
  ]
 },
diff --git a/ipynb/sched_dvfs/smoke_test.ipynb b/ipynb/sched_dvfs/smoke_test.ipynb
index dd588cf3a..428977b87 100644
--- a/ipynb/sched_dvfs/smoke_test.ipynb
+++ b/ipynb/sched_dvfs/smoke_test.ipynb
@@ -333,8 +333,9 @@
     "    te.ftrace.get_trace(trace_file)\n",
     "    \n",
     "    # Parse trace\n",
-    "    tr = Trace(te.platform, exp_dir,\n",
-    "               events=my_tests_conf['ftrace']['events'])\n",
+    "    tr = Trace(exp_dir,\n",
+    "               my_tests_conf['ftrace']['events'],\n",
+    "               te.platform)\n",
     "    \n",
     "    # return all the experiment data\n",
     "    return {\n",
diff --git a/ipynb/sched_tune/stune_juno_rampL.ipynb b/ipynb/sched_tune/stune_juno_rampL.ipynb
index 0e676790e..7ef3a9a89 100644
--- a/ipynb/sched_tune/stune_juno_rampL.ipynb
+++ b/ipynb/sched_tune/stune_juno_rampL.ipynb
@@ -202,7 +202,7 @@
     "        \"cpu_frequency\",\n",
     "        \"cpu_capacity\"\n",
     "    ]\n",
-    "    trace = Trace(platform, run_dir, events)\n",
+    "    trace = Trace(run_dir, events, platform)\n",
     "    \n",
     "    # Define time ranges for all the temporal plots\n",
     "    trace.setXTimeRange(t_min, t_max)\n",
diff --git a/ipynb/sched_tune/stune_juno_taskonly_rampL.ipynb b/ipynb/sched_tune/stune_juno_taskonly_rampL.ipynb
index 6c120be5d..364a128a2 100644
--- a/ipynb/sched_tune/stune_juno_taskonly_rampL.ipynb
+++ b/ipynb/sched_tune/stune_juno_taskonly_rampL.ipynb
@@ -180,7 +180,7 @@
     "        \"cpu_frequency\",\n",
     "        \"cpu_capacity\"\n",
     "    ]\n",
-    "    trace = Trace(platform, run_dir, events)\n",
+    "    trace = Trace(run_dir, events, platform)\n",
     "    \n",
     "    # Define time ranges for all the temporal plots\n",
     "    trace.setXTimeRange(t_min, t_max)\n",
diff --git a/ipynb/sched_tune/stune_oak_rampL.ipynb b/ipynb/sched_tune/stune_oak_rampL.ipynb
index 3eeba8df5..48f0c64e4 100644
--- a/ipynb/sched_tune/stune_oak_rampL.ipynb
+++ b/ipynb/sched_tune/stune_oak_rampL.ipynb
@@ -167,7 +167,7 @@
     "        \"cpu_frequency\",\n",
     "        \"cpu_capacity\"\n",
     "    ]\n",
-    "    trace = Trace(platform, run_dir, events)\n",
+    "    trace = Trace(run_dir, events, platform)\n",
     "    \n",
     "    # Define time ranges for all the temporal plots\n",
     "    trace.setXTimeRange(t_min, t_max)\n",
diff --git a/ipynb/tutorial/00_LisaInANutshell.ipynb b/ipynb/tutorial/00_LisaInANutshell.ipynb
index 2d117c0d9..fd83afacd 100644
--- a/ipynb/tutorial/00_LisaInANutshell.ipynb
+++ b/ipynb/tutorial/00_LisaInANutshell.ipynb
@@ -1441,7 +1441,7 @@
     "from trace import Trace\n",
     "\n",
     "# Define which event we are interested into\n",
-    "trace = Trace(te.platform, te.res_dir, [\n",
+    "trace = Trace(te.res_dir, [\n",
     "    \"sched_switch\",\n",
     "    \"sched_load_avg_cpu\",\n",
     "    \"sched_load_avg_task\",\n",
@@ -1449,7 +1449,7 @@
     "    \"sched_boost_task\",\n",
     "    \"cpu_frequency\",\n",
     "    \"cpu_capacity\",\n",
-    "    ])"
+    "    ], te.platform)"
  ]
 },
 {
@@ -2207,7 +2207,7 @@
     "        \"cpu_frequency\",\n",
     "        \"cpu_capacity\",\n",
     "    ]\n",
-    "    trace = Trace(platform, test_dir, events)\n",
+    "    trace = Trace(test_dir, events, platform)\n",
     "    \n",
     "    # Define time ranges for all the temporal plots\n",
     "    trace.setXTimeRange(t_min, t_max)\n",
diff --git a/ipynb/tutorial/06_TraceAnalysis.ipynb b/ipynb/tutorial/06_TraceAnalysis.ipynb
index f9ed78b9b..8eb32c01f 100644
--- a/ipynb/tutorial/06_TraceAnalysis.ipynb
+++ b/ipynb/tutorial/06_TraceAnalysis.ipynb
@@ -201,7 +201,7 @@
     "# The LISA::Trace module is a wrapper of the TRAPpy FTrace module which\n",
     "# allows to keep track of platform specific details to support the generation\n",
     "# of\n",
-    "trace = Trace(platform, res_dir, events_to_parse, window=(t_min,t_max))"
+    "trace = Trace(res_dir, events_to_parse, platform, window=(t_min,t_max))"
  ]
 },
 {
diff --git a/ipynb/tutorial/UseCaseExamples_SchedTuneAnalysis.ipynb b/ipynb/tutorial/UseCaseExamples_SchedTuneAnalysis.ipynb
index 6e8658f37..05fce1d6c 100644
--- a/ipynb/tutorial/UseCaseExamples_SchedTuneAnalysis.ipynb
+++ b/ipynb/tutorial/UseCaseExamples_SchedTuneAnalysis.ipynb
@@ -616,7 +616,7 @@
     "from trace import Trace\n",
     "\n",
     "# Define which event we are interested into\n",
-    "trace = Trace(platform, trace_file, [\n",
+    "trace = Trace(trace_file, [\n",
     "    \"sched_switch\",\n",
     "    \"sched_load_avg_cpu\",\n",
     "    \"sched_load_avg_task\",\n",
@@ -624,7 +624,7 @@
     "    \"sched_boost_task\",\n",
     "    \"cpu_frequency\",\n",
     "    \"cpu_capacity\",\n",
-    "    ])"
+    "    ], platform)"
  ]
 },
 {
@@ -1088,7 +1088,7 @@
 },
 "outputs": [],
 "source": [
-    "trace = Trace(platform, boost15_trace,\n",
+    "trace = Trace(boost15_trace,\n",
     "              [\"sched_switch\",\n",
     "               \"sched_overutilized\",\n",
     "               \"sched_load_avg_cpu\",\n",
@@ -1098,6 +1098,7 @@
     "               \"cpu_frequency\",\n",
     "               \"cpu_capacity\",\n",
     "              ],\n",
+    "              platform,\n",
     "              plots_prefix='boost15_'\n",
     "             )"
  ]
 }
diff --git a/libs/utils/test.py b/libs/utils/test.py
index d3c30928b..a3a35fb59 100644
--- a/libs/utils/test.py
+++ b/libs/utils/test.py
@@ -165,7 +165,7 @@ class LisaTest(unittest.TestCase):
                              'to your test/experiment configuration flags')
 
         events = self.test_conf['ftrace']['events']
-        trace = Trace(self.te.platform, experiment.out_dir, events)
+        trace = Trace(experiment.out_dir, events, self.te.platform)
 
         self.__traces[experiment.out_dir] = trace
         return trace
diff --git a/libs/utils/trace.py b/libs/utils/trace.py
index 77ef843d2..98d003845 100644
--- a/libs/utils/trace.py
+++ b/libs/utils/trace.py
@@ -41,15 +41,15 @@ class Trace(object):
     """
     The Trace object is the LISA trace events parser.
 
-    :param platform: a dictionary containing information about the target
-        platform
-    :type platform: dict or None
-
     :param data_dir: folder containing all trace data
     :type data_dir: str
 
-    :param events: events to be parsed (everything in the trace by default)
-    :type events: list(str)
+    :param events: events to be parsed (all the events by default)
+    :type events: str or list(str)
+
+    :param platform: a dictionary containing information about the target
+        platform
+    :type platform: dict
 
     :param window: time window to consider when parsing the trace
     :type window: tuple(int, int)
@@ -69,13 +69,29 @@ class Trace(object):
     :type plots_prefix: str
     """
 
-    def __init__(self, platform, data_dir, events,
+    def __init__(self, data_dir,
+                 events=None,
+                 platform=None,
                  window=(0, None),
                  normalize_time=True,
                  trace_format='FTrace',
                  plots_dir=None,
                  plots_prefix=''):
 
+        # Setup logging
+        self._log = logging.getLogger('Trace')
+
+        # Sanity check for API update:
+        # platform used to be the first mandatory argument, now it is the
+        # third one and optional. We can still detect clients using the old
+        # API since the new first parameter is forced to be a string. If
+        # it's not, likely the user is using the old API.
+        if not isinstance(data_dir, str):
+            logging.error("The first parameter of Trace() constructor is "
+                          "expected to be a string path")
+            raise ValueError("Deprecated Trace() API usage detected: "
+                             "check constructor signature!")
+
         # The platform used to run the experiments
         self.platform = platform or {}
 
@@ -113,9 +129,6 @@ class Trace(object):
         # Folder containing all trace data
         self.data_dir = None
 
-        # Setup logging
-        self._log = logging.getLogger('Trace')
-
         # Folder containing trace
         if not os.path.isdir(data_dir):
             self.data_dir = os.path.dirname(data_dir) or '.'
@@ -140,6 +153,10 @@
             max_cpu = max(int(self.data_frame.trace_event(e)['__cpu'].max())
                           for e in self.available_events)
             self.platform['cpus_count'] = max_cpu + 1
+        # If a CPUs count is not available here, let's assume we are running on
+        # a unicore system and set cpus_count=1 so that the following analysis
+        # methods will not complain about the CPUs count not being available.
+        self.platform['cpus_count'] = self.platform.get('cpus_count', 1)
 
         self.analysis = AnalysisRegister(self)
 
@@ -183,6 +200,10 @@
         :param events: single event name or list of events names
         :type events: str or list(str)
         """
+        # Parse all events by default
+        if events is None:
+            self.events = []
+            return
         if isinstance(events, basestring):
             self.events = events.split(' ')
         elif isinstance(events, list):
diff --git a/libs/utils/wa_results_collector.py b/libs/utils/wa_results_collector.py
index fbb035ca2..40fb81c4f 100644
--- a/libs/utils/wa_results_collector.py
+++ b/libs/utils/wa_results_collector.py
@@ -353,7 +353,7 @@ class WaResultsCollector(object):
         metrics = []
         events = ['irq_handler_entry', 'cpu_frequency', 'nohz_kick', 'sched_switch',
                   'sched_load_cfs_rq', 'sched_load_avg_task', 'thermal_temperature']
-        trace = Trace(self.platform, trace_path, events)
+        trace = Trace(trace_path, events, self.platform)
 
         metrics.append(('cpu_wakeup_count', len(trace.data_frame.cpu_wakeups()), None))
 
diff --git a/tests/eas/load_tracking.py b/tests/eas/load_tracking.py
index fdc714e17..319129710 100644
--- a/tests/eas/load_tracking.py
+++ b/tests/eas/load_tracking.py
@@ -1001,7 +1001,7 @@ class _PELTTaskGroupsTest(LisaTest):
             cls._log.info('Collected FTrace binary trace: %s', trace_file)
 
         # Extract trace
-        cls.trace = Trace(None, test_dir, te.ftrace.events)
+        cls.trace = Trace(test_dir, te.ftrace.events)
 
     @classmethod
     def _migrate_task(cls, test_env):
diff --git a/tests/lisa/test_energy_model.py b/tests/lisa/test_energy_model.py
index bfb9c79fa..5e4d004e1 100644
--- a/tests/lisa/test_energy_model.py
+++ b/tests/lisa/test_energy_model.py
@@ -346,7 +346,7 @@ class TestEstimateFromTrace(TestCase):
         path = os.path.join(dir, 'trace.txt')
         with open(path, 'w') as f:
             f.write(trace_data)
-        trace = Trace(None, path, ['cpu_idle', 'cpu_frequency'],
+        trace = Trace(path, ['cpu_idle', 'cpu_frequency'],
                       normalize_time=False)
         shutil.rmtree(dir)
 
diff --git a/tests/lisa/test_trace.py b/tests/lisa/test_trace.py
index 20208c6e2..29c1eaad3 100644
--- a/tests/lisa/test_trace.py
+++ b/tests/lisa/test_trace.py
@@ -42,7 +42,7 @@ class TestTrace(TestCase):
         self.platform = self._get_platform()
 
         self.trace_path = os.path.join(self.traces_dir, 'trace.txt')
-        self.trace = Trace(self.platform, self.trace_path, self.events)
+        self.trace = Trace(self.trace_path, self.events, self.platform)
 
     def make_trace(self, in_data):
         """
@@ -51,8 +51,8 @@ class TestTrace(TestCase):
         with open(self.test_trace, "w") as fout:
             fout.write(in_data)
 
-        return Trace(self.platform, self.test_trace, self.events,
-                     normalize_time=False)
+        return Trace(self.test_trace, self.events,
+                     self.platform, normalize_time=False)
 
     def get_trace(self, trace_name):
         """
@@ -61,7 +61,8 @@ class TestTrace(TestCase):
         dir = os.path.join(self.traces_dir, trace_name)
         trace_path = os.path.join(dir, 'trace.dat')
 
-        return Trace(self._get_platform(trace_name), trace_path, self.events)
+        return Trace(trace_path, self.events,
+                     self._get_platform(trace_name))
 
     def _get_platform(self, trace_name=None):
         trace_dir = self.traces_dir
@@ -113,8 +114,10 @@ class TestTrace(TestCase):
         """
         expected_duration = 6.676497
 
-        trace = Trace(self.platform, self.trace_path,
-                      self.events, normalize_time=False
+        trace = Trace(self.trace_path,
+                      self.events,
+                      self.platform,
+                      normalize_time=False
                       )
 
         self.assertAlmostEqual(trace.time_range, expected_duration, places=6)
@@ -125,8 +128,10 @@ class TestTrace(TestCase):
         """
         expected_duration = 4.0
 
-        trace = Trace(self.platform, self.trace_path,
-                      self.events, normalize_time=False,
+        trace = Trace(self.trace_path,
+                      self.events,
+                      self.platform,
+                      normalize_time=False,
                       window=(76.402065, 80.402065)
                       )
 
diff --git a/tools/plots.py b/tools/plots.py
index 077f8fdab..fde2d2e7f 100755
--- a/tools/plots.py
+++ b/tools/plots.py
@@ -142,7 +142,7 @@ def plotdir(run_dir, platform):
         logging.info('No performance data found')
 
     # Load Trace Analysis modules
-    trace = Trace(platform, run_dir)
+    trace = Trace(run_dir, platform=platform)
 
     # Define time ranges for all the temporal plots
     trace.setXTimeRange(args.tmin, args.tmax)
--
GitLab

From eeda9f7b73ad4f396150dc86fbfe159df14482af Mon Sep 17 00:00:00 2001
From: Patrick Bellasi
Date: Fri, 15 Jun 2018 18:11:47 +0100
Subject: [PATCH 3/6] utils/trace: parse kernel version one time

When the latency analysis is performed on multiple tasks, for each one
we identify the kernel version and spit out an annoying log message.

Since the kernel version is actually a trace property, let's parse it
from platform data (whenever available) once, before parsing the trace.
The latency analysis module is thus updated to use the pre-computed
kernel version.

Signed-off-by: Patrick Bellasi
---
 libs/utils/analysis/latency_analysis.py |  7 +------
 libs/utils/trace.py                     | 13 +++++++++++++
 2 files changed, 14 insertions(+), 6 deletions(-)

diff --git a/libs/utils/analysis/latency_analysis.py b/libs/utils/analysis/latency_analysis.py
index 86fb2e5b1..b9af70164 100644
--- a/libs/utils/analysis/latency_analysis.py
+++ b/libs/utils/analysis/latency_analysis.py
@@ -868,12 +868,7 @@ class LatencyAnalysis(AnalysisModule):
              512: "P", # TASK_PARKED
             1024: "N", # TASK_NOLOAD
         }
-        kver = self._trace.platform['kernel']['parts']
-        if kver is None:
-            kver = (3, 18)
-        self._log.info('Parsing sched_switch states assuming kernel v%d.%d',
-                       kver[0], kver[1])
-        if kver >= (4, 8):
+        if self._trace.kernel_version >= (4, 8):
             TASK_STATES[2048] = "n" # TASK_NEW
         TASK_MAX_STATE = 2 * max(TASK_STATES)
 
diff --git a/libs/utils/trace.py b/libs/utils/trace.py
index 98d003845..ccb309b33 100644
--- a/libs/utils/trace.py
+++ b/libs/utils/trace.py
@@ -129,6 +129,9 @@ class Trace(object):
         # Folder containing all trace data
         self.data_dir = None
 
+        # Version of the traced kernel
+        self.kernel_version = None
+
         # Folder containing trace
         if not os.path.isdir(data_dir):
             self.data_dir = os.path.dirname(data_dir) or '.'
@@ -158,6 +161,16 @@
         self.platform['cpus_count'] = self.platform.get('cpus_count', 1)
 
+        # Setup kernel version
+        if self.platform.get('kernel', {}).get('parts'):
+            self.kernel_version = self.platform['kernel']['parts']
+            self._log.info('Kernel version loaded from platform data')
+        else:
+            self.kernel_version = (3, 18)
+            self._log.warning('Kernel version not available from platform data')
+        self._log.info('Parsing trace assuming kernel v%d.%d',
+                       self.kernel_version[0], self.kernel_version[1])
+
         self.analysis = AnalysisRegister(self)
 
     def _registerDataFrameGetters(self, module):
--
GitLab

From 228430282737252d08ddda1328a33ff88a79a726 Mon Sep 17 00:00:00 2001
From: Patrick Bellasi
Date: Fri, 8 Jun 2018 15:34:14 +0100
Subject: [PATCH 4/6] utils/ftrace: add a utility function to display a trace

Once a trace has been parsed, it can sometimes still be useful to open
it using the most appropriate native viewer. Let's open ftrace traces
using kernelshark and systraces using the user's default web browser.
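
Expected usage is something like the following sketch (it assumes the
trace path is illustrative and that kernelshark, or a web browser, is
available on the host):

   trace = Trace('/path/to/trace.dat', ['sched_switch'])
   trace.show()  # spawns kernelshark for ftrace, a browser for systrace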
Signed-off-by: Patrick Bellasi
---
 libs/utils/trace.py | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)

diff --git a/libs/utils/trace.py b/libs/utils/trace.py
index ccb309b33..b099f958e 100644
--- a/libs/utils/trace.py
+++ b/libs/utils/trace.py
@@ -26,6 +26,7 @@ import json
 import warnings
 import operator
 import logging
+import webbrowser
 
 from analysis_register import AnalysisRegister
 from collections import namedtuple
@@ -430,6 +431,23 @@ class Trace(object):
         """
         return self._tasks_by_pid.TaskName.to_dict()
 
+    def show(self):
+        """
+        Open the parsed trace using the most appropriate native viewer.
+
+        The native viewer depends on the specified trace format:
+        - ftrace: open using kernelshark
+        - systrace: open using a browser
+
+        In both cases the native viewer is assumed to be available in the host
+        machine.
+        """
+        if isinstance(self.ftrace, trappy.FTrace):
+            return os.popen("kernelshark '{}'".format(self.ftrace.trace_path))
+        if isinstance(self.ftrace, trappy.SysTrace):
+            return webbrowser.open(self.ftrace.trace_path)
+        self._log.warning('No trace data available')
+
 ###############################################################################
 # DataFrame Getter Methods
 
--
GitLab

From 3b7c0f8d26d5c66808722a5e64e9a49c8f8e4154 Mon Sep 17 00:00:00 2001
From: Patrick Bellasi
Date: Mon, 25 Jun 2018 15:21:51 +0100
Subject: [PATCH 5/6] tools/wltests: build: return immediately on failures

When a build target fails we do not properly forward the make exit
code. Let's do it so that, for example, we avoid flashing an image if a
kernel build fails.

This is not expected to happen on properly tested and bisectable SHA1
series, but this support is still valuable to have when, for example,
wltests is used for quick build/test cycles on development kernels.

Signed-off-by: Patrick Bellasi
---
 tools/wltests/build | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/tools/wltests/build b/tools/wltests/build
index 31619859d..8b4771f33 100755
--- a/tools/wltests/build
+++ b/tools/wltests/build
@@ -288,40 +288,48 @@ fi
 if [ ${BUILD_IMAGE} -eq $YES ]; then
 	c_info "Making [Image]..."
 	(set -x; cd "${KERNEL_SRC}"; make -j${NCPUS} "$KERNEL_IMAGE" >${OUT})
+	ERROR=$?; [ $ERROR -eq 0 ] || exit $ERROR
 fi
 
 if [ ${BUILD_DTBS} -eq $YES ]; then
 	c_info "Making [dtbs]..."
 	(set -x; cd "${KERNEL_SRC}"; make -j${NCPUS} dtbs >${OUT})
+	ERROR=$?; [ $ERROR -eq 0 ] || exit $ERROR
 fi
 
 if [ ${BUILD_DTBO_IMAGE} -eq $YES ]; then
 	c_info "Making [dtbo.img]..."
 	(set -x; cd "${KERNEL_SRC}"; make -j${NCPUS} dtbo.img >${OUT})
+	ERROR=$?; [ $ERROR -eq 0 ] || exit $ERROR
 fi
 
 if [ ${BUILD_MODULES} -eq $YES ]; then
 	c_info "Making [modules]..."
 	(set -x; cd "${KERNEL_SRC}"; make -j${NCPUS} modules >${OUT})
+	ERROR=$?; [ $ERROR -eq 0 ] || exit $ERROR
 fi
 
 if [ ${BUILD_MODULES_INSTALL} -eq $YES ]; then
 	c_info "Making [modules_install] to [${INSTALL_MOD_PATH}] ..."
 	(set -x; cd "${KERNEL_SRC}"; make -j${NCPUS} modules_install >${OUT})
+	ERROR=$?; [ $ERROR -eq 0 ] || exit $ERROR
 fi
 
 if [ ${MAKE_MENUCONFIG} -eq $YES ]; then
 	c_info "Making [menuconfig]..."
 	(set -x; cd "${KERNEL_SRC}"; make menuconfig)
+	ERROR=$?; [ $ERROR -eq 0 ] || exit $ERROR
 fi
 
 if [ ${MAKE_SAVEDEFCONFIG} -eq $YES ]; then
 	c_info "Making [savedefconfig]..."
 	(set -x; cd "${KERNEL_SRC}"; make savedefconfig)
+	ERROR=$?; [ $ERROR -eq 0 ] || exit $ERROR
 fi
 
 if [ ${BUILD_NOW} -eq $YES ]; then
 	c_info "Making default target..."
 	(set -x; cd "${KERNEL_SRC}"; make -j${NCPUS} >${OUT})
+	ERROR=$?; [ $ERROR -eq 0 ] || exit $ERROR
 fi
--
GitLab

From 89d718ee7eed0af8010ae60a1f1b0d45d3ce1b58 Mon Sep 17 00:00:00 2001
From: Patrick Bellasi
Date: Tue, 26 Jun 2018 11:13:36 +0100
Subject: [PATCH 6/6] latency_analysis: add statistics getter method

Right now, the plotLatency method returns a DF with statistics for the
latencies of a specified task. However, sometimes we could be
interested in getting these stats without necessarily plotting all the
data.

Let's move the statistics DF generation into an internal
_get_latency_df method, which can then be used by plotLatency but also
by an additional DF getter: _dfg_latency_stats_df. Thus, this will make
available a new:

   trace.data_frame.latency_stats_df()

which just returns the overall latency stats for the specified task.

Signed-off-by: Patrick Bellasi
---
 libs/utils/analysis/latency_analysis.py | 109 ++++++++++++++++++------
 1 file changed, 81 insertions(+), 28 deletions(-)

diff --git a/libs/utils/analysis/latency_analysis.py b/libs/utils/analysis/latency_analysis.py
index b9af70164..9344df816 100644
--- a/libs/utils/analysis/latency_analysis.py
+++ b/libs/utils/analysis/latency_analysis.py
@@ -323,18 +323,10 @@ class LatencyAnalysis(AnalysisModule):
         data = [(cpu, time) for cpu, time in runtimes.iteritems()]
         return pd.DataFrame(data, columns=['CPU', 'runtime']).set_index('CPU')
 
-###############################################################################
-# Plotting Methods
-###############################################################################
-
-    def plotLatency(self, task, kind='all', tag=None, threshold_ms=1, bins=64):
+    @memoized
+    def _get_latency_df(self, task, kind='all', threshold_ms=1):
         """
-        Generate a set of plots to report the WAKEUP and PREEMPT latencies the
-        specified task has been subject to. A WAKEUP latencies is the time from
-        when a task becomes RUNNABLE till the first time it gets a CPU.
-        A PREEMPT latencies is the time from when a RUNNING task is suspended
-        because of the CPU is assigned to another task till when the task
-        enters the CPU again.
+        Compute statistics on latencies of the specified task.
 
         :param task: the task to report latencies for
         :type task: int or list(str)
@@ -342,17 +334,11 @@ class LatencyAnalysis(AnalysisModule):
         :param kind: the kind of latencies to report (WAKEUP and/or PREEMPT)
         :type kind: str
 
-        :param tag: a string to add to the plot title
-        :type tag: str
-
         :param threshold_ms: the minimum acceptable [ms] value to report
                              graphically in the generated plots
        :type threshold_ms: int or float
 
-        :param bins: number of bins to be used for the runtime's histogram
-        :type bins: int
-
-        :returns: a DataFrame with statistics on ploted latencies
+        :returns: a DataFrame with statistics on task latencies
         """
 
         if not self._trace.hasEvents('sched_switch'):
@@ -391,14 +377,87 @@
         # Join the two data frames
         df = wkp_df.append(prt_df)
 
-        ymax = 1.1 * df.latency.max()
+        cdf = self._getCDF(df.latency, (threshold_ms / 1000.))
+
+        return df, cdf
+
+
+    @memoized
+    def _dfg_latency_stats_df(self, task, kind='all', threshold_ms=1):
+        """
+        Compute statistics on latencies of the specified task.
+
+        :param task: the task to report latencies for
+        :type task: int or list(str)
+
+        :param kind: the kind of latencies to report (WAKEUP and/or PREEMPT)
+        :type kind: str
+
+        :param threshold_ms: the minimum acceptable [ms] value to report
+                             graphically in the generated plots
+        :type threshold_ms: int or float
+
+        :returns: a DataFrame with statistics on task latencies
+        """
+
+        # Get latency events
+        df, cdf = self._get_latency_df(task, kind, threshold_ms)
         self._log.info('Total: %5d latency events', len(df))
+        self._log.info('%.1f %% samples below %d [ms] threshold',
+                       100. * cdf.below, threshold_ms)
 
-        # Build the series for the CDF
-        cdf = self._getCDF(df.latency, (threshold_ms / 1000.))
+        # Return statistics
+        stats_df = df.describe(percentiles=[0.95, 0.99])
+        label = '{:.1f}%'.format(100. * cdf.below)
+        stats = { label : cdf.threshold }
+        return stats_df.append(pd.DataFrame(
+            stats.values(), columns=['latency'], index=stats.keys()))
+
+
+###############################################################################
+# Plotting Methods
+###############################################################################
+
+    def plotLatency(self, task, kind='all', tag=None, threshold_ms=1, bins=64):
+        """
+        Generate a set of plots to report the WAKEUP and PREEMPT latencies the
+        specified task has been subject to. A WAKEUP latency is the time from
+        when a task becomes RUNNABLE till the first time it gets a CPU.
+        A PREEMPT latency is the time from when a RUNNING task is suspended
+        because its CPU is assigned to another task till when the task
+        enters the CPU again.
+
+        :param task: the task to report latencies for
+        :type task: int or list(str)
+
+        :param kind: the kind of latencies to report (WAKEUP and/or PREEMPT)
+        :type kind: str
+
+        :param tag: a string to add to the plot title
+        :type tag: str
+
+        :param threshold_ms: the minimum acceptable [ms] value to report
+                             graphically in the generated plots
+        :type threshold_ms: int or float
+
+        :param bins: number of bins to be used for the runtime's histogram
+        :type bins: int
+
+        :returns: a DataFrame with statistics on plotted latencies
+        """
+
+        # Get latency events
+        df, cdf = self._get_latency_df(task, kind, threshold_ms)
         self._log.info('Total: %5d latency events', len(df))
         self._log.info('%.1f %% samples below %d [ms] threshold',
                        100. * cdf.below, threshold_ms)
 
+        # Get task data
+        td = self._getTaskData(task)
+        if not td:
+            return None
+
+        # Setup plots
         gs = gridspec.GridSpec(2, 2, height_ratios=[2,1], width_ratios=[1,1])
         plt.figure(figsize=(16, 8))
 
@@ -434,6 +493,7 @@
         # Histogram of all latencies
         axes = plt.subplot(gs[1,1])
+        ymax = 1.1 * df.latency.max()
         df.latency.plot(kind='hist', bins=bins, ax=axes, xlim=(0,ymax),
                         legend=False,
                         title='Latency histogram ({} bins, {} [ms] green threshold)'\
@@ -447,13 +507,6 @@
                        td.pid, task_name)
         pl.savefig(figname, bbox_inches='tight')
 
-        # Return statistics
-        stats_df = df.describe(percentiles=[0.95, 0.99])
-        label = '{:.1f}%'.format(100. * cdf.below)
-        stats = { label : cdf.threshold }
-        return stats_df.append(pd.DataFrame(
-            stats.values(), columns=['latency'], index=stats.keys()))
-
     def plotLatencyBands(self, task, axes=None):
         """
--
GitLab
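
A minimal usage sketch for the getter added by the last patch (the
trace path, event list and task name below are illustrative only):

   trace = Trace('/path/to/res_dir', ['sched_switch', 'sched_wakeup'])
   stats_df = trace.data_frame.latency_stats_df('mytask')
   print stats_df  # describe()-style stats plus the CDF threshold row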