From c278c2343c7f25083a80cb164b6bdc761d50050b Mon Sep 17 00:00:00 2001
From: Patrick Bellasi
Date: Tue, 22 Nov 2016 14:40:28 +0000
Subject: [PATCH 1/5] libs: use a more configurable logging strategy

This is a big re-factoring which updates all the logging statements to
make use of a more sane and consistent configuration:

- each module uses the same logger configuration but refers to a custom
  logger which has the same name as the module; this should allow a
  fine-grained configuration of the log level for each module.
- a new libs/utils/conf.py::LisaLogging class allows loading the
  configuration and customizing the root logger level.
- loggers are configured using a single configuration file
  (logging.conf) which is provided by default in LISA's root folder.

Each logger can be configured to specify:
- loglevel: INFO by default for all modules
- output channels: console and logfile (lisa.log by default) for all
  modules
- string format: the same by default for all modules

If logging.conf does not specify a configuration for a logger, then the
default logger configuration applies, which is the one defined by the
"root" logger.

Notebooks and LisaTest are required to call the LisaLogging::setup()
method at least once to properly initialize logging.

Signed-off-by: Patrick Bellasi
---
 libs/bart | 2 +-
 libs/devlib | 2 +-
 libs/trappy | 2 +-
 libs/utils/__init__.py | 2 +-
 libs/utils/analysis/cpus_analysis.py | 13 +-
 libs/utils/analysis/eas_analysis.py | 77 ++++----
 libs/utils/analysis/frequency_analysis.py | 41 ++---
 libs/utils/analysis/functions_analysis.py | 5 +-
 libs/utils/analysis/idle_analysis.py | 19 +-
 libs/utils/analysis/latency_analysis.py | 65 ++++---
 libs/utils/analysis/status_analysis.py | 7 +-
 libs/utils/analysis/tasks_analysis.py | 86 +++++----
 libs/utils/analysis_module.py | 4 +
 libs/utils/analysis_register.py | 16 +-
 libs/utils/android/screen.py | 20 +-
 libs/utils/android/system.py | 4 +-
 libs/utils/android/workload.py | 33 ++--
 libs/utils/android/workloads/jankbench.py | 38 ++--
 libs/utils/android/workloads/youtube.py | 12 +-
 libs/utils/conf.py | 41 ++++-
 libs/utils/energy.py | 126 +++++++------
 libs/utils/env.py | 212 ++++++++++------------
 libs/utils/executor.py | 165 ++++++++---------
 libs/utils/perf_analysis.py | 13 +-
 libs/utils/report.py | 70 ++++---
 libs/utils/results.py | 60 +++---
 libs/utils/test.py | 14 +-
 libs/utils/trace.py | 68 +++----
 libs/wlgen/wlgen/perf_bench.py | 1 +
 libs/wlgen/wlgen/rta.py | 102 +++++------
 libs/wlgen/wlgen/workload.py | 63 +++----
 logging.conf | 87 +++++++++
 tests/eas/capacity_capping.py | 5 -
 33 files changed, 765 insertions(+), 710 deletions(-)
 create mode 100644 logging.conf

diff --git a/libs/bart b/libs/bart
index caa216a10..290caafb3 160000
--- a/libs/bart
+++ b/libs/bart
@@ -1 +1 @@
-Subproject commit caa216a10e21cd9557cdedb91e5479e690cb64a4
+Subproject commit 290caafb3675103f8ee91b71245a7aba0f2b0c2c
diff --git a/libs/devlib b/libs/devlib
index cae239d1d..91f4f97a0 160000
--- a/libs/devlib
+++ b/libs/devlib
@@ -1 +1 @@
-Subproject commit cae239d1dc1d379c3acdec081583a56bf5392313
+Subproject commit 91f4f97a0b44ab76da0c0214cb86815f969ab465
diff --git a/libs/trappy b/libs/trappy
index d7565ebcd..efdf0daef 160000
--- a/libs/trappy
+++ b/libs/trappy
@@ -1 +1 @@
-Subproject commit d7565ebcd0c4d4a01bf8aa8db522405c6ee2a190
+Subproject commit efdf0daef51c9d2d5461876c4408318160cba338
diff --git a/libs/utils/__init__.py b/libs/utils/__init__.py
index 1ad7ce895..075a73f28 100644
--- a/libs/utils/__init__.py
+++ b/libs/utils/__init__.py
@@ -21,7 +21,7 @@
from env import TestEnv from executor import Executor from energy import EnergyMeter -from conf import JsonConf +from conf import LisaLogging, JsonConf from trace import Trace from perf_analysis import PerfAnalysis diff --git a/libs/utils/analysis/cpus_analysis.py b/libs/utils/analysis/cpus_analysis.py index 0df67610e..17ef4a475 100644 --- a/libs/utils/analysis/cpus_analysis.py +++ b/libs/utils/analysis/cpus_analysis.py @@ -24,9 +24,6 @@ from trappy.utils import listify from analysis_module import AnalysisModule -# Configure logging -import logging - class CpusAnalysis(AnalysisModule): """ @@ -51,8 +48,8 @@ class CpusAnalysis(AnalysisModule): :returns: :mod:`pandas.DataFrame` """ if not self._trace.hasEvents('sched_switch'): - logging.warn('Events [sched_switch] not found, context switch ' - 'computation not possible!') + self._log.warning('Events [sched_switch] not found, context switch ' + 'computation not possible!') return None sched_df = self._dfg_trace_event('sched_switch') @@ -78,8 +75,8 @@ class CpusAnalysis(AnalysisModule): :type cpus: list(int) """ if not self._trace.hasEvents('sched_load_avg_cpu'): - logging.warn('Events [sched_load_avg_cpu] not found, ' - 'plot DISABLED!') + self._log.warning('Events [sched_load_avg_cpu] not found, ' + 'plot DISABLED!') return # Filter on specified cpus @@ -182,7 +179,7 @@ class CpusAnalysis(AnalysisModule): Plot histogram of context switches on each CPU. """ if not self._trace.hasEvents('sched_switch'): - logging.warn('Events [sched_switch] not found, plot DISABLED!') + self._log.warning('Events [sched_switch] not found, plot DISABLED!') return ctx_sw_df = self._dfg_context_switches() diff --git a/libs/utils/analysis/eas_analysis.py b/libs/utils/analysis/eas_analysis.py index 395d58e8b..e34c4614d 100644 --- a/libs/utils/analysis/eas_analysis.py +++ b/libs/utils/analysis/eas_analysis.py @@ -23,9 +23,6 @@ import pylab as pl from analysis_module import AnalysisModule -# Configure logging -import logging - class EasAnalysis(AnalysisModule): """ @@ -56,53 +53,53 @@ class EasAnalysis(AnalysisModule): Plot energy_diff()-related signals on time axes. 
""" if not self._trace.hasEvents('sched_energy_diff'): - logging.warn('Event [sched_energy_diff] not found, plot DISABLED!') + self._log.warning('Event [sched_energy_diff] not found, plot DISABLED!') return df = self._dfg_trace_event('sched_energy_diff') # Filter on 'tasks' if tasks is not None: - logging.info('Plotting EDiff data just for task(s) [%s]', tasks) + self._log.info('Plotting EDiff data just for task(s) [%s]', tasks) df = df[df['comm'].isin(tasks)] # Filter on 'usage_delta' if min_usage_delta is not None: - logging.info('Plotting EDiff data just with minimum ' - 'usage_delta of [%d]', min_usage_delta) + self._log.info('Plotting EDiff data just with minimum ' + 'usage_delta of [%d]', min_usage_delta) df = df[abs(df['usage_delta']) >= min_usage_delta] if max_usage_delta is not None: - logging.info('Plotting EDiff data just with maximum ' - 'usage_delta of [%d]', max_usage_delta) + self._log.info('Plotting EDiff data just with maximum ' + 'usage_delta of [%d]', max_usage_delta) df = df[abs(df['usage_delta']) <= max_usage_delta] # Filter on 'cap_delta' if min_cap_delta is not None: - logging.info('Plotting EDiff data just with minimum ' - 'cap_delta of [%d]', min_cap_delta) + self._log.info('Plotting EDiff data just with minimum ' + 'cap_delta of [%d]', min_cap_delta) df = df[abs(df['cap_delta']) >= min_cap_delta] if max_cap_delta is not None: - logging.info('Plotting EDiff data just with maximum ' - 'cap_delta of [%d]', max_cap_delta) + self._log.info('Plotting EDiff data just with maximum ' + 'cap_delta of [%d]', max_cap_delta) df = df[abs(df['cap_delta']) <= max_cap_delta] # Filter on 'nrg_delta' if min_nrg_delta is not None: - logging.info('Plotting EDiff data just with minimum ' - 'nrg_delta of [%d]', min_nrg_delta) + self._log.info('Plotting EDiff data just with minimum ' + 'nrg_delta of [%d]', min_nrg_delta) df = df[abs(df['nrg_delta']) >= min_nrg_delta] if max_nrg_delta is not None: - logging.info('Plotting EDiff data just with maximum ' - 'nrg_delta of [%d]', max_nrg_delta) + self._log.info('Plotting EDiff data just with maximum ' + 'nrg_delta of [%d]', max_nrg_delta) df = df[abs(df['nrg_delta']) <= max_nrg_delta] # Filter on 'nrg_diff' if min_nrg_diff is not None: - logging.info('Plotting EDiff data just with minimum ' - 'nrg_diff of [%d]', min_nrg_diff) + self._log.info('Plotting EDiff data just with minimum ' + 'nrg_diff of [%d]', min_nrg_diff) df = df[abs(df['nrg_diff']) >= min_nrg_diff] if max_nrg_diff is not None: - logging.info('Plotting EDiff data just with maximum ' - 'nrg_diff of [%d]', max_nrg_diff) + self._log.info('Plotting EDiff data just with maximum ' + 'nrg_diff of [%d]', max_nrg_diff) df = df[abs(df['nrg_diff']) <= max_nrg_diff] # Grid: setup stats for gris @@ -200,53 +197,53 @@ class EasAnalysis(AnalysisModule): (PxE). 
""" if not self._trace.hasEvents('sched_energy_diff'): - logging.warn('Event [sched_energy_diff] not found, plot DISABLED!') + self._log.warning('Event [sched_energy_diff] not found, plot DISABLED!') return df = self._dfg_trace_event('sched_energy_diff') # Filter on 'tasks' if tasks is not None: - logging.info('Plotting EDiff data just for task(s) [%s]', tasks) + self._log.info('Plotting EDiff data just for task(s) [%s]', tasks) df = df[df['comm'].isin(tasks)] # Filter on 'usage_delta' if min_usage_delta is not None: - logging.info('Plotting EDiff data just with minimum ' - 'usage_delta of [%d]', min_usage_delta) + self._log.info('Plotting EDiff data just with minimum ' + 'usage_delta of [%d]', min_usage_delta) df = df[abs(df['usage_delta']) >= min_usage_delta] if max_usage_delta is not None: - logging.info('Plotting EDiff data just with maximum ' - 'usage_delta of [%d]', max_usage_delta) + self._log.info('Plotting EDiff data just with maximum ' + 'usage_delta of [%d]', max_usage_delta) df = df[abs(df['usage_delta']) <= max_usage_delta] # Filter on 'cap_delta' if min_cap_delta is not None: - logging.info('Plotting EDiff data just with minimum ' - 'cap_delta of [%d]', min_cap_delta) + self._log.info('Plotting EDiff data just with minimum ' + 'cap_delta of [%d]', min_cap_delta) df = df[abs(df['cap_delta']) >= min_cap_delta] if max_cap_delta is not None: - logging.info('Plotting EDiff data just with maximum ' - 'cap_delta of [%d]', max_cap_delta) + self._log.info('Plotting EDiff data just with maximum ' + 'cap_delta of [%d]', max_cap_delta) df = df[abs(df['cap_delta']) <= max_cap_delta] # Filter on 'nrg_delta' if min_nrg_delta is not None: - logging.info('Plotting EDiff data just with minimum ' - 'nrg_delta of [%d]', min_nrg_delta) + self._log.info('Plotting EDiff data just with minimum ' + 'nrg_delta of [%d]', min_nrg_delta) df = df[abs(df['nrg_delta']) >= min_nrg_delta] if max_nrg_delta is not None: - logging.info('Plotting EDiff data just with maximum ' - 'nrg_delta of [%d]', max_nrg_delta) + self._log.info('Plotting EDiff data just with maximum ' + 'nrg_delta of [%d]', max_nrg_delta) df = df[abs(df['nrg_delta']) <= max_nrg_delta] # Filter on 'nrg_diff' if min_nrg_diff is not None: - logging.info('Plotting EDiff data just with minimum ' - 'nrg_diff of [%d]', min_nrg_diff) + self._log.info('Plotting EDiff data just with minimum ' + 'nrg_diff of [%d]', min_nrg_diff) df = df[abs(df['nrg_diff']) >= min_nrg_diff] if max_nrg_diff is not None: - logging.info('Plotting EDiff data just with maximum ' - 'nrg_diff of [%d]', max_nrg_diff) + self._log.info('Plotting EDiff data just with maximum ' + 'nrg_diff of [%d]', max_nrg_diff) df = df[abs(df['nrg_diff']) <= max_nrg_diff] # Grid: setup grid for P-E space @@ -367,7 +364,7 @@ class EasAnalysis(AnalysisModule): Plot the configuration of SchedTune. 
""" if not self._trace.hasEvents('sched_tune_config'): - logging.warn('Event [sched_tune_config] not found, plot DISABLED!') + self._log.warning('Event [sched_tune_config] not found, plot DISABLED!') return # Grid gs = gridspec.GridSpec(2, 1, height_ratios=[4, 1]) diff --git a/libs/utils/analysis/frequency_analysis.py b/libs/utils/analysis/frequency_analysis.py index ddf098ff5..26bcfcc81 100644 --- a/libs/utils/analysis/frequency_analysis.py +++ b/libs/utils/analysis/frequency_analysis.py @@ -28,9 +28,6 @@ from devlib.utils.misc import memoized from analysis_module import AnalysisModule from trace import NON_IDLE_STATE, ResidencyTime, ResidencyData -# Configure logging -import logging - class FrequencyAnalysis(AnalysisModule): """ @@ -99,7 +96,7 @@ class FrequencyAnalysis(AnalysisModule): self._platform['clusters'][cluster.lower()] ) except KeyError: - logging.warn( + self._log.warning( 'Platform descriptor has not a cluster named [%s], ' 'plot disabled!', cluster ) @@ -127,7 +124,7 @@ class FrequencyAnalysis(AnalysisModule): :type title: str """ if not self._trace.hasEvents('cpu_frequency'): - logging.warn('Events [cpu_frequency] not found, plot DISABLED!') + self._log.warning('Events [cpu_frequency] not found, plot DISABLED!') return df = self._dfg_trace_event('cpu_frequency') @@ -183,7 +180,7 @@ class FrequencyAnalysis(AnalysisModule): bfreq['frequency'].plot(style=['r-'], ax=axes, drawstyle='steps-post', alpha=0.4) else: - logging.warn('NO big CPUs frequency events to plot') + self._log.warning('NO big CPUs frequency events to plot') axes.set_xlim(self._trace.x_min, self._trace.x_max) axes.set_ylabel('MHz') axes.grid(True) @@ -203,7 +200,7 @@ class FrequencyAnalysis(AnalysisModule): lfreq['frequency'].plot(style=['b-'], ax=axes, drawstyle='steps-post', alpha=0.4) else: - logging.warn('NO LITTLE CPUs frequency events to plot') + self._log.warning('NO LITTLE CPUs frequency events to plot') axes.set_xlim(self._trace.x_min, self._trace.x_max) axes.set_ylabel('MHz') axes.grid(True) @@ -214,10 +211,10 @@ class FrequencyAnalysis(AnalysisModule): .format(self._trace.plots_dir, self._trace.plots_prefix) pl.savefig(figname, bbox_inches='tight') - logging.info('LITTLE cluster average frequency: %.3f GHz', - avg_lfreq/1e3) - logging.info('big cluster average frequency: %.3f GHz', - avg_bfreq/1e3) + self._log.info('LITTLE cluster average frequency: %.3f GHz', + avg_lfreq/1e3) + self._log.info('big cluster average frequency: %.3f GHz', + avg_bfreq/1e3) return (avg_lfreq/1e3, avg_bfreq/1e3) @@ -241,10 +238,10 @@ class FrequencyAnalysis(AnalysisModule): :type active: bool """ if not self._trace.hasEvents('cpu_frequency'): - logging.warn('Events [cpu_frequency] not found, plot DISABLED!') + self._log.warning('Events [cpu_frequency] not found, plot DISABLED!') return if not self._trace.hasEvents('cpu_idle'): - logging.warn('Events [cpu_idle] not found, plot DISABLED!') + self._log.warning('Events [cpu_idle] not found, plot DISABLED!') return if cpus is None: @@ -297,17 +294,17 @@ class FrequencyAnalysis(AnalysisModule): :type active: bool """ if not self._trace.hasEvents('cpu_frequency'): - logging.warn('Events [cpu_frequency] not found, plot DISABLED!') + self._log.warning('Events [cpu_frequency] not found, plot DISABLED!') return if not self._trace.hasEvents('cpu_idle'): - logging.warn('Events [cpu_idle] not found, plot DISABLED!') + self._log.warning('Events [cpu_idle] not found, plot DISABLED!') return # Assumption: all CPUs in a cluster run at the same frequency, i.e. 
the # frequency is scaled per-cluster not per-CPU. Hence, we can limit the # cluster frequencies data to a single CPU if not self._trace.freq_coherency: - logging.warn('Cluster frequency is not coherent, plot DISABLED!') + self._log.warning('Cluster frequency is not coherent, plot DISABLED!') return # Sanitize clusters @@ -349,12 +346,12 @@ class FrequencyAnalysis(AnalysisModule): dataframes """ if not self._trace.hasEvents('cpu_frequency'): - logging.warn('Events [cpu_frequency] not found, ' - 'frequency residency computation not possible!') + self._log.warning('Events [cpu_frequency] not found, ' + 'frequency residency computation not possible!') return None if not self._trace.hasEvents('cpu_idle'): - logging.warn('Events [cpu_idle] not found, ' - 'frequency residency computation not possible!') + self._log.warning('Events [cpu_idle] not found, ' + 'frequency residency computation not possible!') return None _cluster = listify(cluster) @@ -365,8 +362,8 @@ class FrequencyAnalysis(AnalysisModule): # cluster frequencies data to a single CPU. This assumption is verified # by the Trace module when parsing the trace. if len(_cluster) > 1 and not self._trace.freq_coherency: - logging.warn('Cluster frequency is NOT coherent,' - 'cannot compute residency!') + self._log.warning('Cluster frequency is NOT coherent,' + 'cannot compute residency!') return None cluster_freqs = freq_df[freq_df.cpu == _cluster[0]] diff --git a/libs/utils/analysis/functions_analysis.py b/libs/utils/analysis/functions_analysis.py index a82f63ea8..4f4e7da44 100644 --- a/libs/utils/analysis/functions_analysis.py +++ b/libs/utils/analysis/functions_analysis.py @@ -21,9 +21,6 @@ from trappy.utils import listify from analysis_module import AnalysisModule -# Configure logging -import logging - class FunctionsAnalysis(AnalysisModule): """ @@ -53,7 +50,7 @@ class FunctionsAnalysis(AnalysisModule): :type metrics: srt or list(str) """ if not hasattr(self._trace, '_functions_stats_df'): - logging.warning('Functions stats data not available') + self._log.warning('Functions stats data not available') return metrics = listify(metrics) diff --git a/libs/utils/analysis/idle_analysis.py b/libs/utils/analysis/idle_analysis.py index 7819124e6..0eb14bcfd 100644 --- a/libs/utils/analysis/idle_analysis.py +++ b/libs/utils/analysis/idle_analysis.py @@ -26,9 +26,6 @@ from analysis_module import AnalysisModule from trace import ResidencyTime, ResidencyData from trappy.utils import listify -# Configure logging -import logging - class IdleAnalysis(AnalysisModule): """ @@ -55,8 +52,8 @@ class IdleAnalysis(AnalysisModule): :returns: :mod:`pandas.DataFrame` - idle state residency dataframe """ if not self._trace.hasEvents('cpu_idle'): - logging.warn('Events [cpu_idle] not found, '\ - 'idle state residency computation not possible!') + self._log.warning('Events [cpu_idle] not found, ' + 'idle state residency computation not possible!') return None idle_df = self._dfg_trace_event('cpu_idle') @@ -105,8 +102,8 @@ class IdleAnalysis(AnalysisModule): :returns: :mod:`pandas.DataFrame` - idle state residency dataframe """ if not self._trace.hasEvents('cpu_idle'): - logging.warn('Events [cpu_idle] not found, '\ - 'idle state residency computation not possible!') + self._log.warning('Events [cpu_idle] not found, ' + 'idle state residency computation not possible!') return None _cluster = cluster @@ -114,7 +111,7 @@ class IdleAnalysis(AnalysisModule): try: _cluster = self._platform['clusters'][cluster.lower()] except KeyError: - logging.warn('%s cluster not 
found!', cluster) + self._log.warning('%s cluster not found!', cluster) return None idle_df = self._dfg_trace_event('cpu_idle') @@ -181,8 +178,8 @@ class IdleAnalysis(AnalysisModule): :type pct: bool """ if not self._trace.hasEvents('cpu_idle'): - logging.warn('Events [cpu_idle] not found, '\ - 'plot DISABLED!') + self._log.warning('Events [cpu_idle] not found, ' + 'plot DISABLED!') return if cpus is None: @@ -223,7 +220,7 @@ class IdleAnalysis(AnalysisModule): :type clusters: str ot list(str) """ if not self._trace.hasEvents('cpu_idle'): - logging.warn('Events [cpu_idle] not found, plot DISABLED!') + self._log.warning('Events [cpu_idle] not found, plot DISABLED!') return # Sanitize clusters diff --git a/libs/utils/analysis/latency_analysis.py b/libs/utils/analysis/latency_analysis.py index ba9a9a5d3..848d5cbef 100644 --- a/libs/utils/analysis/latency_analysis.py +++ b/libs/utils/analysis/latency_analysis.py @@ -29,9 +29,6 @@ from analysis_module import AnalysisModule from devlib.utils.misc import memoized from trappy.utils import listify -# Configure logging -import logging - # Tuple representing all IDs data of a Task TaskData = namedtuple('TaskData', ['pid', 'names', 'label']) @@ -54,12 +51,12 @@ class LatencyAnalysis(AnalysisModule): def _dfg_latency_df(self, task): if not self._trace.hasEvents('sched_wakeup'): - logging.warn('Events [sched_wakeup] not found, ' - 'cannot compute CPU active signal!') + self._log.warning('Events [sched_wakeup] not found, ' + 'cannot compute CPU active signal!') return None if not self._trace.hasEvents('sched_switch'): - logging.warn('Events [sched_switch] not found, ' - 'cannot compute CPU active signal!') + self._log.warning('Events [sched_switch] not found, ' + 'cannot compute CPU active signal!') return None # Get task data @@ -103,16 +100,16 @@ class LatencyAnalysis(AnalysisModule): numbers = 0 for value in task_switches_df.curr_state.unique(): if type(value) is not str: - logging.warning("The [sched_switch] events contain 'prev_state' value [%s]", - value) + self._log.warning('The [sched_switch] events contain "prev_state" value [%s]', + value) numbers += 1 if numbers: verb = 'is' if numbers == 1 else 'are' - logging.warning(" which %s not currently mapped into a task state.", - verb) - logging.warning("Check mappings in:") - logging.warning(" %s::%s _taskState()", __file__, - self.__class__.__name__) + self._log.warning(' which %s not currently mapped into a task state.', + verb) + self._log.warning('Check mappings in:') + self._log.warning(' %s::%s _taskState()', + __file__, self.__class__.__name__) # Forward annotate task state task_latency_df['next_state'] = task_latency_df.curr_state.shift(-1) @@ -178,12 +175,12 @@ class LatencyAnalysis(AnalysisModule): """ if not self._trace.hasEvents('sched_switch'): - logging.warn('Event [sched_switch] not found, ' - 'plot DISABLED!') + self._log.warning('Event [sched_switch] not found, ' + 'plot DISABLED!') return if not self._trace.hasEvents('sched_wakeup'): - logging.warn('Event [sched_wakeup] not found, ' - 'plot DISABLED!') + self._log.warning('Event [sched_wakeup] not found, ' + 'plot DISABLED!') return # Get task data @@ -197,7 +194,7 @@ class LatencyAnalysis(AnalysisModule): wkp_df = self._dfg_latency_wakeup_df(td.pid) if wkp_df is not None: wkp_df.rename(columns={'wakeup_latency' : 'latency'}, inplace=True) - logging.info("Found: %5d WAKEUP latencies", len(wkp_df)) + self._log.info('Found: %5d WAKEUP latencies', len(wkp_df)) # Load preempt latencies (if required) prt_df = None @@ -205,16 +202,16 @@ 
class LatencyAnalysis(AnalysisModule): prt_df = self._dfg_latency_preemption_df(td.pid) if prt_df is not None: prt_df.rename(columns={'preempt_latency' : 'latency'}, inplace=True) - logging.info("Found: %5d PREEMPT latencies", len(prt_df)) + self._log.info('Found: %5d PREEMPT latencies', len(prt_df)) if wkp_df is None and prt_df is None: - logging.warning("No Latency info for task [{}]".format(td.label)) + self._log.warning('No Latency info for task [%s]', td.label) return # Join the two data frames df = wkp_df.append(prt_df) ymax = 1.1 * df.latency.max() - logging.info("Total: %5d latency events", len(df)) + self._log.info('Total: %5d latency events', len(df)) df = pd.DataFrame(sorted(df.latency), columns=['latency']) @@ -285,12 +282,12 @@ class LatencyAnalysis(AnalysisModule): :type axes: :mod:`matplotlib.axes.Axes` """ if not self._trace.hasEvents('sched_switch'): - logging.warn('Event [sched_switch] not found, ' - 'plot DISABLED!') + self._log.warning('Event [sched_switch] not found, ' + 'plot DISABLED!') return if not self._trace.hasEvents('sched_wakeup'): - logging.warn('Event [sched_wakeup] not found, ' - 'plot DISABLED!') + self._log.warning('Event [sched_wakeup] not found, ' + 'plot DISABLED!') return # Get task PID @@ -302,7 +299,7 @@ class LatencyAnalysis(AnalysisModule): prt_df = self._dfg_latency_preemption_df(td.pid) if wkl_df is None and prt_df is None: - logging.warning("No task with name [{}]".format(td.label)) + self._log.warning('No task with name [%s]', td.label) return # If not axis provided: generate a standalone plot @@ -346,17 +343,17 @@ class LatencyAnalysis(AnalysisModule): if isinstance(task, str): task_pids = self._trace.getTaskByName(task) if len(task_pids) == 0: - logging.warning("No tasks found with name [%s]", task) + self._log.warning('No tasks found with name [%s]', task) return None task_pid = task_pids[0] if len(task_pids) > 1: - logging.warning("Multiple PIDs for task named [%s]", task) + self._log.warning('Multiple PIDs for task named [%s]', task) for pid in task_pids: - logging.warning(" %5d : %s", pid, - ','.join(self._trace.getTaskByPid(pid))) - logging.warning("Returning stats only for PID: %d", - task_pid) + self._log.warning(' %5d : %s', pid, + ','.join(self._trace.getTaskByPid(pid))) + self._log.warning('Returning stats only for PID: %d', + task_pid) task_names = self._trace.getTaskByPid(task_pid) # Get task name @@ -364,7 +361,7 @@ class LatencyAnalysis(AnalysisModule): task_pid = task task_names = self._trace.getTaskByPid(task_pid) if len(task_names) == 0: - logging.warning("No tasks found with name [%s]", task) + self._log.warning('No tasks found with name [%s]', task) return None else: diff --git a/libs/utils/analysis/status_analysis.py b/libs/utils/analysis/status_analysis.py index 771fdfe3f..a87522c7b 100644 --- a/libs/utils/analysis/status_analysis.py +++ b/libs/utils/analysis/status_analysis.py @@ -24,9 +24,6 @@ import matplotlib.pyplot as plt from analysis_module import AnalysisModule -# Configure logging -import logging - class StatusAnalysis(AnalysisModule): """ @@ -82,8 +79,8 @@ class StatusAnalysis(AnalysisModule): :type axes: :mod:`matplotlib.axes.Axes` """ if not self._trace.hasEvents('sched_overutilized'): - logging.warn('Event [sched_overutilized] not found, ' - 'plot DISABLED!') + self._log.warning('Event [sched_overutilized] not found, ' + 'plot DISABLED!') return df = self._dfg_overutilized() diff --git a/libs/utils/analysis/tasks_analysis.py b/libs/utils/analysis/tasks_analysis.py index 76ae64310..c76340ab8 100644 --- 
a/libs/utils/analysis/tasks_analysis.py +++ b/libs/utils/analysis/tasks_analysis.py @@ -27,9 +27,6 @@ from analysis_module import AnalysisModule from devlib.utils.misc import memoized from trappy.utils import listify -# Configure logging -import logging - class TasksAnalysis(AnalysisModule): """ @@ -60,7 +57,7 @@ class TasksAnalysis(AnalysisModule): :type min_utilization: int """ if not self._trace.hasEvents('sched_load_avg_task'): - logging.warn('Events [sched_load_avg_task] not found') + self._log.warning('Events [sched_load_avg_task] not found') return None if min_utilization is None: @@ -70,14 +67,14 @@ class TasksAnalysis(AnalysisModule): df = self._dfg_trace_event('sched_load_avg_task') big_tasks_events = df[df.util_avg > min_utilization] if not len(big_tasks_events): - logging.warn('No tasks with with utilization samples > %d', - min_utilization) + self._log.warning('No tasks with with utilization samples > %d', + min_utilization) return None # Report the number of tasks which match the min_utilization condition big_tasks = big_tasks_events.pid.unique() - logging.info('%5d tasks with samples of utilization > %d', - len(big_tasks), min_utilization) + self._log.info('%5d tasks with samples of utilization > %d', + len(big_tasks), min_utilization) # Compute number of samples above threshold big_tasks_stats = big_tasks_events.groupby('pid')\ @@ -88,12 +85,12 @@ class TasksAnalysis(AnalysisModule): # Filter for number of occurrences big_tasks_stats = big_tasks_stats[big_tasks_stats['count'] > min_samples] if not len(big_tasks_stats): - logging.warn(' but none with more than %d samples', - min_samples) + self._log.warning(' but none with more than %d samples', + min_samples) return None - logging.info(' %d with more than %d samples', - len(big_tasks_stats), min_samples) + self._log.info(' %d with more than %d samples', + len(big_tasks_stats), min_samples) # Add task name column big_tasks_stats['comm'] = big_tasks_stats.index.map( @@ -113,7 +110,7 @@ class TasksAnalysis(AnalysisModule): :type min_wakeups: int """ if not self._trace.hasEvents('sched_wakeup'): - logging.warn('Events [sched_wakeup] not found') + self._log.warning('Events [sched_wakeup] not found') return None df = self._dfg_trace_event('sched_wakeup') @@ -127,11 +124,11 @@ class TasksAnalysis(AnalysisModule): wkp_tasks_stats = wkp_tasks_stats[ wkp_tasks_stats['count'] > min_wakeups] if not len(df): - logging.warn('No tasks with more than %d wakeups', - len(wkp_tasks_stats)) + self._log.warning('No tasks with more than %d wakeups', + len(wkp_tasks_stats)) return None - logging.info('%5d tasks with more than %d wakeups', - len(df), len(wkp_tasks_stats)) + self._log.info('%5d tasks with more than %d wakeups', + len(df), len(wkp_tasks_stats)) # Add task name column wkp_tasks_stats['comm'] = wkp_tasks_stats.index.map( @@ -156,7 +153,7 @@ class TasksAnalysis(AnalysisModule): :type min_prio: int """ if not self._trace.hasEvents('sched_switch'): - logging.warn('Events [sched_switch] not found') + self._log.warning('Events [sched_switch] not found') return None df = self._dfg_trace_event('sched_switch') @@ -228,8 +225,8 @@ class TasksAnalysis(AnalysisModule): # Check for the minimum required signals to be available if not self._trace.hasEvents('sched_load_avg_task'): - logging.warn('Events [sched_load_avg_task] not found, ' - 'plot DISABLED!') + self._log.warning('Events [sched_load_avg_task] not found, ' + 'plot DISABLED!') return # Defined list of tasks to plot @@ -278,14 +275,14 @@ class TasksAnalysis(AnalysisModule): task_name = 
self._trace.getTaskByPid(tid) if len(task_name) == 1: task_name = task_name[0] - logging.info('Plotting %5d: %s...', tid, task_name) + self._log.info('Plotting %5d: %s...', tid, task_name) else: - logging.info('Plotting %5d: %s...', tid, ', '.join(task_name)) + self._log.info('Plotting %5d: %s...', tid, ', '.join(task_name)) plot_id = 0 # For each task create a figure with plots_count plots plt.figure(figsize=(16, 2*6+3)) - plt.suptitle("Task Signals", + plt.suptitle('Task Signals', y=.94, fontsize=16, horizontalalignment='center') # Plot load and utilization @@ -364,7 +361,7 @@ class TasksAnalysis(AnalysisModule): big_frequent_tasks_count = len(big_frequent_task_pids) if big_frequent_tasks_count == 0: - logging.warn("No big/frequent tasks to plot") + self._log.warning('No big/frequent tasks to plot') return # Get the list of events for all big frequent tasks @@ -407,8 +404,8 @@ class TasksAnalysis(AnalysisModule): ax.set_xlabel('Time [s]') - logging.info('Tasks which have been a "utilization" of %d for at least %d samples', - self._little_cap, min_samples) + self._log.info('Tasks which have been a "utilization" of %d for at least %d samples', + self._little_cap, min_samples) def plotWakeupTasks(self, max_tasks=10, min_wakeups=0, per_cluster=False): """ @@ -426,13 +423,13 @@ class TasksAnalysis(AnalysisModule): """ if per_cluster is True and \ not self._trace.hasEvents('sched_wakeup_new'): - logging.warn('Events [sched_wakeup_new] not found, ' - 'plots DISABLED!') + self._log.warning('Events [sched_wakeup_new] not found, ' + 'plots DISABLED!') return elif not self._trace.hasEvents('sched_wakeup') and \ not self._trace.hasEvents('sched_wakeup_new'): - logging.warn('Events [sched_wakeup, sched_wakeup_new] not found, ' - 'plots DISABLED!') + self._log.warning('Events [sched_wakeup, sched_wakeup_new] not found, ' + 'plots DISABLED!') return # Define axes for side-by-side plottings @@ -454,12 +451,12 @@ class TasksAnalysis(AnalysisModule): ntlc = df[little_frequent]; ntlc_count = len(ntlc) - logging.info("%5d tasks forked on big cluster (%3.1f %%)", - ntbc_count, - 100. * ntbc_count / (ntbc_count + ntlc_count)) - logging.info("%5d tasks forked on LITTLE cluster (%3.1f %%)", - ntlc_count, - 100. * ntlc_count / (ntbc_count + ntlc_count)) + self._log.info('%5d tasks forked on big cluster (%3.1f %%)', + ntbc_count, + 100. * ntbc_count / (ntbc_count + ntlc_count)) + self._log.info('%5d tasks forked on LITTLE cluster (%3.1f %%)', + ntlc_count, + 100. 
* ntlc_count / (ntbc_count + ntlc_count)) ax = axes[0] ax.set_title('Tasks Forks on big CPUs'); @@ -483,7 +480,8 @@ class TasksAnalysis(AnalysisModule): wkp_task_pids = self._dfg_top_wakeup_tasks(min_wakeups) if len(wkp_task_pids): wkp_task_pids = wkp_task_pids.index.values[:max_tasks] - logging.info("Plotting %d frequent wakeup tasks", len(wkp_task_pids)) + self._log.info('Plotting %d frequent wakeup tasks', + len(wkp_task_pids)) ax = axes[0] ax.set_title('Tasks WakeUps Events') @@ -526,10 +524,10 @@ class TasksAnalysis(AnalysisModule): """ if not self._trace.hasEvents('sched_load_avg_task'): - logging.warn('Events [sched_load_avg_task] not found') + self._log.warning('Events [sched_load_avg_task] not found') return if not self._trace.hasEvents('cpu_frequency'): - logging.warn('Events [cpu_frequency] not found') + self._log.warning('Events [cpu_frequency] not found') return if big_cluster: @@ -549,8 +547,8 @@ class TasksAnalysis(AnalysisModule): big_task_pids = big_task_pids.index.values df = df[df.pid.isin(big_task_pids)] if not df.size: - logging.warn('No events for tasks with more then %d utilization ' - 'samples bigger than %d, plots DISABLED!') + self._log.warning('No events for tasks with more then %d utilization ' + 'samples bigger than %d, plots DISABLED!') return fig, axes = plt.subplots(2, 1, figsize=(14, 5)) @@ -644,8 +642,8 @@ class TasksAnalysis(AnalysisModule): data.plot(ax=axes, style=['y-'], drawstyle='steps-post') else: task_name = self._trace.getTaskByPid(tid) - logging.warning("No 'boosted_util' data for task [%d:%s]", - tid, task_name) + self._log.warning('No "boosted_util" data for task [%d:%s]', + tid, task_name) # Add Capacities data if avilable if 'nrg_model' in self._platform: @@ -654,7 +652,7 @@ class TasksAnalysis(AnalysisModule): max_bcap = nrg_model['big']['cpu']['cap_max'] tip_lcap = 0.8 * max_lcap tip_bcap = 0.8 * max_bcap - logging.debug( + self._log.debug( 'LITTLE capacity tip/max: %d/%d, big capacity tip/max: %d/%d', tip_lcap, max_lcap, tip_bcap, max_bcap ) diff --git a/libs/utils/analysis_module.py b/libs/utils/analysis_module.py index 160ab1bcf..60fa4a52b 100644 --- a/libs/utils/analysis_module.py +++ b/libs/utils/analysis_module.py @@ -15,6 +15,8 @@ # limitations under the License. 
# +import logging + """ Helper module for Analysis classes """ @@ -41,4 +43,6 @@ class AnalysisModule(object): trace._registerDataFrameGetters(self) + self._log = logging.getLogger('Analysis') + # vim :set tabstop=4 shiftwidth=4 expandtab diff --git a/libs/utils/analysis_register.py b/libs/utils/analysis_register.py index 0f9f238bc..fb348245f 100644 --- a/libs/utils/analysis_register.py +++ b/libs/utils/analysis_register.py @@ -19,6 +19,7 @@ import os import sys +import logging from glob import glob from inspect import isclass @@ -26,8 +27,6 @@ from importlib import import_module from analysis_module import AnalysisModule -# Configure logging -import logging class AnalysisRegister(object): @@ -40,15 +39,18 @@ class AnalysisRegister(object): def __init__(self, trace): + # Setup logging + self._log = logging.getLogger('Analysis') + # Add workloads dir to system path analysis_dir = os.path.dirname(os.path.abspath(__file__)) analysis_dir = os.path.join(analysis_dir, 'analysis') - logging.debug('%14s - Analysis: %s', 'Analysis', analysis_dir) + self._log.debug('Analysis: %s', analysis_dir) sys.path.insert(0, analysis_dir) - logging.debug('%14s - Syspath: %s', 'Analysis', format(sys.path)) + self._log.debug('Syspath: %s', sys.path) - logging.info("Registering trace analysis modules:") + self._log.info('Registering trace analysis modules:') for filepath in glob(os.path.join(analysis_dir, '*.py')): filename = os.path.splitext(os.path.basename(filepath))[0] @@ -56,7 +58,7 @@ class AnalysisRegister(object): if filename.startswith('__'): continue - logging.debug('%14s - Filename: %s', 'Analysis', filename) + self._log.debug('Filename: %s', filename) # Import the module for inspection module = import_module(filename) @@ -69,6 +71,6 @@ class AnalysisRegister(object): issubclass(handler, AnalysisModule): module_name = module.__name__.replace('_analysis', '') setattr(self, module_name, handler(trace)) - logging.info(" %s", module_name) + self._log.info(' %s', module_name) # vim :set tabstop=4 shiftwidth=4 expandtab diff --git a/libs/utils/android/screen.py b/libs/utils/android/screen.py index 2f491216e..f5af5377e 100644 --- a/libs/utils/android/screen.py +++ b/libs/utils/android/screen.py @@ -15,6 +15,8 @@ # limitations under the License. 
# +import logging + class Screen(object): """ Set of utility functions to control an Android Screen @@ -25,18 +27,19 @@ class Screen(object): """ Set screen orientation mode """ + log = logging.getLogger('Screen') acc_mode = 1 if auto else 0 # Force manual orientation of portrait specified if portrait is not None: acc_mode = 0 - target.logger.info("Force manual orientation") + log.info('Force manual orientation') if acc_mode == 0: usr_mode = 0 if portrait else 1 usr_mode_str = 'PORTRAIT' if portrait else 'LANDSCAPE' - target.logger.info('Set orientation: %s', usr_mode_str) + log.info('Set orientation: %s', usr_mode_str) else: usr_mode = 0 - target.logger.info('Set orientation: AUTO') + log.info('Set orientation: AUTO') if acc_mode == 0: target.execute('content insert '\ @@ -63,6 +66,7 @@ class Screen(object): """ Set screen brightness percentage """ + log = logging.getLogger('Screen') bri_mode = 1 if auto else 0 # Force manual brightness if a percent specified if percent: @@ -81,28 +85,30 @@ class Screen(object): '--uri content://settings/system '\ '--bind name:s:screen_brightness '\ '--bind value:i:{}'.format(value)) - target.logger.info('Set brightness: %d%%', percent) + log.info('Set brightness: %d%%', percent) else: - target.logger.info('Set brightness: AUTO') + log.info('Set brightness: AUTO') @staticmethod def set_dim(target, auto=True): """ Set screen dimming mode """ + log = logging.getLogger('Screen') dim_mode = 1 if auto else 0 dim_mode_str = 'ON' if auto else 'OFF' target.execute('content insert '\ '--uri content://settings/system '\ '--bind name:s:dim_screen '\ '--bind value:i:{}'.format(dim_mode)) - target.logger.info('Dim screen mode: %s', dim_mode_str) + log.info('Dim screen mode: %s', dim_mode_str) @staticmethod def set_timeout(target, seconds=30): """ Set screen off timeout in seconds """ + log = logging.getLogger('Screen') if seconds<0: msg = "Screen timeout {}: cannot be negative".format(seconds) raise ValueError(msg) @@ -111,7 +117,7 @@ class Screen(object): '--uri content://settings/system '\ '--bind name:s:screen_off_timeout '\ '--bind value:i:{}'.format(value)) - target.logger.info('Screen timeout: %d [s]', seconds) + log.info('Screen timeout: %d [s]', seconds) @staticmethod def set_defaults(target): diff --git a/libs/utils/android/system.py b/libs/utils/android/system.py index 2268a4f91..701704156 100644 --- a/libs/utils/android/system.py +++ b/libs/utils/android/system.py @@ -16,6 +16,7 @@ # import logging + from devlib.utils.android import adb_command from devlib import TargetError @@ -42,7 +43,8 @@ class System(object): '--ez state {}'\ .format(ap_state), as_root=True) except TargetError: - target.logger.warning("Failed to toggle airplane mode, permission denied.") + log = logging.getLogger('System') + log.warning('Failed to toggle airplane mode, permission denied.') @staticmethod def start_app(target, apk_name): diff --git a/libs/utils/android/workload.py b/libs/utils/android/workload.py index 8b097a162..122c9a7c7 100644 --- a/libs/utils/android/workload.py +++ b/libs/utils/android/workload.py @@ -17,6 +17,7 @@ import os import sys +import logging from glob import glob from inspect import isclass @@ -24,7 +25,6 @@ from importlib import import_module from collections import namedtuple -import logging class Workload(object): """ @@ -32,11 +32,6 @@ class Workload(object): """ _availables = None - # Setup logger - logger = logging.getLogger('Workload') - logger.setLevel(logging.INFO) - - _AW = namedtuple('AndroidWorkload', ['module_name', 'module', 'class_name', 
'ctor']) @@ -48,8 +43,9 @@ class Workload(object): if Workload._availables is None: Workload.availables(te.target) # Build list of case insensitive workload names + log = logging.getLogger('Workload') if name not in Workload._availables: - logging.warning('Workload [%s] not available on target', name) + log.warning('Workload [%s] not available on target', name) return None return Workload._availables[name].ctor(te) @@ -63,17 +59,20 @@ class Workload(object): Workload._availables = {} + log = logging.getLogger('Workload') + log.debug('Building list of available workloads...') + # Add workloads dir to system path workloads_dir = os.path.dirname(os.path.abspath(__file__)) workloads_dir = os.path.join(workloads_dir, 'workloads') - logging.debug('%14s - Workdir: %s', 'Workload', workloads_dir) + log.debug('Workdir: %s', workloads_dir) sys.path.insert(0, workloads_dir) - logging.debug('%14s - Syspath: %s', 'Workload', format(sys.path)) + log.debug('Syspath: %s', sys.path) for filepath in glob(os.path.join(workloads_dir, '*.py')): filename = os.path.splitext(os.path.basename(filepath))[0] - logging.debug('%14s - Filename: %s', 'Workload', filename) + log.debug('Filename: %s', filename) # Ignore __init__ files if filename.startswith('__'): @@ -113,10 +112,9 @@ class Workload(object): if int(count) >= 1: return True - logging.warning('%14s - Package [%s] not installed', - 'Workload', package) - logging.warning('%14s - Workload [%s] disabled', - 'Workload', aw.class_name) + log = logging.getLogger('Workload') + log.warning('Package [%s] not installed', package) + log.warning('Workload [%s] disabled', aw.class_name) return False def __init__(self, test_env): @@ -127,12 +125,11 @@ class Workload(object): """ self.te = test_env self.target = test_env.target - self.logger = self.target.logger + self._log = logging.getLogger('Workload') - logging.debug('%14s - Building list of available workloads...', 'Workload') wloads = Workload.availables(self.target) - logging.info('%14s - Workloads available on target:', 'Workload') - logging.info('%14s - %s', 'Workload', wloads) + self._log.info('Workloads available on target:') + self._log.info(' %s', wloads) def _adb(self, cmd): return 'adb -s {} {}'.format(self.target.adb_name, cmd) diff --git a/libs/utils/android/workloads/jankbench.py b/libs/utils/android/workloads/jankbench.py index b510ae838..e90664fde 100644 --- a/libs/utils/android/workloads/jankbench.py +++ b/libs/utils/android/workloads/jankbench.py @@ -17,11 +17,11 @@ import re import os +import logging from subprocess import Popen, PIPE from android import Screen, System, Workload -import logging # Available test workloads _jankbench = { @@ -61,14 +61,10 @@ class Jankbench(Workload): # Package required by this workload package = 'com.android.benchmark' - # Setup logger - logger = logging.getLogger('Jankbench') - logger.setLevel(logging.INFO) - - def __init__(self, test_env): super(Jankbench, self).__init__(test_env) - logging.debug('%14s - Workload created', 'Jankbench') + self._log = logging.getLogger('Jankbench') + self._log.debug('Workload created') def run(self, exp_dir, test_name, iterations, collect=''): # Setup test id @@ -96,21 +92,21 @@ class Jankbench(Workload): # Clear logcat os.system(self._adb('logcat -c')); - self.logger.debug('Start Jank Benchmark [%d:%s]', test_id, test_name) + self._log.debug('Start Jank Benchmark [%d:%s]', test_id, test_name) test_cmd = 'am start -n "com.android.benchmark/.app.RunLocalBenchmarksActivity" '\ '--eia "com.android.benchmark.EXTRA_ENABLED_BENCHMARK_IDS" 
{0} '\ '--ei "com.android.benchmark.EXTRA_RUN_COUNT" {1}'\ .format(test_id, iterations) - self.logger.info(test_cmd) + self._log.info(test_cmd) self.target.execute(test_cmd); # Parse logcat output lines logcat_cmd = self._adb( 'logcat ActivityManager:* System.out:I *:S BENCH:*'\ .format(self.target.adb_name)) - self.logger.info("%s", logcat_cmd) + self._log.info(logcat_cmd) - self.logger.debug("Iterations:") + self._log.debug('Iterations:') logcat = Popen(logcat_cmd, shell=True, stdout=PIPE) while True: @@ -122,30 +118,30 @@ class Jankbench(Workload): if match: if 'energy' in collect and self.te.emeter: self.te.emeter.reset() - self.logger.debug("Benchmark started!") + self._log.debug('Benchmark started!') # Benchmark completed trigger match = JANKBENCH_BENCHMARK_DONE_RE.search(message) if match: if 'energy' in collect and self.te.emeter: nrg_report = self.te.emeter.report(exp_dir) - self.logger.debug("Benchmark done!") + self._log.debug('Benchmark done!') break # Iteration completd match = JANKBENCH_ITERATION_COUNT_RE.search(message) if match: - self.logger.debug("Iteration %2d:", - int(match.group('iteration'))+1) + self._log.debug('Iteration %2d:', + int(match.group('iteration'))+1) # Iteration metrics match = JANKBENCH_ITERATION_METRICS_RE.search(message) if match: - self.logger.info(" Mean: %7.3f JankP: %7.3f StdDev: %7.3f Count Bad: %4d Count Jank: %4d", - float(match.group('mean')), - float(match.group('junk_p')), - float(match.group('std_dev')), - int(match.group('count_bad')), - int(match.group('count_junk'))) + self._log.info(' Mean: %7.3f JankP: %7.3f StdDev: %7.3f Count Bad: %4d Count Jank: %4d', + float(match.group('mean')), + float(match.group('junk_p')), + float(match.group('std_dev')), + int(match.group('count_bad')), + int(match.group('count_junk'))) # get results db_file = os.path.join(exp_dir, JANKBENCH_DB_NAME) diff --git a/libs/utils/android/workloads/youtube.py b/libs/utils/android/workloads/youtube.py index 142bef49b..4aa333359 100644 --- a/libs/utils/android/workloads/youtube.py +++ b/libs/utils/android/workloads/youtube.py @@ -17,11 +17,11 @@ import re import os +import logging from android import Screen, Workload, System from time import sleep -import logging class YouTube(Workload): """ @@ -32,14 +32,10 @@ class YouTube(Workload): package = 'com.google.android.youtube' action = 'android.intent.action.VIEW' - # Setup logger - logger = logging.getLogger('YouTube') - logger.setLevel(logging.INFO) - - def __init__(self, test_env): super(YouTube, self).__init__(test_env) - self.logger.debug('%14s - Workload created', 'YouTube') + self._log = logging.getLogger('YouTube') + self._log.debug('Workload created') def run(self, exp_dir, video_url, video_duration_s, collect=''): @@ -72,7 +68,7 @@ class YouTube(Workload): self.te.emeter.reset() # Wait until the end of the video - self.logger.info("Play video for %d [s]", video_duration_s) + self._log.info('Play video for %d [s]', video_duration_s) sleep(video_duration_s) # Stop energy collection diff --git a/libs/utils/conf.py b/libs/utils/conf.py index 98b30d544..dc19b2d92 100644 --- a/libs/utils/conf.py +++ b/libs/utils/conf.py @@ -16,9 +16,39 @@ # import json -import logging import os import re +import logging +import logging.config + + +class LisaLogging(object): + + @classmethod + def setup(self, filepath='logging.conf', level=logging.INFO): + """ + Initialize logging used for all the LISA modules. + + :param filepath: the relative or absolute path of the logging configuration to use. 
+ Relative path uses the LISA_HOME environment variable + has base folder. + :type filepath: str + + :param level: the default log level to enable, INFO by default + :type level: logging. or int in [0..50] + """ + + # Load the specified logfile using an absolute path + basepath = os.path.dirname(__file__).replace('/libs/utils', '') + filepath = os.path.join(basepath, filepath) + if not os.path.exists(filepath): + raise ValueError('Logging configuration file not found in: {}'\ + .format(filepath)) + logging.config.fileConfig(filepath) + logging.getLogger().setLevel(level) + + logging.info('Using LISA logging configuration:') + logging.info(' %s', filepath) class JsonConf(object): @@ -36,11 +66,15 @@ class JsonConf(object): ... */ """ + + # Setup logging + self._log = logging.getLogger('JsonConf') + if not os.path.isfile(self.filename): raise RuntimeError( 'Missing configuration file: {}'.format(self.filename) ) - logging.debug('loading JSON...') + self._log.debug('loading JSON...') with open(self.filename) as fh: content = ''.join(fh.readlines()) @@ -60,7 +94,8 @@ class JsonConf(object): # Return json file self.json = json.loads(content, parse_int=int) - logging.debug('Loaded JSON configuration:\n%s', self.json) + self._log.debug('Loaded JSON configuration:') + self._log.debug(' %s', self.json) return self.json diff --git a/libs/utils/energy.py b/libs/utils/energy.py index 36166de12..7c508323b 100644 --- a/libs/utils/energy.py +++ b/libs/utils/energy.py @@ -17,15 +17,16 @@ import devlib import json -import logging import os import psutil import time +import logging from collections import namedtuple from subprocess import Popen, PIPE, STDOUT from time import sleep + # Default energy measurements for each board DEFAULT_ENERGY_METER = { @@ -84,24 +85,28 @@ class EnergyMeter(object): if not self._res_dir: self._res_dir = '/tmp' + # Setup logging + self._log = logging.getLogger('EnergyMeter') + @staticmethod def getInstance(target, conf, force=False, res_dir=None): if not force and EnergyMeter._meter: return EnergyMeter._meter + log = logging.getLogger('EnergyMeter') + # Initialize energy meter based on configuration if 'emeter' in conf: emeter = conf['emeter'] - logging.debug('%14s - using user-defined configuration', - 'EnergyMeter') + log.debug('using user-defined configuration') # Initialize energy probe to board default elif 'board' in conf and \ conf['board'] in DEFAULT_ENERGY_METER: emeter = DEFAULT_ENERGY_METER[conf['board']] - logging.debug('%14s - using default energy meter for [%s]', - 'EnergyMeter', conf['board']) + log.debug('using default energy meter for [%s]', + conf['board']) else: return None @@ -112,7 +117,7 @@ class EnergyMeter(object): elif emeter['instrument'] == 'acme': EnergyMeter._meter = ACME(target, emeter, res_dir) - logging.debug('%14s - Results dir: %s', 'EnergyMeter', res_dir) + log.debug('Results dir: %s', res_dir) return EnergyMeter._meter def sample(self): @@ -136,23 +141,22 @@ class HWMon(EnergyMeter): self.readings = {} if 'hwmon' not in self._target.modules: - logging.info('%14s - HWMON module not enabled', 'HWMon') - logging.warning('%14s - Energy sampling disabled by configuration', - 'HWMon') + self._log.info('HWMON module not enabled') + self._log.warning('Energy sampling disabled by configuration') return # Initialize HWMON instrument - logging.info('%14s - Scanning for HWMON channels, may take some time...', 'HWMon') + self._log.info('Scanning for HWMON channels, may take some time...') self._hwmon = devlib.HwmonInstrument(self._target) # Configure 
channels for energy measurements - logging.debug('%14s - Enabling channels %s', 'HWMon', conf['conf']) + self._log.debug('Enabling channels %s', conf['conf']) self._hwmon.reset(**conf['conf']) # Logging enabled channels - logging.info('%14s - Channels selected for energy sampling:', 'HWMon') + self._log.info('Channels selected for energy sampling:') for channel in self._hwmon.active_channels: - logging.info('%14s - %s', 'HWMon', channel.label) + self._log.info(' %s', channel.label) # record the HWMon channels self._channels = conf.get('channel_map', { @@ -182,7 +186,7 @@ class HWMon(EnergyMeter): self.readings[label]['last'] = value self.readings[label]['total'] += self.readings[label]['delta'] - logging.debug('SAMPLE: %s', self.readings) + self._log.debug('SAMPLE: %s', self.readings) return self.readings def reset(self): @@ -192,7 +196,7 @@ class HWMon(EnergyMeter): for label in self.readings: self.readings[label]['delta'] = 0 self.readings[label]['total'] = 0 - logging.debug('RESET: %s', self.readings) + self._log.debug('RESET: %s', self.readings) def report(self, out_dir, out_file='energy.json'): if self._hwmon is None: @@ -204,7 +208,7 @@ class HWMon(EnergyMeter): for channel in self._channels: label = self._channels[channel] nrg_total = nrg[label]['total'] - logging.debug('%14s - Energy [%16s]: %.6f', 'HWMon', label, nrg_total) + self._log.debug('Energy [%16s]: %.6f', label, nrg_total) clusters_nrg[channel] = nrg_total # Dump data as JSON file @@ -226,19 +230,19 @@ class AEP(EnergyMeter): self.time = {} # Configure channels for energy measurements - logging.info('%14s - AEP configuration', 'AEP') - logging.info('%14s - %s', 'AEP', conf) + self._log.info('AEP configuration') + self._log.info(' %s', conf) self._aep = devlib.EnergyProbeInstrument( self._target, labels=conf['channel_map'], **conf['conf']) # Configure channels for energy measurements - logging.debug('%14s - Enabling channels', 'AEP') + self._log.debug('Enabling channels') self._aep.reset() # Logging enabled channels - logging.info('%14s - Channels selected for energy sampling:\n%s', - 'AEP', str(self._aep.active_channels)) - logging.debug('%14s - Results dir: %s', 'AEP', self._res_dir) + self._log.info('Channels selected for energy sampling:') + self._log.info(' %s', str(self._aep.active_channels)) + self._log.debug('Results dir: %s', self._res_dir) def _get_energy(self, samples, time, idx, site): pwr_total = 0 @@ -275,7 +279,7 @@ class AEP(EnergyMeter): return self.channels = [] - logging.debug('RESET: %s', self.channels) + self._log.debug('RESET: %s', self.channels) self._aep.start() self.time['start'] = time.time() @@ -291,8 +295,8 @@ class AEP(EnergyMeter): # Reformat data for output generation channels_nrg = {} for channel in self.channels: - logging.debug('%14s - Energy [%16s]: %.6f', 'AEP', - channel.site, channel.nrg) + self._log.debug('Energy [%16s]: %.6f', + channel.site, channel.nrg) channels_nrg[channel.site] = channel.nrg # Dump data as JSON file @@ -302,6 +306,18 @@ class AEP(EnergyMeter): return EnergyReport(channels_nrg, nrg_file) + +_acme_install_instructions = ''' + + If you need to measure energy using an ACME EnergyProbe, + please do follow installation instructions available here: + https://github.com/ARM-software/lisa/wiki/Energy-Meters-Requirements#iiocapture---baylibre-acme-cape + + Othwerwise, please select a different energy meter in your + configuration file. 
+ +''' + class ACME(EnergyMeter): """ BayLibre's ACME board based EnergyMeter @@ -318,26 +334,20 @@ class ACME(EnergyMeter): }) self._iio = [None] * len(self._channels) - logging.info('%14s - ACME configuration:', 'ACME') - logging.info('%14s - binary: %s', 'ACME', self._iiocapturebin) - logging.info('%14s - device: %s', 'ACME', self._hostname) - logging.info('%14s - channels:', 'ACME') + self._log.info('ACME configuration:') + self._log.info(' binary: %s', self._iiocapturebin) + self._log.info(' device: %s', self._hostname) + self._log.info(' channels:') for channel in self._channels: - logging.info('%14s - %s', 'ACME', self._str(channel)) + self._log.info(' %s', self._str(channel)) # Check if iio-capture binary is available try: p = Popen([self._iiocapturebin, '-h'], stdout=PIPE, stderr=STDOUT) except: - logging.error('%14s - iio-capture binary [%s] not available', - 'ACME', self._iiocapturebin) - logging.warning('\n\n'\ - ' If you need to measure energy using an ACME EnergyProbe,\n'\ - ' please do follow installation instructions available here:\n'\ - ' https://github.com/ARM-software/lisa/wiki/Energy-Meters-Requirements#iiocapture---baylibre-acme-cape\n'\ - ' Othwerwise, please select a different energy meter in your\n'\ - ' configuration file'\ - ) + self._log.error('iio-capture binary [%s] not available', + self._iiocapturebin) + self._log.warning(_acme_install_instructions) raise RuntimeError('Missing iio-capture binary') def sample(self): @@ -361,9 +371,9 @@ class ACME(EnergyMeter): continue for channel in self._channels: if self._iio_device(channel) in proc.cmdline(): - logging.debug('%14s - Killing previous iio-capture for [%s]', - 'ACME', self._iio_device(channel)) - logging.debug('%14s - %s', 'ACME', proc.cmdline()) + self._log.debug('Killing previous iio-capture for [%s]', + self._iio_device(channel)) + self._log.debug(proc.cmdline()) proc.kill() wait_for_termination = 2 @@ -397,19 +407,19 @@ class ACME(EnergyMeter): self._iio[ch_id].poll() if self._iio[ch_id].returncode: - logging.error('%14s - Failed to run %s for %s', 'ACME', - self._iiocapturebin, self._str(channel)) - logging.warning('\n\n'\ + self._log.error('Failed to run %s for %s', + self._iiocapturebin, self._str(channel)) + self._log.warning('\n\n'\ ' Make sure there are no iio-capture processes\n'\ ' connected to %s and device %s\n', self._hostname, self._str(channel)) out, _ = self._iio[ch_id].communicate() - logging.error('%14s - Output: [%s]', 'ACME', out.strip()) + self._log.error('Output: [%s]', out.strip()) self._iio[ch_id] = None raise RuntimeError('iio-capture connection error') - logging.debug('%14s - Started %s on %s...', 'ACME', - self._iiocapturebin, self._str(channel)) + self._log.debug('Started %s on %s...', + self._iiocapturebin, self._str(channel)) def report(self, out_dir, out_energy='energy.json'): """ @@ -433,10 +443,10 @@ class ACME(EnergyMeter): if self._iio[ch_id].returncode: # returncode not None means that iio-capture has terminated # already, so there must have been an error - logging.error('%14s - %s terminated for %s', 'ACME', - self._iiocapturebin, self._str(channel)) + self._log.error('%s terminated for %s', + self._iiocapturebin, self._str(channel)) out, _ = self._iio[ch_id].communicate() - logging.error('%14s - [%s]', 'ACME', out) + self._log.error('[%s]', out) self._iio[ch_id] = None continue @@ -446,14 +456,14 @@ class ACME(EnergyMeter): self._iio[ch_id].wait() self._iio[ch_id] = None - logging.debug('%14s - Completed IIOCapture for %s...', - 'ACME', self._str(channel)) + 
self._log.debug('Completed IIOCapture for %s...', + self._str(channel)) # iio-capture return "energy=value", add a simple format check if '=' not in out: - logging.error('%14s - Bad output format for %s:', - 'ACME', self._str(channel)) - logging.error('%14s - [%s]', 'ACME', out) + self._log.error('Bad output format for %s:', + self._str(channel)) + self._log.error('[%s]', out) continue # Build energy counter object @@ -463,8 +473,8 @@ class ACME(EnergyMeter): nrg[key] = float(val) channels_stats[channel] = nrg - logging.debug('%14s - %s', 'ACME', self._str(channel)) - logging.debug('%14s - %s', 'ACME', nrg) + self._log.debug(self._str(channel)) + self._log.debug(nrg) # Save CSV samples file to out_dir os.system('mv {}/samples_{}.csv {}' diff --git a/libs/utils/env.py b/libs/utils/env.py index 8f5fc450e..2d952b96f 100644 --- a/libs/utils/env.py +++ b/libs/utils/env.py @@ -123,38 +123,35 @@ class TestEnv(ShareState): self.CATAPULT_HOME = os.environ.get('CATAPULT_HOME', os.path.join(self.LISA_HOME, 'tools', 'catapult')) + # Setup logging + self._log = logging.getLogger('TestEnv') + # Compute base installation path - logging.info('%14s - Using base path: %s', - 'Target', basepath) + self._log.info('Using base path: %s', basepath) # Setup target configuration if isinstance(target_conf, dict): - logging.info('%14s - Loading custom (inline) target configuration', - 'Target') + self._log.info('Loading custom (inline) target configuration') self.conf = target_conf elif isinstance(target_conf, str): - logging.info('%14s - Loading custom (file) target configuration', - 'Target') - self.conf = TestEnv.loadTargetConfig(target_conf) + self._log.info('Loading custom (file) target configuration') + self.conf = self.loadTargetConfig(target_conf) elif target_conf is None: - logging.info('%14s - Loading default (file) target configuration', - 'Target') - self.conf = TestEnv.loadTargetConfig() - logging.debug('%14s - Target configuration %s', 'Target', self.conf) + self._log.info('Loading default (file) target configuration') + self.conf = self.loadTargetConfig() + self._log.debug('Target configuration %s', self.conf) # Setup test configuration if test_conf: if isinstance(test_conf, dict): - logging.info('%14s - Loading custom (inline) test configuration', - 'Target') + self._log.info('Loading custom (inline) test configuration') self.test_conf = test_conf elif isinstance(test_conf, str): - logging.info('%14s - Loading custom (file) test configuration', - 'Target') - self.test_conf = TestEnv.loadTargetConfig(test_conf) + self._log.info('Loading custom (file) test configuration') + self.test_conf = self.loadTargetConfig(test_conf) else: raise ValueError('test_conf must be either a dictionary or a filepath') - logging.debug('%14s - Test configuration %s', 'Target', self.conf) + self._log.debug('Test configuration %s', self.conf) # Setup target working directory if 'workdir' in self.conf: @@ -213,8 +210,8 @@ class TestEnv(ShareState): self.res_dir = datetime.datetime.now()\ .strftime(self.res_dir + '/%Y%m%d_%H%M%S') if wipe and os.path.exists(self.res_dir): - logging.warning('%14s - Wipe previous contents of the results folder:', 'TestEnv') - logging.warning('%14s - %s', 'TestEnv', self.res_dir) + self._log.warning('Wipe previous contents of the results folder:') + self._log.warning(' %s', self.res_dir) shutil.rmtree(self.res_dir, ignore_errors=True) if not os.path.exists(self.res_dir): os.makedirs(self.res_dir) @@ -227,15 +224,14 @@ class TestEnv(ShareState): # Initialize energy probe instrument 
self._init_energy(True) - logging.info('%14s - Set results folder to:', 'TestEnv') - logging.info('%14s - %s', 'TestEnv', self.res_dir) - logging.info('%14s - Experiment results available also in:', 'TestEnv') - logging.info('%14s - %s', 'TestEnv', res_lnk) + self._log.info('Set results folder to:') + self._log.info(' %s', self.res_dir) + self._log.info('Experiment results available also in:') + self._log.info(' %s', res_lnk) self._initialized = True - @staticmethod - def loadTargetConfig(filepath='target.config'): + def loadTargetConfig(self, filepath='target.config'): """ Load the target configuration from the specified file. @@ -251,17 +247,14 @@ class TestEnv(ShareState): # Loading default target configuration conf_file = os.path.join(basepath, filepath) - logging.info('%14s - Loading target configuration [%s]...', - 'Target', conf_file) + + self._log.info('Loading target configuration [%s]...', conf_file) conf = JsonConf(conf_file) conf.load() return conf.json def _init(self, force = False): - if self._feature('debug'): - logging.getLogger().setLevel(logging.DEBUG) - # Initialize target self._init_target(force) @@ -289,8 +282,8 @@ class TestEnv(ShareState): [i for i,v in enumerate(self.target.core_clusters) if v == c]) self.topology = Topology(clusters=CLUSTERS) - logging.info(r'%14s - Topology:', 'Target') - logging.info(r'%14s - %s', 'Target', CLUSTERS) + self._log.info('Topology:') + self._log.info(' %s', CLUSTERS) # Initialize the platform descriptor self._init_platform() @@ -350,11 +343,9 @@ class TestEnv(ShareState): else: raise RuntimeError('Android SDK not found, ANDROID_HOME must be defined!') - logging.info(r'%14s - External tools using:', 'Target') - logging.info(r'%14s - ANDROID_HOME: %s', 'Target', - self.ANDROID_HOME) - logging.info(r'%14s - CATAPULT_HOME: %s', 'Target', - self.CATAPULT_HOME) + self._log.info('External tools using:') + self._log.info(' ANDROID_HOME: %s', self.ANDROID_HOME) + self._log.info(' CATAPULT_HOME: %s', self.CATAPULT_HOME) if not os.path.exists(self._adb): raise RuntimeError('\nADB binary not found\n\t{}\ndoes not exists!\n\n' @@ -427,8 +418,7 @@ class TestEnv(ShareState): if module in self.__modules: self.__modules.remove(module) - logging.info(r'%14s - Devlib modules to load: %s', - 'Target', self.__modules) + self._log.info('Devlib modules to load: %s', self.__modules) ######################################################################## # Devlib target setup (based on target.config::platform) @@ -448,20 +438,18 @@ class TestEnv(ShareState): port = str(self.conf['port']) device = '{}:{}'.format(host, port) self.__connection_settings = {'device' : device} - logging.info(r'%14s - Connecting Android target [%s]', - 'Target', device) + self._log.info('Connecting Android target [%s]', device) else: - logging.info(r'%14s - Connecting %s target:', 'Target', - platform_type) + self._log.info('Connecting %s target:', platform_type) for key in self.__connection_settings: - logging.info(r'%14s - %10s : %s', 'Target', - key, self.__connection_settings[key]) + self._log.info('%10s : %s', key, + self.__connection_settings[key]) - logging.info(r'%14s - Connection settings:', 'Target') - logging.info(r'%14s - %s', 'Target', self.__connection_settings) + self._log.info('Connection settings:') + self._log.info(' %s', self.__connection_settings) if platform_type.lower() == 'linux': - logging.debug('%14s - Setup LINUX target...', 'Target') + self._log.debug('Setup LINUX target...') if "host" not in self.__connection_settings: raise ValueError('Missing "host" 
param in Linux target conf') @@ -471,14 +459,14 @@ class TestEnv(ShareState): load_default_modules = False, modules = self.__modules) elif platform_type.lower() == 'android': - logging.debug('%14s - Setup ANDROID target...', 'Target') + self._log.debug('Setup ANDROID target...') self.target = devlib.AndroidTarget( platform = platform, connection_settings = self.__connection_settings, load_default_modules = False, modules = self.__modules) elif platform_type.lower() == 'host': - logging.debug('%14s - Setup HOST target...', 'Target') + self._log.debug('Setup HOST target...') self.target = devlib.LocalLinuxTarget( platform = platform, load_default_modules = False, @@ -487,14 +475,14 @@ class TestEnv(ShareState): raise ValueError('Config error: not supported [platform] type {}'\ .format(platform_type)) - logging.debug('%14s - Checking target connection...', 'Target') - logging.debug('%14s - Target info:', 'Target') - logging.debug('%14s - ABI: %s', 'Target', self.target.abi) - logging.debug('%14s - CPUs: %s', 'Target', self.target.cpuinfo) - logging.debug('%14s - Clusters: %s', 'Target', self.target.core_clusters) + self._log.debug('Checking target connection...') + self._log.debug('Target info:') + self._log.debug(' ABI: %s', self.target.abi) + self._log.debug(' CPUs: %s', self.target.cpuinfo) + self._log.debug(' Clusters: %s', self.target.core_clusters) - logging.info('%14s - Initializing target workdir:', 'Target') - logging.info('%14s - %s', 'Target', self.target.working_directory) + self._log.info('Initializing target workdir:') + self._log.info(' %s', self.target.working_directory) tools_to_install = [] if self.__tools: for tool in self.__tools: @@ -507,12 +495,11 @@ class TestEnv(ShareState): # Verify that all the required modules have been initialized for module in self.__modules: - logging.debug('%14s - Check for module [%s]...', 'Target', module) + self._log.debug('Check for module [%s]...', module) if not hasattr(self.target, module): - logging.warning('%14s - Unable to initialize [%s] module', - 'Target', module) - logging.error('%14s - Fix your target kernel configuration or ' - 'disable module from configuration', 'Target') + self._log.warning('Unable to initialize [%s] module', module) + self._log.error('Fix your target kernel configuration or ' + 'disable module from configuration') raise RuntimeError('Failed to initialized [{}] module, ' 'update your kernel or test configurations'.format(module)) @@ -554,13 +541,13 @@ class TestEnv(ShareState): ) if events: - logging.info('%14s - Enabled tracepoints:', 'FTrace') + self._log.info('Enabled tracepoints:') for event in events: - logging.info('%14s - %s', 'FTrace', event) + self._log.info(' %s', event) if functions: - logging.info('%14s - Kernel functions profiled:', 'FTrace') + self._log.info('Kernel functions profiled:') for function in functions: - logging.info('%14s - %s', 'FTrace', function) + self._log.info(' %s', function) return self.ftrace @@ -602,9 +589,7 @@ class TestEnv(ShareState): self.platform['freqs'][cluster_id] = \ self.target.cpufreq.list_frequencies(core_id) else: - logging.warn( - '%14s - Unable to identify cluster frequencies', - 'Target') + self._log.warning('Unable to identify cluster frequencies') # TODO: get the performance boundaries in case of intel_pstate driver @@ -613,12 +598,11 @@ class TestEnv(ShareState): def _load_em(self, board): em_path = os.path.join(basepath, 'libs/utils/platforms', board.lower() + '.json') - logging.debug('%14s - Trying to load default EM from %s', - 'Platform', em_path) + 
self._log.debug('Trying to load default EM from %s', em_path) if not os.path.exists(em_path): return None - logging.info('%14s - Loading default EM:', 'Platform') - logging.info('%14s - %s', 'Platform', em_path) + self._log.info('Loading default EM:') + self._log.info(' %s', em_path) board = JsonConf(em_path) board.load() if 'nrg_model' not in board.json: @@ -628,12 +612,11 @@ class TestEnv(ShareState): def _load_board(self, board): board_path = os.path.join(basepath, 'libs/utils/platforms', board.lower() + '.json') - logging.debug('%14s - Trying to load board descriptor from %s', - 'Platform', board_path) + self._log.debug('Trying to load board descriptor from %s', board_path) if not os.path.exists(board_path): return None - logging.info('%14s - Loading board:', 'Platform') - logging.info('%14s - %s', 'Platform', board_path) + self._log.info('Loading board:') + self._log.info(' %s', board_path) board = JsonConf(board_path) board.load() if 'board' not in board.json: @@ -666,14 +649,12 @@ class TestEnv(ShareState): # Adding topology information self.platform['topology'] = self.topology.get_level("cluster") - logging.debug('%14s - Platform descriptor initialized\n%s', - 'Platform', self.platform) + self._log.debug('Platform descriptor initialized\n%s', self.platform) # self.platform_dump('./') def platform_dump(self, dest_dir, dest_file='platform.json'): plt_file = os.path.join(dest_dir, dest_file) - logging.debug('%14s - Dump platform descriptor in [%s]', - 'Platform', plt_file) + self._log.debug('Dump platform descriptor in [%s]', plt_file) with open(plt_file, 'w') as ofile: json.dump(self.platform, ofile, sort_keys=True, indent=4) return (self.platform, plt_file) @@ -692,25 +673,23 @@ class TestEnv(ShareState): required = any(['rt-app' in wl['type'] for wl in wloads]) if not required: - logging.debug('No RT-App workloads, skipping calibration') + self._log.debug('No RT-App workloads, skipping calibration') return if not force and 'rtapp-calib' in self.conf: - logging.warning( - r'%14s - Using configuration provided RTApp calibration', - 'Target') + self._log.warning('Using configuration provided RTApp calibration') self._calib = { int(key): int(value) for key, value in self.conf['rtapp-calib'].items() } else: - logging.info(r'%14s - Calibrating RTApp...', 'Target') + self._log.info('Calibrating RTApp...') self._calib = RTA.calibrate(self.target) - logging.info(r'%14s - Using RT-App calibration values:', 'Target') - logging.info(r'%14s - %s', 'Target', - "{" + ", ".join('"%r": %r' % (key, self._calib[key]) - for key in sorted(self._calib)) + "}") + self._log.info('Using RT-App calibration values:') + self._log.info(' %s', + "{" + ", ".join('"%r": %r' % (key, self._calib[key]) + for key in sorted(self._calib)) + "}") return self._calib def resolv_host(self, host=None): @@ -718,7 +697,7 @@ class TestEnv(ShareState): host = self.conf['host'] # Refresh ARP for local network IPs - logging.debug('%14s - Collecting all Bcast address', 'HostResolver') + self._log.debug('Collecting all Bcast address') output = os.popen(r'ifconfig').read().split('\n') for line in output: match = IFCFG_BCAST_RE.search(line) @@ -727,12 +706,12 @@ class TestEnv(ShareState): baddr = match.group(1) try: cmd = r'nmap -T4 -sP {}/24 &>/dev/null'.format(baddr.strip()) - logging.debug('%14s - %s', 'HostResolver', cmd) + self._log.debug(cmd) os.popen(cmd) except RuntimeError: - logging.warning('%14s - Nmap not available, try IP lookup using broadcast ping') + self._log.warning('Nmap not available, try IP lookup using broadcast 
ping') cmd = r'ping -b -c1 {} &>/dev/null'.format(baddr) - logging.debug('%14s - %s', 'HostResolver', cmd) + self._log.debug(cmd) os.popen(cmd) return self.parse_arp_cache(host) @@ -777,14 +756,13 @@ class TestEnv(ShareState): if not ipaddr or not macaddr: raise ValueError('Unable to lookup for target IP/MAC address') - logging.info('%14s - Target (%s) at IP address: %s', - 'HostResolver', macaddr, ipaddr) + self._log.info('Target (%s) at IP address: %s', macaddr, ipaddr) return (macaddr, ipaddr) def reboot(self, reboot_time=120, ping_time=15): # Send remote target a reboot command if self._feature('no-reboot'): - logging.warning('%14s - Reboot disabled by conf features', 'Reboot') + self._log.warning('Reboot disabled by conf features') else: if 'reboot_time' in self.conf: reboot_time = int(self.conf['reboot_time']) @@ -799,27 +777,28 @@ class TestEnv(ShareState): self.target.execute('sleep 2 && reboot -f &', as_root=True) # Wait for the target to complete the reboot - logging.info('%14s - Waiting up to %s[s] for target [%s] to reboot...', - 'Reboot', reboot_time, self.ip) + self._log.info('Waiting up to %s[s] for target [%s] to reboot...', + reboot_time, self.ip) ping_cmd = "ping -c 1 {} >/dev/null".format(self.ip) elapsed = 0 start = time.time() while elapsed <= reboot_time: time.sleep(ping_time) - logging.debug('%14s - Trying to connect to [%s] target...', - 'Reboot', self.ip) + self._log.debug('Trying to connect to [%s] target...', self.ip) if os.system(ping_cmd) == 0: break elapsed = time.time() - start if elapsed > reboot_time: if self.mac: - logging.warning('%14s - target [%s] not responding to \ - PINGs, trying to resolve MAC address...', 'Reboot', self.ip) + self._log.warning('target [%s] not responding to PINGs, ' + 'trying to resolve MAC address...', + self.ip) (self.mac, self.ip) = self.resolv_host(self.mac) else: - logging.warning('%14s - target [%s] not responding to PINGs, trying to continue...', - 'Reboot', self.ip) + self._log.warning('target [%s] not responding to PINGs, ' + 'trying to continue...', + self.ip) # Force re-initialization of all the devlib modules force = True @@ -842,16 +821,14 @@ class TestEnv(ShareState): if self.kernel == tc['kernel'] and self.dtb == tc['dtb']: return - logging.info('%14s - Install kernel [%s] on target...', - 'KernelSetup', tc['kernel']) + self._log.info('Install kernel [%s] on target...', tc['kernel']) # Install kernel/dtb via FTFP if self._feature('no-kernel'): - logging.warning('%14s - Kernel deploy disabled by conf features', - 'KernelSetup') + self._log.warning('Kernel deploy disabled by conf features') elif 'tftp' in self.conf: - logging.info('%14s - Deploy kernel via TFTP...', 'KernelSetup') + self._log.info('Deploy kernel via TFTP...') # Deploy kernel in TFTP folder (mandatory) if 'kernel' not in tc or not tc['kernel']: @@ -861,10 +838,9 @@ class TestEnv(ShareState): # Deploy DTB in TFTP folder (if provided) if 'dtb' not in tc or not tc['dtb']: - logging.debug('%14s - DTB not provided, using existing one', - 'KernelSetup') - logging.debug('%14s - Current conf:\n%s', 'KernelSetup', tc) - logging.warn('%14s - Using pre-installed DTB', 'KernelSetup') + self._log.debug('DTB not provided, using existing one') + self._log.debug('Current conf:\n%s', tc) + self._log.warning('Using pre-installed DTB') else: self.tftp_deploy(tc['dtb']) @@ -881,7 +857,7 @@ class TestEnv(ShareState): return # Reboot target - logging.info('%14s - Rebooting taget...', 'KernelSetup') + self._log.info('Rebooting taget...') self.reboot() @@ -898,12 +874,10 @@ 
class TestEnv(ShareState): dst = os.path.join(dst, os.path.basename(src)) cmd = 'cp {} {} && sync'.format(src, dst) - logging.info('%14s - Deploy %s into %s', - 'TFTP', src, dst) + self._log.info('Deploy %s into %s', src, dst) result = os.system(cmd) if result != 0: - logging.error('%14s - Failed to deploy image: %s', - 'FTFP', src) + self._log.error('Failed to deploy image: %s', src) raise ValueError('copy error') def _feature(self, feature): diff --git a/libs/utils/executor.py b/libs/utils/executor.py index a93eaf6bc..8bef1419f 100644 --- a/libs/utils/executor.py +++ b/libs/utils/executor.py @@ -28,11 +28,6 @@ import trappy # Configure logging import logging -reload(logging) -logging.basicConfig( - format='%(asctime)-9s %(levelname)-8s: %(message)s', - level=logging.INFO, - datefmt='%I:%M:%S') # Add JSON parsing support from conf import JsonConf @@ -82,14 +77,15 @@ class Executor(): self._default_cgroup = None self._cgroup = None + # Setup logging + self._log = logging.getLogger('Executor') + # Setup test configuration if isinstance(experiments_conf, dict): - logging.info('%14s - Loading custom (inline) test configuration', - 'Target') + self._log.info('Loading custom (inline) test configuration') self._experiments_conf = experiments_conf elif isinstance(experiments_conf, str): - logging.info('%14s - Loading custom (file) test configuration', - 'Target') + self._log.info('Loading custom (file) test configuration') json_conf = JsonConf(experiments_conf) self._experiments_conf = json_conf.load() else: @@ -113,30 +109,29 @@ class Executor(): * len(self._experiments_conf['wloads']) \ * len(self._experiments_conf['confs']) - self._print_section('Executor', 'Experiments configuration') + self._print_section('Experiments configuration') - logging.info('%14s - Configured to run:', 'Executor') + self._log.info('Configured to run:') - logging.info('%14s - %3d target configurations:', - 'Executor', len(self._experiments_conf['confs'])) + self._log.info(' %3d target configurations:', + len(self._experiments_conf['confs'])) target_confs = [conf['tag'] for conf in self._experiments_conf['confs']] target_confs = ', '.join(target_confs) - logging.info('%14s - %s', 'Executor', target_confs) + self._log.info(' %s', target_confs) - logging.info('%14s - %3d workloads (%d iterations each)', - 'Executor', len(self._experiments_conf['wloads']), - self._iterations) + self._log.info(' %3d workloads (%d iterations each)', + len(self._experiments_conf['wloads']), + self._iterations) wload_confs = ', '.join(self._experiments_conf['wloads']) - logging.info('%14s - %s', 'Executor', wload_confs) + self._log.info(' %s', wload_confs) - logging.info('%14s - Total: %d experiments', - 'Executor', self._exp_count) + self._log.info('Total: %d experiments', self._exp_count) - logging.info('%14s - Results will be collected under:', 'Executor') - logging.info('%14s - %s', 'Executor', self.te.res_dir) + self._log.info('Results will be collected under:') + self._log.info(' %s', self.te.res_dir) def run(self): - self._print_section('Executor', 'Experiments execution') + self._print_section('Experiments execution') self.experiments = [] @@ -163,9 +158,9 @@ class Executor(): exp_idx += 1 self._target_cleanup(tc) - self._print_section('Executor', 'Experiments execution completed') - logging.info('%14s - Results available in:', 'Executor') - logging.info('%14s - %s', 'Executor', self.te.res_dir) + self._print_section('Experiments execution completed') + self._log.info('Results available in:') + self._log.info(' %s', self.te.res_dir) 
################################################################################ @@ -179,15 +174,14 @@ class Executor(): if 'cgroups' not in self.target.modules: raise RuntimeError('CGroups module not available. Please ensure ' '"cgroups" is listed in your target/test modules') - logging.info(r'%14s - Initialize CGroups support...', 'CGroups') + self._log.info('Initialize CGroups support...') errors = False for kind in tc['cgroups']['conf']: - logging.info(r'%14s - Setup [%s] controller...', - 'CGroups', kind) + self._log.info('Setup [%s] CGroup controller...', kind) controller = self.target.cgroups.controller(kind) if not controller: - logging.warning(r'%14s - CGroups controller [%s] NOT available', - 'CGroups', kind) + self._log.warning('CGroups controller [%s] NOT available', + kind) errors = True return not errors @@ -199,12 +193,11 @@ class Executor(): def _setup_sched_features(self, tc): if 'sched_features' not in tc: - logging.debug('%14s - Configuration not provided', 'SchedFeatures') + self._log.debug('Scheduler features configuration not provided') return feats = tc['sched_features'].split(",") for feat in feats: - logging.info('%14s - Set scheduler feature: %s', - 'SchedFeatures', feat) + self._log.info('Set scheduler feature: %s', feat) self.target.execute('echo {} > /sys/kernel/debug/sched_features'.format(feat), as_root=True) @@ -215,8 +208,7 @@ class Executor(): self.te.run_dir = os.path.join( self.target.working_directory, TGT_RUN_DIR) # Create run folder as tmpfs - logging.debug('%14s - Setup RT-App run folder [%s]...', - 'TargetSetup', self.te.run_dir) + self._log.debug('Setup RT-App run folder [%s]...', self.te.run_dir) self.target.execute('[ -d {0} ] || mkdir {0}'\ .format(self.te.run_dir)) self.target.execute( @@ -235,26 +227,23 @@ class Executor(): # Probably the target doesn't have SELinux. No problem. self._old_selinux_mode = None else: - logging.warning('%14s - Setting target SELinux in permissive mode', - 'Executor') + self._log.warning('Setting target SELinux in permissive mode') self.target.execute('setenforce 0', as_root=True) def _setup_cpufreq(self, tc): if 'cpufreq' not in tc: - logging.warning(r'%14s - governor not specified, '\ - 'using currently configured governor', - 'CPUFreq') + self._log.warning('cpufreq governor not specified, ' + 'using currently configured governor') return cpufreq = tc['cpufreq'] - logging.info(r'%14s - Configuring all CPUs to use [%s] governor', - 'CPUFreq', cpufreq['governor']) + self._log.info('Configuring all CPUs to use [%s] cpufreq governor', + cpufreq['governor']) self.target.cpufreq.set_all_governors(cpufreq['governor']) if 'params' in cpufreq: - logging.info(r'%14s - governor params: %s', - 'CPUFreq', str(cpufreq['params'])) + self._log.info('governor params: %s', str(cpufreq['params'])) for cpu in self.target.list_online_cpus(): self.target.cpufreq.set_governor_tunables( cpu, @@ -274,9 +263,9 @@ class Executor(): for kind in tc['cgroups']['conf']: controller = self.target.cgroups.controller(kind) if not controller: - logging.warning(r'%14s - Configuration error: '\ - '[%s] contoller NOT supported', - 'CGroups', kind) + self._log.warning('Configuration error: ' + '[%s] contoller NOT supported', + kind) errors = True continue self._setup_controller(tc, controller) @@ -289,13 +278,13 @@ class Executor(): for name in tc['cgroups']['conf'][controller.kind]: if name[0] != '/': raise ValueError('Wrong CGroup name [{}]. ' - 'CGroups names must start by "/".'\ + 'CGroups names must start by "/".' 
.format(name)) group = controller.cgroup(name) if not group: - logging.warning(r'%14s - Configuration error: '\ - '[%s/%s] cgroup NOT available', - 'CGroups', kind, name) + self._log.warning('Configuration error: ' + '[%s/%s] cgroup NOT available', + kind, name) errors = True continue self._setup_group(tc, group) @@ -308,8 +297,8 @@ class Executor(): group.set(**tc['cgroups']['conf'][kind][name]) def _target_configure(self, tc): - self._print_header('TargetConfig', - r'configuring target for [{}] experiments'\ + self._print_header( + 'configuring target for [{}] experiments'\ .format(tc['tag'])) self._setup_kernel(tc) self._setup_sched_features(tc) @@ -321,14 +310,14 @@ class Executor(): has_flag = False else: has_flag = flag in tc['flags'] - logging.debug('%14s - Check if target conf [%s] has flag [%s]: %s', - 'TargetConf', tc['tag'], flag, has_flag) + self._log.debug('Check if target configuration [%s] has flag [%s]: %s', + tc['tag'], flag, has_flag) return has_flag def _target_cleanup(self, tc): if self._old_selinux_mode is not None: - logging.info('%14s - Restoring target SELinux mode: %s', - 'Executor', self._old_selinux_mode) + self._log.info('Restoring target SELinux mode: %s', + self._old_selinux_mode) self.target.execute('setenforce ' + self._old_selinux_mode, as_root=True) @@ -392,8 +381,7 @@ class Executor(): def _wload_rtapp(self, wl_idx, wlspec, cpus): conf = wlspec['conf'] - logging.debug(r'%14s - Configuring [%s] rt-app...', - 'RTApp', conf['class']) + self._log.debug('Configuring [%s] rt-app...', conf['class']) # Setup a default "empty" task name prefix if 'prefix' not in conf: @@ -409,8 +397,8 @@ class Executor(): # Load each task specification for task_name, task in conf['params'].items(): if task['kind'] not in wlgen.__dict__: - logging.error(r'%14s - RTA task of kind [%s] not supported', - 'RTApp', task['kind']) + self._log.error('RTA task of kind [%s] not supported', + task['kind']) raise ValueError('Configuration error - ' 'unsupported \'kind\' value for task [{}] '\ 'in RT-App workload specification'\ @@ -458,8 +446,7 @@ class Executor(): def _wload_perf_bench(self, wl_idx, wlspec, cpus): conf = wlspec['conf'] - logging.debug(r'%14s - Configuring perf_message...', - 'PerfMessage') + self._log.debug('Configuring perf_message...') if conf['class'] == 'messaging': perf_bench = wlgen.PerfMessaging(self.target, wl_idx) @@ -529,18 +516,18 @@ class Executor(): wload = experiment.wload tc_idx = tc['tag'] - self._print_title('Executor', 'Experiment {}/{}, [{}:{}] {}/{}'\ + self._print_title('Experiment {}/{}, [{}:{}] {}/{}'\ .format(exp_idx, self._exp_count, tc_idx, experiment.wload_name, experiment.iteration, self._iterations)) # Setup local results folder - logging.debug(r'%14s - out_dir [%s]', 'Executor', experiment.out_dir) + self._log.debug('out_dir set to [%s]', experiment.out_dir) os.system('mkdir -p ' + experiment.out_dir) # FTRACE: start (if a configuration has been provided) if self.te.ftrace and self._target_conf_flag(tc, 'ftrace'): - logging.warning('%14s - FTrace events collection enabled', 'Executor') + self._log.warning('FTrace events collection enabled') self.te.ftrace.start() # ENERGY: start sampling @@ -560,41 +547,41 @@ class Executor(): trace_file = experiment.out_dir + '/trace.dat' self.te.ftrace.get_trace(trace_file) - logging.info(r'%14s - Collected FTrace binary trace:', 'Executor') - logging.info(r'%14s - %s', 'Executor', - trace_file.replace(self.te.res_dir, '')) + self._log.info('Collected FTrace binary trace:') + self._log.info(' %s', + 
trace_file.replace(self.te.res_dir, '')) stats_file = experiment.out_dir + '/trace_stat.json' self.te.ftrace.get_stats(stats_file) - logging.info(r'%14s - Collected FTrace function profiling:', 'Executor') - logging.info(r'%14s - %s', 'Executor', - stats_file.replace(self.te.res_dir, '')) + self._log.info('Collected FTrace function profiling:') + self._log.info(' %s', + stats_file.replace(self.te.res_dir, '')) - self._print_footer('Executor') + self._print_footer() ################################################################################ # Utility Functions ################################################################################ - def _print_section(self, tag, message): - logging.info('') - logging.info(FMT_SECTION) - logging.info(r'%14s - %s', tag, message) - logging.info(FMT_SECTION) + def _print_section(self, message): + self._log.info('') + self._log.info(FMT_SECTION) + self._log.info(message) + self._log.info(FMT_SECTION) - def _print_header(self, tag, message): - logging.info('') - logging.info(FMT_HEADER) - logging.info(r'%14s - %s', tag, message) + def _print_header(self, message): + self._log.info('') + self._log.info(FMT_HEADER) + self._log.info(message) - def _print_title(self, tag, message): - logging.info(FMT_TITLE) - logging.info(r'%14s - %s', tag, message) + def _print_title(self, message): + self._log.info(FMT_TITLE) + self._log.info(message) - def _print_footer(self, tag, message=None): + def _print_footer(self, message=None): if message: - logging.info(r'%14s - %s', tag, message) - logging.info(FMT_FOOTER) + self._log.info(message) + self._log.info(FMT_FOOTER) ################################################################################ diff --git a/libs/utils/perf_analysis.py b/libs/utils/perf_analysis.py index f6c34b306..1f3256e39 100644 --- a/libs/utils/perf_analysis.py +++ b/libs/utils/perf_analysis.py @@ -25,8 +25,6 @@ import pylab as pl import re import sys import trappy - -# Configure logging import logging # Regexp to match an rt-app generated logfile @@ -42,6 +40,9 @@ class PerfAnalysis(object): # Folder containing all rt-app data self.datadir = None + # Setup logging + self._log = logging.getLogger('PerfAnalysis') + # Load performance data generated by rt-app workloads self.__loadRTAData(datadir, tasks) @@ -106,18 +107,18 @@ class PerfAnalysis(object): task_name = self.__taskNameFromLog(logfile) self.perf_data[task_name] = {} self.perf_data[task_name]['logfile'] = logfile - logging.debug('Found rt-app logfile for task [%s]', task_name) + self._log.debug('Found rt-app logfile for task [%s]', task_name) else: # Lookup for specified rt-app task logfile into specified datadir for task in tasks: logfile = self.__logfileFromTaskName(task) self.perf_data[task_name] = {} self.perf_data[task_name]['logfile'] = logfile - logging.debug('Found rt-app logfile for task [%s]', task_name) + self._log.debug('Found rt-app logfile for task [%s]', task_name) # Load all the found logfile into a dataset for task in self.perf_data.keys(): - logging.debug('Loading dataframe for task [%s]...', task) + self._log.debug('Loading dataframe for task [%s]...', task) df = pd.read_table(self.logfile(task), sep='\s+', skiprows=1, @@ -174,7 +175,7 @@ class PerfAnalysis(object): # axes.set_xlim(x_min, x_max); pindex_avg = data.mean()[0]; pindex_std = data.std()[0]; - logging.info('PerfIndex, Task [%s] avg: %.2f, std: %.2f', + self._log.info('PerfIndex, Task [%s] avg: %.2f, std: %.2f', task, pindex_avg, pindex_std) axes.axvline(pindex_avg, color='b', linestyle='--', linewidth=2); 
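
The conversion applied in every hunk follows the same shape: each class obtains a named logger in its constructor, logging calls keep lazy %-style argument interpolation, and the old '%14s - <tag>' prefix is dropped in favour of plain messages with space-indented continuation lines. Below is a minimal sketch of that pattern, not code taken from this patch; the class and method names are illustrative only, and handler/loglevel configuration is assumed to be provided elsewhere by the logging setup.

    import logging

    class ExampleAnalysis(object):
        """Illustrative only: the per-module logger pattern this patch applies."""

        def __init__(self, datadir):
            # One logger per module, named after the module/class, so its
            # loglevel and output channels can be tuned independently.
            self._log = logging.getLogger('ExampleAnalysis')
            self.datadir = datadir

        def load(self, task_name):
            # Lazy %-formatting: arguments are interpolated only when the
            # record is actually emitted at the configured loglevel.
            self._log.debug('Found rt-app logfile for task [%s]', task_name)
            # Continuation lines are indented instead of repeating a
            # '%14s - <tag>' prefix on every message.
            self._log.info('Loading dataframe for task:')
            self._log.info('   %s', task_name)

With this layout, silencing or tracing a single module amounts to changing that logger's level in the logging configuration, without touching any call site.
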
diff --git a/libs/utils/report.py b/libs/utils/report.py index a0600d992..c38973e1d 100644 --- a/libs/utils/report.py +++ b/libs/utils/report.py @@ -24,19 +24,12 @@ import numpy as np import os import re import sys +import logging from collections import defaultdict from colors import TestColors from results import Results -# Configure logging -import logging -reload(logging) -logging.basicConfig( - format='%(asctime)-9s %(levelname)-8s: %(message)s', - # level=logging.DEBUG, - level=logging.INFO, - datefmt='%I:%M:%S') # By default compare all the possible combinations DEFAULT_COMPARE = [(r'base_', r'test_')] @@ -50,24 +43,27 @@ class Report(object): self.compare = [] + # Setup logging + self._log = logging.getLogger('Report') + # Parse results (if required) if not os.path.isfile(self.results_json): Results(results_dir) # Load results from file (if already parsed) - logging.info('%14s - Load results from [%s]...', - 'Results', self.results_json) + self._log.info('Load results from [%s]...', + self.results_json) with open(self.results_json) as infile: self.results = json.load(infile) # Setup configuration comparisons if compare is None: compare = DEFAULT_COMPARE - logging.warning('%14s - Comparing all the possible combination', - 'Results') + self._log.warning('Comparing all the possible combination') for (base_rexp, test_rexp) in compare: - logging.info('Configured regexps for comparisions (bases , tests): (%s, %s)', - base_rexp, test_rexp) + self._log.info('Configured regexps for comparisions ' + '(bases , tests): (%s, %s)', + base_rexp, test_rexp) base_rexp = re.compile(base_rexp, re.DOTALL) test_rexp = re.compile(test_rexp, re.DOTALL) self.compare.append((base_rexp, test_rexp)) @@ -81,23 +77,23 @@ class Report(object): def __rtapp_report(self, formats): if 'rtapp' not in self.results.keys(): - logging.debug('%14s - No RTApp workloads to report', 'ReportRTApp') + self._log.debug('No RTApp workloads to report') return - logging.debug('%14s - Reporting RTApp workloads', 'ReportRTApp') + self._log.debug('Reporting RTApp workloads') # Setup lables depending on requested report if 'absolute' in formats: nrg_lable = 'Energy Indexes (Absolute)' prf_lable = 'Performance Indexes (Absolute)' - logging.info('') - logging.info('%14s - Absolute comparisions:', 'Report') + self._log.info('') + self._log.info('Absolute comparisions:') print '' else: nrg_lable = 'Energy Indexes (Relative)' prf_lable = 'Performance Indexes (Relative)' - logging.info('') - logging.info('%14s - Relative comparisions:', 'Report') + self._log.info('') + self._log.info('Relative comparisions:') print '' # Dump headers @@ -140,8 +136,8 @@ class Report(object): def __rtapp_reference(self, tid, base_idx): _results = self.results['rtapp'] - logging.debug('Test %s: compare against [%s] base', - tid, base_idx) + self._log.debug('Test %s: compare against [%s] base', + tid, base_idx) res_line = '{0:12s}: {1:22s} | '.format(tid, base_idx) # Dump all energy metrics @@ -159,7 +155,7 @@ class Report(object): for pidx in ['perf_avg', 'slack_pct', 'edp1', 'edp2', 'edp3']: res_base = _results[tid][base_idx]['performance'][pidx]['avg'] - logging.debug('idx: %s, base: %s', pidx, res_base) + self._log.debug('idx: %s, base: %s', pidx, res_base) if pidx in ['perf_avg']: res_line += ' {0:s}'.format(TestColors.rate(res_base)) @@ -177,8 +173,8 @@ class Report(object): def __rtapp_compare(self, tid, base_idx, test_idx, formats): _results = self.results['rtapp'] - logging.debug('Test %s: compare %s with %s', - tid, base_idx, test_idx) + 
self._log.debug('Test %s: compare %s with %s', + tid, base_idx, test_idx) res_line = '{0:12s}: {1:20s} | '.format(tid, test_idx) # Dump all energy metrics @@ -207,8 +203,8 @@ class Report(object): res_base = _results[tid][base_idx]['performance'][pidx]['avg'] res_test = _results[tid][test_idx]['performance'][pidx]['avg'] - logging.debug('idx: %s, base: %s, test: %s', - pidx, res_base, res_test) + self._log.debug('idx: %s, base: %s, test: %s', + pidx, res_base, res_test) if pidx in ['perf_avg']: res_line += ' {0:s}'.format(TestColors.rate(res_test)) @@ -241,23 +237,23 @@ class Report(object): wtypes.append(supported_wtype) if len(wtypes) == 0: - logging.debug('%14s - No Default workloads to report', 'ReportDefault') + self._log.debug('No Default workloads to report') return - logging.debug('%14s - Reporting Default workloads', 'ReportDefault') + self._log.debug('Reporting Default workloads') # Setup lables depending on requested report if 'absolute' in formats: nrg_lable = 'Energy Indexes (Absolute)' prf_lable = 'Performance Indexes (Absolute)' - logging.info('') - logging.info('%14s - Absolute comparisions:', 'Report') + self._log.info('') + self._log.info('Absolute comparisions:') print '' else: nrg_lable = 'Energy Indexes (Relative)' prf_lable = 'Performance Indexes (Relative)' - logging.info('') - logging.info('%14s - Relative comparisions:', 'Report') + self._log.info('') + self._log.info('Relative comparisions:') print '' # Dump headers @@ -300,8 +296,8 @@ class Report(object): def __default_compare(self, wtype, tid, base_idx, test_idx, formats): _results = self.results[wtype] - logging.debug('Test %s: compare %s with %s', - tid, base_idx, test_idx) + self._log.debug('Test %s: compare %s with %s', + tid, base_idx, test_idx) res_comp = '{0:s} vs {1:s}'.format(test_idx, base_idx) res_line = '{0:8s}: {1:22s} | '.format(tid, res_comp) @@ -340,8 +336,8 @@ class Report(object): res_base = _results[tid][base_idx]['performance'][pidx]['avg'] res_test = _results[tid][test_idx]['performance'][pidx]['avg'] - logging.debug('idx: %s, base: %s, test: %s', - pidx, res_base, res_test) + self._log.debug('idx: %s, base: %s, test: %s', + pidx, res_base, res_test) # Compute difference base-vs-test speedup_cnt = 0 diff --git a/libs/utils/results.py b/libs/utils/results.py index 5bc71dbd4..913214896 100644 --- a/libs/utils/results.py +++ b/libs/utils/results.py @@ -24,11 +24,12 @@ import numpy as np import os import re import sys +import logging from collections import defaultdict from colors import TestColors -import logging + class Results(object): @@ -37,6 +38,9 @@ class Results(object): self.results_json = results_dir + '/results.json' self.results = {} + # Setup logging + self._log = logging.getLogger('Results') + # Do nothing if results have been already parsed if os.path.isfile(self.results_json): return @@ -45,7 +49,7 @@ class Results(object): self.base_wls = defaultdict(list) self.test_wls = defaultdict(list) - logging.info('%14s - Loading energy/perf data...', 'Parser') + self._log.info('Loading energy/perf data...') for test_idx in sorted(os.listdir(self.results_dir)): @@ -57,8 +61,8 @@ class Results(object): test.parse() results_json = self.results_dir + '/results.json' - logging.info('%14s - Dump perf results on JSON file [%s]...', - 'Parser', results_json) + self._log.info('Dump perf results on JSON file [%s]...', + results_json) with open(results_json, 'w') as outfile: json.dump(self.results, outfile, indent=4, sort_keys=True) @@ -74,10 +78,8 @@ class Test(object): self.res = res match = 
TEST_DIR_RE.search(test_dir) if not match: - logging.error('%14s - Results folder not matching naming template', - 'TestParser') - logging.error('%14s - Skip parsing of test results [%s]', - 'TestParser', test_dir) + self._log.error('Results folder not matching naming template') + self._log.error('Skip parsing of test results [%s]', test_dir) return # Create required JSON entries @@ -103,8 +105,7 @@ class Test(object): def parse(self): - logging.info('%14s - Processing results from wtype [%s]', - 'TestParser', self.wtype) + self._log.info('Processing results from wtype [%s]', self.wtype) # Parse test's run results for run_idx in sorted(os.listdir(self.test_dir)): @@ -146,10 +147,8 @@ class TestFactory(object): # Retrive workload class from results folder name match = TEST_DIR_RE.search(test_dir) if not match: - logging.error('%14s - Results folder not matching naming template', - 'TestParser') - logging.error('%14s - Skip parsing of test results [%s]', - 'TestParser', test_dir) + self._log.error('Results folder not matching naming template') + self._log.error('Skip parsing of test results [%s]', test_dir) return # Create workload specifi test class @@ -170,7 +169,7 @@ class Energy(object): self.big = 0.0 self.total = 0.0 - logging.debug('%14s - Parse [%s]...', 'Energy', nrg_file) + self._log.debug('Parse [%s]...', nrg_file) with open(nrg_file, 'r') as infile: nrg = json.load(infile) @@ -181,8 +180,8 @@ class Energy(object): self.big = float(nrg['big']) self.total = self.little + self.big - logging.debug('%14s - Energy LITTLE [%s], big [%s], Total [%s]', - 'Energy', self.little, self.big, self.total) + self._log.debug('Energy LITTLE [%s], big [%s], Total [%s]', + self.little, self.big, self.total) class Stats(object): @@ -232,7 +231,7 @@ class Run(object): self.run_idx = run_idx self.nrg = None - logging.debug('%14s - Parse [%s]...', 'Run', run_dir) + self._log.debug('Parse [%s]...', 'Run', run_dir) # Energy stats self.little_nrg = 0 @@ -358,7 +357,7 @@ class RTAppPerf(object): 'edp3' : 0 } - logging.debug('%14s - Parse [%s]...', 'Perf', perf_file) + self._log.debug('Parse [%s]...', perf_file) # Load performance data for each RT-App task self.name = perf_file.split('-')[-2] @@ -373,32 +372,33 @@ class RTAppPerf(object): perf = np.multiply(perf, 100) self.prf['perf_avg'] = np.mean(perf) self.prf['perf_std'] = np.std(perf) - # logging.debug('perf [%s]: %6.2f,%6.2f', - # self.name, self.prf['perf_mean'], self.prf['perf_std']) + # self._log.debug('perf [%s]: %6.2f,%6.2f', + # self.name, self.prf['perf_mean'], + # self.prf['perf_std']) # Negative slacks nslacks = self.data[:,RTAPP_COL_SLACK] nslacks = nslacks[nslacks < 0] - # logging.debug('Negative Slacks: %s', nslacks) + # self._log.debug('Negative Slacks: %s', nslacks) self.prf['slack_sum'] = -nslacks.sum() - # logging.debug('Negative slack [%s] sum: %6.2f', - # self.name, self.prf['slack_sum']) + # self._log.debug('Negative slack [%s] sum: %6.2f', + # self.name, self.prf['slack_sum']) # Slack over run-time self.prf['run_sum'] = np.sum(self.data[:,RTAPP_COL_RUN]) self.prf['slack_pct'] = 100 * self.prf['slack_sum'] / self.prf['run_sum'] - # logging.debug('SlackPct [%s]: %6.2f %%', self.name, self.slack_pct) + # self._log.debug('SlackPct [%s]: %6.2f %%', self.name, self.slack_pct) if nrg is None: return # Computing EDP self.prf['edp1'] = nrg.total * math.pow(self.prf['run_sum'], 1) - # logging.debug('EDP1 [%s]: {%6.2f}', self.name, self.prf['edp1']) + # self._log.debug('EDP1 [%s]: {%6.2f}', self.name, self.prf['edp1']) self.prf['edp2'] = 
nrg.total * math.pow(self.prf['run_sum'], 2) - # logging.debug('EDP2 [%s]: %6.2f', self.name, self.prf['edp2']) + # self._log.debug('EDP2 [%s]: %6.2f', self.name, self.prf['edp2']) self.prf['edp3'] = nrg.total * math.pow(self.prf['run_sum'], 3) - # logging.debug('EDP3 [%s]: %6.2f', self.name, self.prf['edp3']) + # self._log.debug('EDP3 [%s]: %6.2f', self.name, self.prf['edp3']) # Columns of the per-task rt-app log file @@ -466,8 +466,8 @@ class DefaultRun(Run): # Load default performance.json prf_file = os.path.join(run_dir, 'performance.json') if not os.path.isfile(prf_file): - logging.warning('%14s - No performance.json found in %s', - 'Perf', run_dir) + self._log.warning('No performance.json found in %s', + run_dir) return # Load performance report from JSON diff --git a/libs/utils/test.py b/libs/utils/test.py index 6e037a3e3..3a658ee5f 100644 --- a/libs/utils/test.py +++ b/libs/utils/test.py @@ -15,9 +15,9 @@ # limitations under the License. # -import logging import os import unittest +import logging from bart.sched.SchedAssert import SchedAssert from bart.sched.SchedMultiAssert import SchedMultiAssert @@ -27,6 +27,7 @@ import wrapt from env import TestEnv from executor import Executor + class LisaTest(unittest.TestCase): """ A base class for LISA tests @@ -57,12 +58,7 @@ class LisaTest(unittest.TestCase): Set up logging and trigger running experiments """ - cls.logger = logging.getLogger('test') - cls.logger.setLevel(logging.INFO) - if 'loglevel' in kwargs: - cls.logger.setLevel(kwargs['loglevel']) - kwargs.pop('loglevel') - + cls.logger = logging.getLogger('LisaTest') cls._runExperiments() @classmethod @@ -91,7 +87,7 @@ class LisaTest(unittest.TestCase): Default experiments execution engine """ - cls.logger.info("%14s - Setup tests execution engine...", "LisaTest") + cls.logger.info('Setup tests execution engine...') test_env = TestEnv(test_conf=cls._getTestConf()) experiments_conf = cls._getExperimentsConf(test_env) @@ -104,7 +100,7 @@ class LisaTest(unittest.TestCase): # Execute pre-experiments code defined by the test cls._experimentsInit() - cls.logger.info("%14s - Experiments execution...", "LisaTest") + cls.logger.info('Experiments execution...') cls.executor.run() # Execute post-experiments code defined by the test diff --git a/libs/utils/trace.py b/libs/utils/trace.py index 5598252e6..f296a3c10 100644 --- a/libs/utils/trace.py +++ b/libs/utils/trace.py @@ -25,14 +25,13 @@ import trappy import json import warnings import operator +import logging from analysis_register import AnalysisRegister from collections import namedtuple from devlib.utils.misc import memoized from trappy.utils import listify -# Configure logging -import logging NON_IDLE_STATE = 4294967295 ResidencyTime = namedtuple('ResidencyTime', ['total', 'active']) @@ -118,6 +117,9 @@ class Trace(object): # Folder containing all trace data self.data_dir = None + # Setup logging + self._log = logging.getLogger('Trace') + # Folder containing trace if not os.path.isdir(data_dir): self.data_dir = os.path.dirname(data_dir) @@ -157,13 +159,13 @@ class Trace(object): :param module: module to which the function is added :type module: class """ - logging.debug("Registering [%s] local data frames", module) + self._log.debug('Registering [%s] local data frames', module) for func in dir(module): if not func.startswith('_dfg_'): continue dfg_name = func.replace('_dfg_', '') dfg_func = getattr(module, func) - logging.debug(" %s", dfg_name) + self._log.debug(' %s', dfg_name) setattr(self.data_frame, dfg_name, dfg_func) def 
setXTimeRange(self, t_min=None, t_max=None): @@ -184,8 +186,8 @@ class Trace(object): self.x_max = self.time_range else: self.x_max = t_max - logging.info('Set plots time range to (%.6f, %.6f)[s]', - self.x_min, self.x_max) + self._log.info('Set plots time range to (%.6f, %.6f)[s]', + self.x_min, self.x_max) def __registerTraceEvents(self, events): """ @@ -223,14 +225,14 @@ class Trace(object): - SysTrace :type trace_format: str """ - logging.debug('Loading [sched] events from trace in [%s]...', path) - logging.debug("Parsing events: %s", self.events) + self._log.debug('Loading [sched] events from trace in [%s]...', path) + self._log.debug('Parsing events: %s', self.events) if trace_format.upper() == 'SYSTRACE' or path.endswith('html'): - logging.info('Parsing SysTrace format...') + self._log.info('Parsing SysTrace format...') trace_class = trappy.SysTrace self.trace_format = 'SysTrace' elif trace_format.upper() == 'FTRACE': - logging.info('Parsing FTrace format...') + self._log.info('Parsing FTrace format...') trace_class = trappy.FTrace self.trace_format = 'FTrace' else: @@ -246,7 +248,7 @@ class Trace(object): self.__checkAvailableEvents() if len(self.available_events) == 0: if has_function_stats: - logging.info('Trace contains only functions stats') + self._log.info('Trace contains only functions stats') return raise ValueError('The trace does not contain useful events ' 'nor function stats') @@ -285,9 +287,9 @@ class Trace(object): obj = getattr(self.ftrace, val) if len(obj.data_frame): self.available_events.append(val) - logging.debug('Events found on trace:') + self._log.debug('Events found on trace:') for evt in self.available_events: - logging.debug(' - %s', evt) + self._log.debug(' - %s', evt) def __loadTasksNames(self, tasks): """ @@ -308,7 +310,7 @@ class Trace(object): elif 'sched_load_avg_task' in self.available_events: load(tasks, 'sched_load_avg_task', 'comm', 'pid') else: - logging.warning('Failed to load tasks names from trace events') + self._log.warning('Failed to load tasks names from trace events') def hasEvents(self, dataset): """ @@ -339,8 +341,8 @@ class Trace(object): te = df.index[-1] self.time_range = te - ts - logging.info('Collected events spans a %.3f [s] time interval', - self.time_range) + self._log.info('Collected events spans a %.3f [s] time interval', + self.time_range) # Build a stat on trace overutilization if self.hasEvents('sched_overutilized'): @@ -348,8 +350,8 @@ class Trace(object): self.overutilized_time = df[df.overutilized == 1].len.sum() self.overutilized_prc = 100. 
* self.overutilized_time / self.time_range - logging.info('Overutilized time: %.6f [s] (%.3f%% of trace time)', - self.overutilized_time, self.overutilized_prc) + self._log.info('Overutilized time: %.6f [s] (%.3f%% of trace time)', + self.overutilized_time, self.overutilized_prc) def _scanTasks(self, df, name_key='comm', pid_key='pid'): """ @@ -433,23 +435,23 @@ class Trace(object): if dataframe is None: return {k: v for k, v in self.tasks.iteritems() if k in task_names} df = dataframe - logging.debug("Lookup dataset for tasks...") + self._log.debug('Lookup dataset for tasks...') for tname in task_names: - logging.debug("Lookup for task [%s]...", tname) + self._log.debug('Lookup for task [%s]...', tname) results = df[df[name_key] == tname][[name_key, pid_key]] if len(results) == 0: - logging.error(' task %16s NOT found', tname) + self._log.error(' task %16s NOT found', tname) continue (name, pid) = results.head(1).values[0] if name != tname: - logging.error(' task %16s NOT found', tname) + self._log.error(' task %16s NOT found', tname) continue if tname not in self.tasks: self.tasks[tname] = {} pids = list(results[pid_key].unique()) self.tasks[tname]['pid'] = pids - logging.debug(' task %16s found, pid: %s', - tname, self.tasks[tname]['pid']) + self._log.debug(' task %16s found, pid: %s', + tname, self.tasks[tname]['pid']) return self.tasks @@ -677,12 +679,12 @@ class Trace(object): for chunk in self._chunker(cluster_df, len(cpus)): f = chunk.iloc[0].frequency if any(chunk.frequency != f): - logging.warn('Cluster Frequency is not coherent! ' - 'Failure in [cpu_frequency] events at:') - logging.warn(chunk) + self._log.warning('Cluster Frequency is not coherent! ' + 'Failure in [cpu_frequency] events at:') + self._log.warning(chunk) self.freq_coherency = False return - logging.info("Platform clusters verified to be Frequency coherent") + self._log.info('Platform clusters verified to be Frequency coherent') ############################################################################### # Utility Methods @@ -721,7 +723,7 @@ class Trace(object): return False # Opening functions profiling JSON data file - logging.debug('Loading functions profiling data from [%s]...', path) + self._log.debug('Loading functions profiling data from [%s]...', path) with open(os.path.join(path), 'r') as fh: trace_stats = json.load(fh) @@ -751,8 +753,8 @@ class Trace(object): :returns: :mod:`pandas.Series` """ if not self.hasEvents('cpu_idle'): - logging.warn('Events [cpu_idle] not found, '\ - 'cannot compute CPU active signal!') + self._log.warning('Events [cpu_idle] not found, ' + 'cannot compute CPU active signal!') return None idle_df = self._dfg_trace_event('cpu_idle') @@ -789,8 +791,8 @@ class Trace(object): :returns: :mod:`pandas.Series` """ if not self.hasEvents('cpu_idle'): - logging.warn('Events [cpu_idle] not found, '\ - 'cannot compute cluster active signal!') + self._log.warning('Events [cpu_idle] not found, ' + 'cannot compute cluster active signal!') return None active = self.getCPUActiveSignal(cluster[0]).to_frame(name=cluster[0]) diff --git a/libs/wlgen/wlgen/perf_bench.py b/libs/wlgen/wlgen/perf_bench.py index 85463d0a5..df87bfa67 100644 --- a/libs/wlgen/wlgen/perf_bench.py +++ b/libs/wlgen/wlgen/perf_bench.py @@ -29,6 +29,7 @@ class PerfMessaging(Workload): target, name): + # Setup logging self.logger = logging.getLogger('perf_bench') # TODO: Assume perf is pre-installed on target diff --git a/libs/wlgen/wlgen/rta.py b/libs/wlgen/wlgen/rta.py index 1d5a3eb8a..e779e8345 100644 --- 
a/libs/wlgen/wlgen/rta.py +++ b/libs/wlgen/wlgen/rta.py @@ -17,7 +17,6 @@ import fileinput import json -import logging import os import re @@ -25,6 +24,8 @@ from collections import namedtuple from wlgen import Workload from devlib.utils.misc import ranges_to_list +import logging + _Phase = namedtuple('Phase', 'duration_s, period_ms, duty_cycle_pct') class Phase(_Phase): """ @@ -48,7 +49,8 @@ class RTA(Workload): name, calibration=None): - self.logger = logging.getLogger('rtapp') + # Setup logging + self._log = logging.getLogger('RTApp') # rt-app calibration self.pload = calibration @@ -78,15 +80,18 @@ class RTA(Workload): pload_regexp = re.compile(r'pLoad = ([0-9]+)ns') pload = {} + # Setup logging + log = logging.getLogger('RTApp') + # target.cpufreq.save_governors() target.cpufreq.set_all_governors('performance') for cpu in target.list_online_cpus(): - logging.info('CPU%d calibration...', cpu) + log.info('CPU%d calibration...', cpu) max_rtprio = int(target.execute('ulimit -Hr').split('\r')[0]) - logging.debug('Max RT prio: %d', max_rtprio) + log.debug('Max RT prio: %d', max_rtprio) if max_rtprio > 10: max_rtprio = 10 @@ -111,25 +116,25 @@ class RTA(Workload): if pload_match is None: continue pload[cpu] = int(pload_match.group(1)) - logging.debug('>>> cpu%d: %d', cpu, pload[cpu]) + log.debug('>>> cpu%d: %d', cpu, pload[cpu]) # target.cpufreq.load_governors() - logging.info('Target RT-App calibration:') - logging.info('%s', - "{" + ", ".join('"%r": %r' % (key, pload[key]) for key in pload) + "}") + log.info('Target RT-App calibration:') + log.info("{" + ", ".join('"%r": %r' % (key, pload[key]) + for key in pload) + "}") # Sanity check calibration values for big.LITTLE systems if 'bl' in target.modules: bcpu = target.bl.bigs_online[0] lcpu = target.bl.littles_online[0] if pload[bcpu] > pload[lcpu]: - logging.warning("Calibration values reports big cores less " - "capable than LITTLE cores") - raise RuntimeError("Calibration failed: try again or file a bug") + log.warning('Calibration values reports big cores less ' + 'capable than LITTLE cores') + raise RuntimeError('Calibration failed: try again or file a bug') bigs_speedup = ((float(pload[lcpu]) / pload[bcpu]) - 1) * 100 - logging.info("big cores are ~%.0f%% more capable than LITTLE cores", - bigs_speedup) + log.info('big cores are ~%.0f%% more capable than LITTLE cores', + bigs_speedup) return pload @@ -137,18 +142,15 @@ class RTA(Workload): destdir = params['destdir'] if destdir is None: return - self.logger.debug('%14s - Pulling logfiles to [%s]...', - 'RTApp', destdir) + self._log.debug('Pulling logfiles to [%s]...', destdir) for task in self.tasks.keys(): logfile = "'{0:s}/*{1:s}*.log'"\ .format(self.run_dir, task) self.target.pull(logfile, destdir) - self.logger.debug('%14s - Pulling JSON to [%s]...', - 'RTApp', destdir) + self._log.debug('Pulling JSON to [%s]...', destdir) self.target.pull('{}/{}'.format(self.run_dir, self.json), destdir) logfile = '{}/output.log'.format(destdir) - self.logger.debug('%14s - Saving output on [%s]...', - 'RTApp', logfile) + self._log.debug('Saving output on [%s]...', logfile) with open(logfile, 'w') as ofile: for line in self.output['executor'].split('\n'): ofile.write(line+'\n') @@ -231,24 +233,20 @@ class RTA(Workload): if self.pload is not None: if loadref and loadref.upper() == 'LITTLE': target_cpu = self._getFirstLittle() - self.logger.debug('%14s - ref on LITTLE cpu: %d', - 'RTApp', target_cpu) + self._log.debug('ref on LITTLE cpu: %d', target_cpu) else: target_cpu = self._getFirstBig() - 
self.logger.debug('%14s - ref on big cpu: %d', - 'RTApp', target_cpu) + self._log.debug('ref on big cpu: %d', target_cpu) return target_cpu # These options are selected only when RTApp has not been # already calibrated if self.cpus is None: target_cpu = self._getFirstBig() - self.logger.debug('%14s - ref on cpu: %d', - 'RTApp', target_cpu) + self._log.debug('ref on cpu: %d', target_cpu) else: target_cpu = self._getFirstBiggest(self.cpus) - self.logger.debug('%14s - ref on (possible) biggest cpu: %d', - 'RTApp', target_cpu) + self._log.debug('ref on (possible) biggest cpu: %d', target_cpu) return target_cpu def getCalibrationConf(self, target_cpu=0): @@ -305,11 +303,10 @@ class RTA(Workload): global_conf['calibration'] = calibration if self.duration is not None: global_conf['duration'] = self.duration - self.logger.warn('%14s - Limiting workload duration to %d [s]', - 'RTApp', global_conf['duration']) + self._log.warn('Limiting workload duration to %d [s]', + global_conf['duration']) else: - self.logger.info('%14s - Workload duration defined by longest task', - 'RTApp') + self._log.info('Workload duration defined by longest task') # Setup default scheduling class if 'policy' in self.sched: @@ -319,8 +316,7 @@ class RTA(Workload): .format(policy)) global_conf['default_policy'] = 'SCHED_' + self.sched['policy'] - self.logger.info('%14s - Default policy: %s', - 'RTApp', global_conf['default_policy']) + self._log.info('Default policy: %s', global_conf['default_policy']) # Setup global configuration self.rta_profile['global'] = global_conf @@ -350,29 +346,26 @@ class RTA(Workload): # Initialize task phases task_conf['phases'] = {} - self.logger.info('%14s - ------------------------', 'RTApp') - self.logger.info('%14s - task [%s], %s', 'RTApp', tid, sched_descr) + self._log.info('------------------------') + self._log.info('task [%s], %s', tid, sched_descr) if 'delay' in task.keys(): if task['delay'] > 0: task_conf['phases']['p000000'] = {} task_conf['phases']['p000000']['delay'] = int(task['delay'] * 1e6) - self.logger.info('%14s - | start delay: %.6f [s]', - 'RTApp', task['delay']) + self._log.info(' | start delay: %.6f [s]', + task['delay']) - self.logger.info('%14s - | calibration CPU: %d', - 'RTApp', target_cpu) + self._log.info(' | calibration CPU: %d', target_cpu) if 'loops' not in task.keys(): task['loops'] = 1 task_conf['loop'] = task['loops'] - self.logger.info('%14s - | loops count: %d', - 'RTApp', task['loops']) + self._log.info(' | loops count: %d', task['loops']) # Setup task affinity if 'cpus' in task and task['cpus']: - self.logger.info('%14s - | CPUs affinity: %s', - 'RTApp', task['cpus']) + self._log.info(' | CPUs affinity: %s', task['cpus']) if isinstance(task['cpus'], str): task_conf['cpus'] = ranges_to_list(task['cpus']) elif isinstance(task['cpus'], list): @@ -395,8 +388,8 @@ class RTA(Workload): # A duty-cycle of 0[%] translates on a 'sleep' phase if phase.duty_cycle_pct == 0: - self.logger.info('%14s - + phase_%06d: sleep %.6f [s]', - 'RTApp', pid, duration/1e6) + self._log.info(' + phase_%06d: sleep %.6f [s]', + pid, duration/1e6) task_phase = { 'loop': 1, @@ -406,8 +399,8 @@ class RTA(Workload): # A duty-cycle of 100[%] translates on a 'run-only' phase elif phase.duty_cycle_pct == 100: - self.logger.info('%14s - + phase_%06d: batch %.6f [s]', - 'RTApp', pid, duration/1e6) + self._log.info(' + phase_%06d: batch %.6f [s]', + pid, duration/1e6) task_phase = { 'loop': 1, @@ -425,15 +418,12 @@ class RTA(Workload): sleep_time = period * (100 - phase.duty_cycle_pct) / 100 
running_time = period - sleep_time - self.logger.info( - '%14s - + phase_%06d: duration %.6f [s] (%d loops)', - 'RTApp', pid, duration/1e6, cloops) - self.logger.info( - '%14s - | period %6d [us], duty_cycle %3d %%', - 'RTApp', period, phase.duty_cycle_pct) - self.logger.info( - '%14s - | run_time %6d [us], sleep_time %6d [us]', - 'RTApp', running_time, sleep_time) + self._log.info('+ phase_%06d: duration %.6f [s] (%d loops)', + pid, duration/1e6, cloops) + self._log.info('| period %6d [us], duty_cycle %3d %%', + period, phase.duty_cycle_pct) + self._log.info('| run_time %6d [us], sleep_time %6d [us]', + running_time, sleep_time) task_phase = { 'loop': cloops, diff --git a/libs/wlgen/wlgen/workload.py b/libs/wlgen/wlgen/workload.py index 730b0159a..8b630e746 100644 --- a/libs/wlgen/wlgen/workload.py +++ b/libs/wlgen/wlgen/workload.py @@ -17,11 +17,12 @@ import fileinput import json -import logging import os import re from time import sleep +import logging + class Workload(object): def __init__(self, @@ -87,26 +88,29 @@ class Workload(object): # Map of task/s parameters self.params = {} - logging.info('%14s - Setup new workload %s', 'WlGen', self.name) + # Setup logging + self._log = logging.getLogger('Workload') + + self._log.info('Setup new workload %s', self.name) def __callback(self, step, **kwords): if step not in self.steps.keys(): raise ValueError('Callbacks for [%s] step not supported', step) if self.steps[step] is None: return - logging.debug('%14s - Callback [%s]...', 'WlGen', step) + self._log.debug('Callback [%s]...', step) self.steps[step](kwords) def setCallback(self, step, func): - logging.debug('%14s - Setup step [%s] callback to [%s] function', - 'WlGen', step, func.__name__) + self._log.debug('Setup step [%s] callback to [%s] function', + step, func.__name__) self.steps[step] = func def getCpusMask(self, cpus=None): mask = 0x0 for cpu in (cpus or self.target.list_online_cpus()): mask |= (1 << cpu) - # logging.debug('%14s - 0x{0:X}'.format(mask), 'WlGen') + # self._log.debug('0x{0:X}'.format(mask)) return mask def conf(self, @@ -136,16 +140,16 @@ class Workload(object): # Configure a profile workload if kind == 'profile': - logging.debug('%14s - Configuring a profile-based workload...', 'WlGen') + self._log.debug('Configuring a profile-based workload...') self.params['profile'] = params # Configure a custom workload elif kind == 'custom': - logging.debug('%14s - Configuring custom workload...', 'WlGen') + self._log.debug('Configuring custom workload...') self.params['custom'] = params else: - logging.error('%14s - %s is not a supported RTApp workload kind', 'WlGen', kind) + self._log.error('%s is not a supported RTApp workload kind', kind) raise ValueError('RTApp workload kind not supported') def run(self, @@ -204,7 +208,7 @@ class Workload(object): _command = self.command if not _command: - logging.error('%14s - Error: empty executor command', 'WlGen') + self._log.error('Error: empty executor command') # Prepend eventually required taskset command if cpus or self.cpus: @@ -227,28 +231,28 @@ class Workload(object): # Wait `start_pause` seconds before running the workload if start_pause_s: - logging.info('%14s - Waiting %f seconds before starting workload execution', - 'WlGen', start_pause_s) + self._log.info('Waiting %f seconds before starting workload execution', + start_pause_s) sleep(start_pause_s) # Start task in background if required if background: - logging.debug('%14s - WlGen [background]: %s', 'WlGen', _command) + self._log.debug('WlGen [background]: %s', _command) 
self.target.background(_command, as_root=as_root) self.output['executor'] = '' # Start task in foreground else: - logging.info('%14s - Workload execution START:', 'WlGen') - logging.info('%14s - %s', 'WlGen', _command) + self._log.info('Workload execution START:') + self._log.info(' %s', _command) # Run command and wait for it to complete results = self.target.execute(_command, as_root=as_root) self.output['executor'] = results # Wait `end_pause` seconds before stopping ftrace if end_pause_s: - logging.info('%14s - Waiting %f seconds before stopping trace collection', - 'WlGen', end_pause_s) + self._log.info('Waiting %f seconds before stopping trace collection', + end_pause_s) sleep(end_pause_s) # Stop FTrace (if required) @@ -258,16 +262,15 @@ class Workload(object): ftrace_dat = out_dir + '/' + self.test_label + '.dat' dirname = os.path.dirname(ftrace_dat) if not os.path.exists(dirname): - logging.debug('%14s - Create ftrace results folder [%s]', - 'WlGen', dirname) + self._log.debug('Create ftrace results folder [%s]', + dirname) os.makedirs(dirname) - logging.info('%14s - Pulling trace file into [%s]...', - 'WlGen', ftrace_dat) + self._log.info('Pulling trace file into [%s]...', ftrace_dat) ftrace.get_trace(ftrace_dat) if not background: self.__callback('postrun', destdir=out_dir) - logging.debug('%14s - Workload execution COMPLETED', 'WlGen') + self._log.debug('Workload execution COMPLETED') return ftrace_dat @@ -302,23 +305,23 @@ class Workload(object): return self.tasks if task_names is None: task_names = self.tasks.keys() - logging.debug('%14s - Lookup dataset for tasks...', 'WlGen') + self._log.debug('Lookup dataset for tasks...') for task_name in task_names: results = dataframe[dataframe[name_key] == task_name]\ [[name_key,pid_key]] if len(results)==0: - logging.error(' task %16s NOT found', 'WlGen', task_name) + self._log.error(' task %16s NOT found', task_name) continue (name, pid) = results.head(1).values[0] if name != task_name: - logging.error(' task %16s NOT found', 'WlGen', task_name) + self._log.error(' task %16s NOT found', task_name) continue if task_name not in self.tasks: self.tasks[task_name] = {} pids = list(results[pid_key].unique()) self.tasks[task_name]['pid'] = pids - logging.info('%14s - task %16s found, pid: %s', - 'WlGen', task_name, self.tasks[task_name]['pid']) + self._log.info(' task %16s found, pid: %s', + task_name, self.tasks[task_name]['pid']) return self.tasks def listAll(self, kill=False): @@ -326,15 +329,13 @@ class Workload(object): tasks = self.target.run('ps | grep {0:s}'.format(self.executor)) for task in tasks: task = task.split() - logging.info('%14s - %5s: %s (%s)', - 'WlGen', task[1], task[8], task[0]) + self._log.info('%5s: %s (%s)', task[1], task[8], task[0]) if kill: self.target.run('kill -9 {0:s}'.format(task[1])) def killAll(self): if self.executor is None: return - logging.info('%14s - Killing all [%s] instances:', - 'WlGen', self.executor) + self._log.info('Killing all [%s] instances:', self.executor) self.listAll(True) diff --git a/logging.conf b/logging.conf new file mode 100644 index 000000000..8cc20549e --- /dev/null +++ b/logging.conf @@ -0,0 +1,87 @@ + +################################################################################ +### Root Logger +################################################################################ + +[loggers] +keys=root + +[logger_root] +level=INFO +handlers=consoleHandler,fileHandler +propagate=0 + +################################################################################ +### Modules 
Logger
+################################################################################
+# To force a loglevel for a specific module you need to uncomment the relevant
+# section and add the logger name to the logger:keys list.
+#
+# For example, to enable debugging just for the TestEnv module, you need to
+# uncomment the logger_TestEnv section and set:
+# [loggers]
+# keys=root,TestEnv
+#
+#
+# [logger_TestEnv]
+# qualname=TestEnv
+# level=DEBUG
+# handlers=consoleHandler,fileHandler
+# propagate=0
+#
+# [logger_LisaTest]
+# qualname=LisaTest
+# level=DEBUG
+# handlers=consoleHandler,fileHandler
+# propagate=0
+#
+# [logger_Executor]
+# qualname=Executor
+# level=DEBUG
+# handlers=consoleHandler,fileHandler
+# propagate=0
+#
+# [logger_Workload]
+# qualname=Workload
+# level=DEBUG
+# handlers=consoleHandler,fileHandler
+# propagate=0
+#
+# [logger_RTApp]
+# qualname=RTApp
+# level=DEBUG
+# handlers=consoleHandler,fileHandler
+# propagate=0
+
+
+################################################################################
+### Handlers
+################################################################################
+
+[handlers]
+keys=consoleHandler,fileHandler
+
+[handler_consoleHandler]
+class=StreamHandler
+level=DEBUG
+formatter=simpleFormatter
+args=(sys.stderr,)
+
+[handler_fileHandler]
+class=FileHandler
+level=DEBUG
+formatter=simpleFormatter
+args=("lisa.log",)
+
+
+################################################################################
+### Format
+################################################################################
+
+[formatters]
+keys=simpleFormatter
+
+[formatter_simpleFormatter]
+format=%(asctime)s %(levelname)-8s: %(name)-12s : %(message)s
+datefmt=
+
diff --git a/tests/eas/capacity_capping.py b/tests/eas/capacity_capping.py
index 040151a05..5dcb88da8 100644
--- a/tests/eas/capacity_capping.py
+++ b/tests/eas/capacity_capping.py
@@ -18,17 +18,12 @@ from bart.sched.SchedMultiAssert import SchedMultiAssert
 from env import TestEnv
 import json
-import logging
 import time
 import trappy
 import unittest
 import os
 from wlgen import Periodic, RTA
-# Configure logging to print something while the test runs
-reload(logging)
-logging.basicConfig(level=logging.INFO)
-
 # Read the config file and update the globals
 CONF_FILE = os.path.join(
     os.path.dirname(
-- GitLab

From a9742b24c5935dbe807d0dc18e20c0dc33fe64ec Mon Sep 17 00:00:00 2001
From: Patrick Bellasi
Date: Fri, 25 Nov 2016 17:59:32 +0000
Subject: [PATCH 2/5] lisa_shell: update test command

Currently the test command does not allow using bash completion to define
the test to execute. This is quite annoying, since it unnecessarily masks a
really useful shell feature.

This patch fixes that; it also enforces the usage of the locally defined
logging configuration file and ensures that all log statements are reported
in the output. This makes it possible to monitor the progress of a test
while it is executing, and avoids confusing users into thinking that the
test is not running.
Signed-off-by: Patrick Bellasi
---
 src/shell/lisa_shell | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/src/shell/lisa_shell b/src/shell/lisa_shell
index 0847d5957..53af52c75 100755
--- a/src/shell/lisa_shell
+++ b/src/shell/lisa_shell
@@ -266,7 +266,9 @@ EOF
 function _lisa-test {
 TEST=${1}
-nosetests -v tests/$TEST
+nosetests -v --nocapture --nologcapture \
+    --logging-config=logging.conf \
+    $TEST
 }
 function lisa-test {
-- GitLab

From 46d6cff5064f6b7ffdf4f3b0ccf4644396600575 Mon Sep 17 00:00:00 2001
From: Patrick Bellasi
Date: Tue, 22 Nov 2016 14:54:23 +0000
Subject: [PATCH 3/5] libs: enable commented-out logging to avoid the code becoming unmaintained

Signed-off-by: Patrick Bellasi
---
 libs/utils/results.py | 20 ++++++++++----------
 libs/wlgen/wlgen/workload.py | 2 +-
 2 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/libs/utils/results.py b/libs/utils/results.py
index 913214896..a85958392 100644
--- a/libs/utils/results.py
+++ b/libs/utils/results.py
@@ -372,33 +372,33 @@ class RTAppPerf(object):
         perf = np.multiply(perf, 100)
         self.prf['perf_avg'] = np.mean(perf)
         self.prf['perf_std'] = np.std(perf)
-        # self._log.debug('perf [%s]: %6.2f,%6.2f',
-        #                 self.name, self.prf['perf_mean'],
-        #                 self.prf['perf_std'])
+        self._log.debug('perf [%s]: %6.2f,%6.2f',
+                        self.name, self.prf['perf_avg'],
+                        self.prf['perf_std'])
         # Negative slacks
         nslacks = self.data[:,RTAPP_COL_SLACK]
         nslacks = nslacks[nslacks < 0]
-        # self._log.debug('Negative Slacks: %s', nslacks)
+        self._log.debug('Negative slacks: %s', nslacks)
         self.prf['slack_sum'] = -nslacks.sum()
-        # self._log.debug('Negative slack [%s] sum: %6.2f',
-        #                 self.name, self.prf['slack_sum'])
+        self._log.debug('Negative slack [%s] sum: %6.2f',
+                        self.name, self.prf['slack_sum'])
         # Slack over run-time
         self.prf['run_sum'] = np.sum(self.data[:,RTAPP_COL_RUN])
         self.prf['slack_pct'] = 100 * self.prf['slack_sum'] / self.prf['run_sum']
-        # self._log.debug('SlackPct [%s]: %6.2f %%', self.name, self.slack_pct)
+        self._log.debug('SlackPct [%s]: %6.2f %%', self.name, self.prf['slack_pct'])
         if nrg is None:
             return
         # Computing EDP
         self.prf['edp1'] = nrg.total * math.pow(self.prf['run_sum'], 1)
-        # self._log.debug('EDP1 [%s]: {%6.2f}', self.name, self.prf['edp1'])
+        self._log.debug('EDP1 [%s]: {%6.2f}', self.name, self.prf['edp1'])
         self.prf['edp2'] = nrg.total * math.pow(self.prf['run_sum'], 2)
-        # self._log.debug('EDP2 [%s]: %6.2f', self.name, self.prf['edp2'])
+        self._log.debug('EDP2 [%s]: %6.2f', self.name, self.prf['edp2'])
         self.prf['edp3'] = nrg.total * math.pow(self.prf['run_sum'], 3)
-        # self._log.debug('EDP3 [%s]: %6.2f', self.name, self.prf['edp3'])
+        self._log.debug('EDP3 [%s]: %6.2f', self.name, self.prf['edp3'])
 # Columns of the per-task rt-app log file
diff --git a/libs/wlgen/wlgen/workload.py b/libs/wlgen/wlgen/workload.py
index 8b630e746..6ee1de025 100644
--- a/libs/wlgen/wlgen/workload.py
+++ b/libs/wlgen/wlgen/workload.py
@@ -110,7 +110,7 @@ class Workload(object):
         mask = 0x0
         for cpu in (cpus or self.target.list_online_cpus()):
             mask |= (1 << cpu)
-        # self._log.debug('0x{0:X}'.format(mask))
+        self._log.debug('CPUs mask for %s: 0x%X', cpus, mask)
         return mask
     def conf(self,
-- GitLab

From c25cc8d70f24e6263b2a6b61b430e3f5ca34ed55 Mon Sep 17 00:00:00 2001
From: Patrick Bellasi
Date: Tue, 22 Nov 2016 14:53:20 +0000
Subject: [PATCH 4/5] libs/utils: cosmetics: update ValueError format strings

Signed-off-by: Patrick Bellasi
---
 libs/utils/env.py | 3 +--
 libs/utils/executor.py | 50 +++++++++++++++++++-----------------
 2 files
changed, 23 insertions(+), 30 deletions(-) diff --git a/libs/utils/env.py b/libs/utils/env.py index 2d952b96f..79c49282d 100644 --- a/libs/utils/env.py +++ b/libs/utils/env.py @@ -845,8 +845,7 @@ class TestEnv(ShareState): self.tftp_deploy(tc['dtb']) else: - raise ValueError('%14s - Kernel installation method not supported', - 'KernelSetup') + raise ValueError('Kernel installation method not supported') # Keep track of last installed kernel self.kernel = tc['kernel'] diff --git a/libs/utils/executor.py b/libs/utils/executor.py index 8bef1419f..abcac6c8f 100644 --- a/libs/utils/executor.py +++ b/libs/utils/executor.py @@ -94,11 +94,11 @@ class Executor(): # Check for mandatory configurations if not self._experiments_conf.get('confs', None): - raise ValueError( - 'Configuration error: missing \'conf\' definitions') + raise ValueError('Configuration error: ' + 'missing "conf" definitions') if not self._experiments_conf.get('wloads', None): - raise ValueError( - 'Configuration error: missing \'wloads\' definitions') + raise ValueError('Configuration error: ' + 'missing "wloads" definitions') self.te = test_env self.target = self.te.target @@ -356,10 +356,9 @@ class Executor(): if 'last' in cpus: return [ self.target.bl.bigs_online[-1] ] return self.target.bl.bigs_online - raise ValueError('Configuration error - ' - 'unsupported [{}] \'cpus\' value for [{}] '\ - 'workload specification'\ - .format(cpus, wl_idx)) + raise ValueError('unsupported [{}] "cpus" value for [{}] ' + 'workload specification' + .format(cpus, wl_idx)) def _wload_task_idxs(self, wl_idx, tasks): if type(tasks) == int: @@ -374,10 +373,9 @@ class Executor(): return range(len([t for t in self.target.core_names if t == self.target.big_core])) - raise ValueError('Configuration error - ' - 'unsupported \'tasks\' value for [{}] '\ - 'RT-App workload specification'\ - .format(wl_idx)) + raise ValueError('unsupported "tasks" value for [{}] RT-App ' + 'workload specification' + .format(wl_idx)) def _wload_rtapp(self, wl_idx, wlspec, cpus): conf = wlspec['conf'] @@ -399,10 +397,9 @@ class Executor(): if task['kind'] not in wlgen.__dict__: self._log.error('RTA task of kind [%s] not supported', task['kind']) - raise ValueError('Configuration error - ' - 'unsupported \'kind\' value for task [{}] '\ - 'in RT-App workload specification'\ - .format(task)) + raise ValueError('unsupported "kind" value for task [{}] ' + 'in RT-App workload specification' + .format(task)) task_ctor = getattr(wlgen, task['kind']) num_tasks = task.get('tasks', 1) task_idxs = self._wload_task_idxs(wl_idx, num_tasks) @@ -439,10 +436,9 @@ class Executor(): cpus=cpus, run_dir=self.te.run_dir) return rtapp - raise ValueError('Configuration error - ' - 'unsupported \'class\' value for [{}] '\ - 'RT-App workload specification'\ - .format(wl_idx)) + raise ValueError('unsupported \'class\' value for [{}] ' + 'RT-App workload specification' + .format(wl_idx)) def _wload_perf_bench(self, wl_idx, wlspec, cpus): conf = wlspec['conf'] @@ -458,10 +454,9 @@ class Executor(): perf_bench.conf(**conf['params']) return perf_bench - raise ValueError('Configuration error - '\ - 'unsupported \'class\' value for [{}] '\ - 'perf bench workload specification'\ - .format(wl_idx)) + raise ValueError('unsupported "class" value for [{}] ' + 'perf bench workload specification' + .format(wl_idx)) def _wload_conf(self, wl_idx, wlspec): @@ -482,10 +477,9 @@ class Executor(): return self._wload_perf_bench(wl_idx, wlspec, cpus) - raise ValueError('Configuration error - ' - 'unsupported \'type\' 
value for [{}] '\
-                         'workload specification'\
-                         .format(wl_idx))
+        raise ValueError('unsupported "type" value for [{}] '
+                         'workload specification'
+                         .format(wl_idx))
     def _wload_init(self, tc, wl_idx):
         tc_idx = tc['tag']
-- GitLab

From f17e608ef1f823e35ef0b3611fba530cf465fcf9 Mon Sep 17 00:00:00 2001
From: Patrick Bellasi
Date: Mon, 28 Nov 2016 15:24:15 +0000
Subject: [PATCH 5/5] ipynb: update all notebooks to use LisaLogging

The new logging API is based on a single configuration file which is loaded
via the LisaLogging::setup() method.

This patch updates all the existing notebooks to use this new and unified
API.

Signed-off-by: Patrick Bellasi
---
 ipynb/android/Android_Workloads.ipynb | 8 ++------
 ipynb/android/benchmarks/Android_PCMark.ipynb | 10 +++-------
 .../android/workloads/Android_Recents_Fling.ipynb | 10 +++-------
 ipynb/android/workloads/Android_YouTube.ipynb | 10 +++-------
 ipynb/chromeos/ChromeOS_TestThat.ipynb | 10 +++-------
 ipynb/devlib/examples/cgroups.ipynb | 14 +++-----------
 ipynb/energy/EnergyModel_ClusterEnergy.ipynb | 10 +++-------
 ipynb/energy/EnergyModel_SystemEnergy.ipynb | 13 ++++---------
 ipynb/examples/energy_meter/EnergyMeter_ACME.ipynb | 9 ++-------
 ipynb/examples/energy_meter/EnergyMeter_AEP.ipynb | 9 ++-------
 .../examples/energy_meter/EnergyMeter_HWMON.ipynb | 9 ++-------
 .../TraceAnalysis_FunctionsProfiling.ipynb | 9 ++-------
 .../trace_analysis/TraceAnalysis_IdleStates.ipynb | 8 ++------
 .../TraceAnalysis_TasksLatencies.ipynb | 9 ++-------
 ipynb/profiling/kernel_functions_profiling.ipynb | 9 ++-------
 ipynb/releases/ReleaseNotes_v16.10.ipynb | 10 +++-------
 ipynb/sched_dvfs/smoke_test.ipynb | 10 +++-------
 ipynb/sched_tune/stune_juno_rampL.ipynb | 9 ++-------
 ipynb/sched_tune/stune_juno_taskonly_rampL.ipynb | 9 ++-------
 ipynb/sched_tune/stune_oak_rampL.ipynb | 9 ++-------
 ipynb/thermal/ThermalSensorCharacterisation.ipynb | 8 ++------
 ipynb/trappy/example_custom_events.ipynb | 10 +++-------
 ipynb/tutorial/00_LisaInANutshell.ipynb | 9 ++-------
 ipynb/tutorial/02_TestEnvUsage.ipynb | 9 ++-------
 ipynb/tutorial/03_WlGenUsage.ipynb | 9 ++-------
 ipynb/tutorial/04_ExecutorUsage.ipynb | 7 +++----
 ipynb/tutorial/05_TrappyUsage.ipynb | 8 ++------
 ipynb/tutorial/06_TraceAnalysis.ipynb | 8 ++------
 ipynb/tutorial/07_PerfAnalysis.ipynb | 9 ++-------
 ipynb/utils/executor_example.ipynb | 8 ++------
 ipynb/utils/testenv_example.ipynb | 10 +++-------
 ipynb/wlgen/rtapp_examples.ipynb | 9 ++-------
 ipynb/wlgen/simple_rtapp.ipynb | 8 ++------
 33 files changed, 79 insertions(+), 227 deletions(-)

diff --git a/ipynb/android/Android_Workloads.ipynb b/ipynb/android/Android_Workloads.ipynb
index 2b99359d3..ecfad25ec 100644
--- a/ipynb/android/Android_Workloads.ipynb
+++ b/ipynb/android/Android_Workloads.ipynb
@@ -16,12 +16,8 @@
    "outputs": [],
    "source": [
     "import logging\n",
-    "reload(logging)\n",
-    "log_fmt = '%(asctime)-9s %(levelname)-8s: %(message)s'\n",
-    "logging.basicConfig(format=log_fmt)\n",
-    "\n",
-    "# Change to info once the notebook runs ok\n",
-    "logging.getLogger().setLevel(logging.INFO)"
+    "from conf import LisaLogging\n",
+    "LisaLogging.setup()"
   ]
  },
 {
diff --git a/ipynb/android/benchmarks/Android_PCMark.ipynb b/ipynb/android/benchmarks/Android_PCMark.ipynb
index e8f432fd1..e014ad2fa 100644
--- a/ipynb/android/benchmarks/Android_PCMark.ipynb
+++ b/ipynb/android/benchmarks/Android_PCMark.ipynb
@@ -32,12 +32,8 @@
    "outputs": [],
    "source": [
     "import logging\n",
-    "reload(logging)\n",
-    "log_fmt = '%(asctime)-9s %(levelname)-8s: %(message)s'\n",
-
"logging.basicConfig(format=log_fmt)\n", - "\n", - "# Change to info once the notebook runs ok\n", - "logging.getLogger().setLevel(logging.INFO)" + "from conf import LisaLogging\n", + "LisaLogging.setup()" ] }, { @@ -513,7 +509,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython2", - "version": "2.7.6" + "version": "2.7.9" }, "toc": { "toc_cell": false, diff --git a/ipynb/android/workloads/Android_Recents_Fling.ipynb b/ipynb/android/workloads/Android_Recents_Fling.ipynb index 7926371dd..8b39a49f8 100644 --- a/ipynb/android/workloads/Android_Recents_Fling.ipynb +++ b/ipynb/android/workloads/Android_Recents_Fling.ipynb @@ -30,12 +30,8 @@ "outputs": [], "source": [ "import logging\n", - "reload(logging)\n", - "log_fmt = '%(asctime)-9s %(levelname)-8s: %(message)s'\n", - "logging.basicConfig(format=log_fmt)\n", - "\n", - "# Change to info once the notebook runs ok\n", - "logging.getLogger().setLevel(logging.INFO)" + "from conf import LisaLogging\n", + "LisaLogging.setup()" ] }, { @@ -585,7 +581,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython2", - "version": "2.7.6" + "version": "2.7.9" }, "toc": { "toc_cell": false, diff --git a/ipynb/android/workloads/Android_YouTube.ipynb b/ipynb/android/workloads/Android_YouTube.ipynb index a64829013..f430553f8 100644 --- a/ipynb/android/workloads/Android_YouTube.ipynb +++ b/ipynb/android/workloads/Android_YouTube.ipynb @@ -28,12 +28,8 @@ "outputs": [], "source": [ "import logging\n", - "reload(logging)\n", - "log_fmt = '%(asctime)-9s %(levelname)-8s: %(message)s'\n", - "logging.basicConfig(format=log_fmt)\n", - "\n", - "# Change to info once the notebook runs ok\n", - "logging.getLogger().setLevel(logging.INFO)" + "from conf import LisaLogging\n", + "LisaLogging.setup()" ] }, { @@ -521,7 +517,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython2", - "version": "2.7.6" + "version": "2.7.9" }, "toc": { "toc_cell": false, diff --git a/ipynb/chromeos/ChromeOS_TestThat.ipynb b/ipynb/chromeos/ChromeOS_TestThat.ipynb index 6eac8ea21..3b5576697 100644 --- a/ipynb/chromeos/ChromeOS_TestThat.ipynb +++ b/ipynb/chromeos/ChromeOS_TestThat.ipynb @@ -43,12 +43,8 @@ "outputs": [], "source": [ "import logging\n", - "reload(logging)\n", - "log_fmt = '%(asctime)-9s %(levelname)-8s: %(message)s'\n", - "logging.basicConfig(format=log_fmt)\n", - "\n", - "# Change to info once the notebook runs ok\n", - "logging.getLogger().setLevel(logging.DEBUG)" + "from conf import LisaLogging\n", + "LisaLogging.setup()" ] }, { @@ -714,7 +710,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython2", - "version": "2.7.6" + "version": "2.7.9" }, "toc": { "toc_cell": false, diff --git a/ipynb/devlib/examples/cgroups.ipynb b/ipynb/devlib/examples/cgroups.ipynb index f3187886d..344552ec8 100644 --- a/ipynb/devlib/examples/cgroups.ipynb +++ b/ipynb/devlib/examples/cgroups.ipynb @@ -9,16 +9,8 @@ "outputs": [], "source": [ "import logging\n", - "reload(logging)\n", - "logging.basicConfig(\n", - " format='%(asctime)-9s %(levelname)-8s: %(message)s',\n", - " datefmt='%I:%M:%S')\n", - "\n", - "# Enable logging at INFO level\n", - "logging.getLogger().setLevel(logging.INFO)\n", - "# Uncomment the following lines to enabled CGroups verbose logging\n", - "logging.getLogger('cgroups').setLevel(logging.INFO)\n", - "logging.getLogger('cgroups.cpuset').setLevel(logging.INFO)" + "from conf import LisaLogging\n", + "LisaLogging.setup()" ] }, { @@ -1261,7 +1253,7 @@ "name": "python", 
"nbconvert_exporter": "python", "pygments_lexer": "ipython2", - "version": "2.7.12" + "version": "2.7.9" }, "toc": { "toc_cell": false, diff --git a/ipynb/energy/EnergyModel_ClusterEnergy.ipynb b/ipynb/energy/EnergyModel_ClusterEnergy.ipynb index d444a4c90..cd46046f0 100644 --- a/ipynb/energy/EnergyModel_ClusterEnergy.ipynb +++ b/ipynb/energy/EnergyModel_ClusterEnergy.ipynb @@ -32,12 +32,8 @@ "outputs": [], "source": [ "import logging\n", - "reload(logging)\n", - "log_fmt = '%(asctime)-9s %(levelname)-8s: %(message)s'\n", - "logging.basicConfig(format=log_fmt)\n", - "\n", - "# Change to info once the notebook runs ok\n", - "logging.getLogger().setLevel(logging.INFO)" + "from conf import LisaLogging\n", + "LisaLogging.setup()" ] }, { @@ -1682,7 +1678,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython2", - "version": "2.7.6" + "version": "2.7.9" }, "toc": { "toc_cell": false, diff --git a/ipynb/energy/EnergyModel_SystemEnergy.ipynb b/ipynb/energy/EnergyModel_SystemEnergy.ipynb index d2d12419b..6c242d70e 100644 --- a/ipynb/energy/EnergyModel_SystemEnergy.ipynb +++ b/ipynb/energy/EnergyModel_SystemEnergy.ipynb @@ -44,12 +44,8 @@ "outputs": [], "source": [ "import logging\n", - "reload(logging)\n", - "log_fmt = '%(asctime)-9s %(levelname)-8s: %(message)s'\n", - "logging.basicConfig(format=log_fmt)\n", - "\n", - "# Change to info once the notebook runs ok\n", - "logging.getLogger().setLevel(logging.INFO)" + "from conf import LisaLogging\n", + "LisaLogging.setup()" ] }, { @@ -288,8 +284,7 @@ { "name": "stderr", "output_type": "stream", - "text": [ - ] + "text": [] } ], "source": [ @@ -2026,7 +2021,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython2", - "version": "2.7.6" + "version": "2.7.9" }, "toc": { "toc_cell": false, diff --git a/ipynb/examples/energy_meter/EnergyMeter_ACME.ipynb b/ipynb/examples/energy_meter/EnergyMeter_ACME.ipynb index 13fc942ea..f80b1b47f 100644 --- a/ipynb/examples/energy_meter/EnergyMeter_ACME.ipynb +++ b/ipynb/examples/energy_meter/EnergyMeter_ACME.ipynb @@ -27,13 +27,8 @@ "outputs": [], "source": [ "import logging\n", - "reload(logging)\n", - "logging.basicConfig(\n", - " format='%(asctime)-9s %(levelname)-8s: %(message)s',\n", - " datefmt='%I:%M:%S')\n", - "\n", - "# Enable logging at INFO level\n", - "logging.getLogger().setLevel(logging.INFO)" + "from conf import LisaLogging\n", + "LisaLogging.setup()" ] }, { diff --git a/ipynb/examples/energy_meter/EnergyMeter_AEP.ipynb b/ipynb/examples/energy_meter/EnergyMeter_AEP.ipynb index 0ceeeb684..64e752fbb 100644 --- a/ipynb/examples/energy_meter/EnergyMeter_AEP.ipynb +++ b/ipynb/examples/energy_meter/EnergyMeter_AEP.ipynb @@ -36,13 +36,8 @@ "outputs": [], "source": [ "import logging\n", - "reload(logging)\n", - "logging.basicConfig(\n", - " format='%(asctime)-9s %(levelname)-8s: %(message)s',\n", - " datefmt='%I:%M:%S')\n", - "\n", - "# Enable logging at INFO level\n", - "logging.getLogger().setLevel(logging.INFO)" + "from conf import LisaLogging\n", + "LisaLogging.setup()" ] }, { diff --git a/ipynb/examples/energy_meter/EnergyMeter_HWMON.ipynb b/ipynb/examples/energy_meter/EnergyMeter_HWMON.ipynb index 3e44599c9..08754ebe9 100644 --- a/ipynb/examples/energy_meter/EnergyMeter_HWMON.ipynb +++ b/ipynb/examples/energy_meter/EnergyMeter_HWMON.ipynb @@ -27,13 +27,8 @@ "outputs": [], "source": [ "import logging\n", - "reload(logging)\n", - "logging.basicConfig(\n", - " format='%(asctime)-9s %(levelname)-8s: %(message)s',\n", - " datefmt='%I:%M:%S')\n", - "\n", - "# 
Enable logging at INFO level\n", - "logging.getLogger().setLevel(logging.INFO)" + "from conf import LisaLogging\n", + "LisaLogging.setup()" ] }, { diff --git a/ipynb/examples/trace_analysis/TraceAnalysis_FunctionsProfiling.ipynb b/ipynb/examples/trace_analysis/TraceAnalysis_FunctionsProfiling.ipynb index 860bb1cd2..eb819d577 100644 --- a/ipynb/examples/trace_analysis/TraceAnalysis_FunctionsProfiling.ipynb +++ b/ipynb/examples/trace_analysis/TraceAnalysis_FunctionsProfiling.ipynb @@ -27,13 +27,8 @@ "outputs": [], "source": [ "import logging\n", - "reload(logging)\n", - "logging.basicConfig(\n", - " format='%(asctime)-9s %(levelname)-8s: %(message)s',\n", - " datefmt='%I:%M:%S')\n", - "\n", - "# Enable logging at INFO level\n", - "logging.getLogger().setLevel(logging.INFO)" + "from conf import LisaLogging\n", + "LisaLogging.setup()" ] }, { diff --git a/ipynb/examples/trace_analysis/TraceAnalysis_IdleStates.ipynb b/ipynb/examples/trace_analysis/TraceAnalysis_IdleStates.ipynb index c275a6a41..f1aa6b0f9 100644 --- a/ipynb/examples/trace_analysis/TraceAnalysis_IdleStates.ipynb +++ b/ipynb/examples/trace_analysis/TraceAnalysis_IdleStates.ipynb @@ -31,12 +31,8 @@ "outputs": [], "source": [ "import logging\n", - "reload(logging)\n", - "log_fmt = '%(asctime)-9s %(levelname)-8s: %(message)s'\n", - "logging.basicConfig(format=log_fmt)\n", - "\n", - "# Change to info once the notebook runs ok\n", - "logging.getLogger().setLevel(logging.INFO)" + "from conf import LisaLogging\n", + "LisaLogging.setup()" ] }, { diff --git a/ipynb/examples/trace_analysis/TraceAnalysis_TasksLatencies.ipynb b/ipynb/examples/trace_analysis/TraceAnalysis_TasksLatencies.ipynb index d1e5a250f..784ce93a5 100644 --- a/ipynb/examples/trace_analysis/TraceAnalysis_TasksLatencies.ipynb +++ b/ipynb/examples/trace_analysis/TraceAnalysis_TasksLatencies.ipynb @@ -30,13 +30,8 @@ "outputs": [], "source": [ "import logging\n", - "reload(logging)\n", - "logging.basicConfig(\n", - " format='%(asctime)-9s %(levelname)-8s: %(message)s',\n", - " datefmt='%I:%M:%S')\n", - "\n", - "# Enable logging at INFO level\n", - "logging.getLogger().setLevel(logging.INFO)" + "from conf import LisaLogging\n", + "LisaLogging.setup()" ] }, { diff --git a/ipynb/profiling/kernel_functions_profiling.ipynb b/ipynb/profiling/kernel_functions_profiling.ipynb index ce6bb64ed..4e23dda2d 100644 --- a/ipynb/profiling/kernel_functions_profiling.ipynb +++ b/ipynb/profiling/kernel_functions_profiling.ipynb @@ -29,13 +29,8 @@ "outputs": [], "source": [ "import logging\n", - "reload(logging)\n", - "logging.basicConfig(\n", - " format='%(asctime)-9s %(levelname)-8s: %(message)s',\n", - " datefmt='%I:%M:%S')\n", - "\n", - "# Enable logging at INFO level\n", - "logging.getLogger().setLevel(logging.INFO)" + "from conf import LisaLogging\n", + "LisaLogging.setup()" ] }, { diff --git a/ipynb/releases/ReleaseNotes_v16.10.ipynb b/ipynb/releases/ReleaseNotes_v16.10.ipynb index cd180ed86..3e3ece946 100644 --- a/ipynb/releases/ReleaseNotes_v16.10.ipynb +++ b/ipynb/releases/ReleaseNotes_v16.10.ipynb @@ -12,12 +12,8 @@ "outputs": [], "source": [ "import logging\n", - "reload(logging)\n", - "log_fmt = '%(asctime)-9s %(levelname)-8s: %(message)s'\n", - "logging.basicConfig(format=log_fmt)\n", - "\n", - "# Change to info once the notebook runs ok\n", - "logging.getLogger().setLevel(logging.INFO)\n", + "from conf import LisaLogging\n", + "LisaLogging.setup()\n", "\n", "# Generate plots inline\n", "%matplotlib inline\n", @@ -987,7 +983,7 @@ }, "outputs": [], "source": [ - 
"#logging.getLogger().setLevel(logging.DEBUG)" + "# logging.getLogger().setLevel(logging.DEBUG)" ] }, { diff --git a/ipynb/sched_dvfs/smoke_test.ipynb b/ipynb/sched_dvfs/smoke_test.ipynb index f1699ec44..ed9cb706b 100644 --- a/ipynb/sched_dvfs/smoke_test.ipynb +++ b/ipynb/sched_dvfs/smoke_test.ipynb @@ -9,15 +9,11 @@ "outputs": [], "source": [ "import logging\n", - "reload(logging)\n", - "logging.basicConfig(\n", - " format='%(asctime)-9s %(levelname)-8s: %(message)s',\n", - " datefmt='%I:%M:%S')\n", + "from conf import LisaLogging\n", + "LisaLogging.setup()\n", "\n", - "# Enable logging at INFO level\n", - "logging.getLogger().setLevel(logging.INFO)\n", "# Uncomment the follwing line to enabled devlib debugging statements\n", - "#logging.getLogger('ssh').setLevel(logging.DEBUG)" + "# logging.getLogger('ssh').setLevel(logging.DEBUG)" ] }, { diff --git a/ipynb/sched_tune/stune_juno_rampL.ipynb b/ipynb/sched_tune/stune_juno_rampL.ipynb index 61e9e5297..8b5ded5bc 100644 --- a/ipynb/sched_tune/stune_juno_rampL.ipynb +++ b/ipynb/sched_tune/stune_juno_rampL.ipynb @@ -17,13 +17,8 @@ "outputs": [], "source": [ "import logging\n", - "reload(logging)\n", - "logging.basicConfig(\n", - " format='%(asctime)-9s %(levelname)-8s: %(message)s',\n", - " datefmt='%I:%M:%S')\n", - "\n", - "# Enable logging at INFO level\n", - "logging.getLogger().setLevel(logging.INFO)" + "from conf import LisaLogging\n", + "LisaLogging.setup()" ] }, { diff --git a/ipynb/sched_tune/stune_juno_taskonly_rampL.ipynb b/ipynb/sched_tune/stune_juno_taskonly_rampL.ipynb index 7c0808db0..94be1c545 100644 --- a/ipynb/sched_tune/stune_juno_taskonly_rampL.ipynb +++ b/ipynb/sched_tune/stune_juno_taskonly_rampL.ipynb @@ -17,13 +17,8 @@ "outputs": [], "source": [ "import logging\n", - "reload(logging)\n", - "logging.basicConfig(\n", - " format='%(asctime)-9s %(levelname)-8s: %(message)s',\n", - " datefmt='%I:%M:%S')\n", - "\n", - "# Enable logging at INFO level\n", - "logging.getLogger().setLevel(logging.INFO)" + "from conf import LisaLogging\n", + "LisaLogging.setup()" ] }, { diff --git a/ipynb/sched_tune/stune_oak_rampL.ipynb b/ipynb/sched_tune/stune_oak_rampL.ipynb index a6ebbc4e3..cc74fa3eb 100644 --- a/ipynb/sched_tune/stune_oak_rampL.ipynb +++ b/ipynb/sched_tune/stune_oak_rampL.ipynb @@ -17,13 +17,8 @@ "outputs": [], "source": [ "import logging\n", - "reload(logging)\n", - "logging.basicConfig(\n", - " format='%(asctime)-9s %(levelname)-8s: %(message)s',\n", - " datefmt='%I:%M:%S')\n", - "\n", - "# Enable logging at INFO level\n", - "logging.getLogger().setLevel(logging.INFO)" + "from conf import LisaLogging\n", + "LisaLogging.setup()" ] }, { diff --git a/ipynb/thermal/ThermalSensorCharacterisation.ipynb b/ipynb/thermal/ThermalSensorCharacterisation.ipynb index a55b196ac..699edd4a5 100644 --- a/ipynb/thermal/ThermalSensorCharacterisation.ipynb +++ b/ipynb/thermal/ThermalSensorCharacterisation.ipynb @@ -25,12 +25,8 @@ "outputs": [], "source": [ "import logging\n", - "reload(logging)\n", - "log_fmt = '%(asctime)-9s %(levelname)-8s: %(message)s'\n", - "logging.basicConfig(format=log_fmt)\n", - "\n", - "# Change to info once the notebook runs ok\n", - "logging.getLogger().setLevel(logging.INFO)" + "from conf import LisaLogging\n", + "LisaLogging.setup()" ] }, { diff --git a/ipynb/trappy/example_custom_events.ipynb b/ipynb/trappy/example_custom_events.ipynb index 261a9897a..2b1489d0a 100644 --- a/ipynb/trappy/example_custom_events.ipynb +++ b/ipynb/trappy/example_custom_events.ipynb @@ -9,15 +9,11 @@ "outputs": [], "source": [ "import 
logging\n", - "reload(logging)\n", - "logging.basicConfig(\n", - " format='%(asctime)-9s %(levelname)-8s: %(message)s',\n", - " datefmt='%I:%M:%S')\n", + "from conf import LisaLogging\n", + "LisaLogging.setup()\n", "\n", - "# Enable logging at INFO level\n", - "logging.getLogger().setLevel(logging.INFO)\n", "# Uncomment the follwing line to enabled devlib debugging statements\n", - "#logging.getLogger('ssh').setLevel(logging.DEBUG)" + "# logging.getLogger('ssh').setLevel(logging.DEBUG)" ] }, { diff --git a/ipynb/tutorial/00_LisaInANutshell.ipynb b/ipynb/tutorial/00_LisaInANutshell.ipynb index a0a9f78c9..15bbf95b5 100644 --- a/ipynb/tutorial/00_LisaInANutshell.ipynb +++ b/ipynb/tutorial/00_LisaInANutshell.ipynb @@ -100,13 +100,8 @@ "outputs": [], "source": [ "import logging\n", - "reload(logging)\n", - "logging.basicConfig(\n", - " format='%(asctime)-9s %(levelname)-8s: %(message)s',\n", - " datefmt='%I:%M:%S')\n", - "\n", - "# Enable logging at INFO level\n", - "logging.getLogger().setLevel(logging.INFO)" + "from conf import LisaLogging\n", + "LisaLogging.setup()" ] }, { diff --git a/ipynb/tutorial/02_TestEnvUsage.ipynb b/ipynb/tutorial/02_TestEnvUsage.ipynb index 2d754fb85..72faaa2f3 100644 --- a/ipynb/tutorial/02_TestEnvUsage.ipynb +++ b/ipynb/tutorial/02_TestEnvUsage.ipynb @@ -31,13 +31,8 @@ "outputs": [], "source": [ "import logging\n", - "reload(logging)\n", - "logging.basicConfig(\n", - " format='%(asctime)-9s %(levelname)-8s: %(message)s',\n", - " datefmt='%I:%M:%S')\n", - "\n", - "# Enable logging at INFO level\n", - "logging.getLogger().setLevel(logging.DEBUG)" + "from conf import LisaLogging\n", + "LisaLogging.setup()" ] }, { diff --git a/ipynb/tutorial/03_WlGenUsage.ipynb b/ipynb/tutorial/03_WlGenUsage.ipynb index aed96f51e..5d1f2fc15 100644 --- a/ipynb/tutorial/03_WlGenUsage.ipynb +++ b/ipynb/tutorial/03_WlGenUsage.ipynb @@ -30,13 +30,8 @@ "outputs": [], "source": [ "import logging\n", - "reload(logging)\n", - "logging.basicConfig(\n", - " format='%(asctime)-9s %(levelname)-8s: %(message)s',\n", - " datefmt='%I:%M:%S')\n", - "\n", - "# Enable logging at INFO level\n", - "logging.getLogger().setLevel(logging.INFO)" + "from conf import LisaLogging\n", + "LisaLogging.setup()" ] }, { diff --git a/ipynb/tutorial/04_ExecutorUsage.ipynb b/ipynb/tutorial/04_ExecutorUsage.ipynb index dd6c53ba9..239f609c4 100644 --- a/ipynb/tutorial/04_ExecutorUsage.ipynb +++ b/ipynb/tutorial/04_ExecutorUsage.ipynb @@ -31,9 +31,8 @@ "outputs": [], "source": [ "import logging\n", - "reload(logging)\n", - "log_fmt = '%(asctime)-9s %(levelname)-8s: %(message)s'\n", - "logging.basicConfig(format=log_fmt)" + "from conf import LisaLogging\n", + "LisaLogging.setup()" ] }, { @@ -45,7 +44,7 @@ "outputs": [], "source": [ "# Execute this cell to enabled executor debugging statements\n", - "logging.getLogger().setLevel(logging.DEBUG)" + "logging.getLogger('Executor').setLevel(logging.DEBUG)" ] }, { diff --git a/ipynb/tutorial/05_TrappyUsage.ipynb b/ipynb/tutorial/05_TrappyUsage.ipynb index 02bc2d683..27ed4d6db 100644 --- a/ipynb/tutorial/05_TrappyUsage.ipynb +++ b/ipynb/tutorial/05_TrappyUsage.ipynb @@ -25,12 +25,8 @@ "outputs": [], "source": [ "import logging\n", - "reload(logging)\n", - "logging.basicConfig(\n", - " format='%(asctime)-9s %(levelname)-8s: %(message)s',\n", - " datefmt='%I:%M:%S')\n", - "# Enable logging at INFO level\n", - "logging.getLogger().setLevel(logging.INFO)" + "from conf import LisaLogging\n", + "LisaLogging.setup()" ] }, { diff --git a/ipynb/tutorial/06_TraceAnalysis.ipynb 
b/ipynb/tutorial/06_TraceAnalysis.ipynb index 78afa3861..f9ed78b9b 100644 --- a/ipynb/tutorial/06_TraceAnalysis.ipynb +++ b/ipynb/tutorial/06_TraceAnalysis.ipynb @@ -26,12 +26,8 @@ "outputs": [], "source": [ "import logging\n", - "reload(logging)\n", - "logging.basicConfig(\n", - " format='%(asctime)-9s %(levelname)-8s: %(message)s',\n", - " datefmt='%I:%M:%S')\n", - "# Enable logging at INFO level\n", - "logging.getLogger().setLevel(logging.INFO)" + "from conf import LisaLogging\n", + "LisaLogging.setup()" ] }, { diff --git a/ipynb/tutorial/07_PerfAnalysis.ipynb b/ipynb/tutorial/07_PerfAnalysis.ipynb index 0f332484a..9fce18f15 100644 --- a/ipynb/tutorial/07_PerfAnalysis.ipynb +++ b/ipynb/tutorial/07_PerfAnalysis.ipynb @@ -24,13 +24,8 @@ "outputs": [], "source": [ "import logging\n", - "reload(logging)\n", - "logging.basicConfig(\n", - " format='%(asctime)-9s %(levelname)-8s: %(message)s',\n", - " datefmt='%I:%M:%S')\n", - "\n", - "# Enable logging at INFO level\n", - "logging.getLogger().setLevel(logging.INFO)" + "from conf import LisaLogging\n", + "LisaLogging.setup()" ] }, { diff --git a/ipynb/utils/executor_example.ipynb b/ipynb/utils/executor_example.ipynb index ff891f64b..cc8fbe391 100644 --- a/ipynb/utils/executor_example.ipynb +++ b/ipynb/utils/executor_example.ipynb @@ -9,12 +9,8 @@ "outputs": [], "source": [ "import logging\n", - "reload(logging)\n", - "log_fmt = '%(asctime)-9s %(levelname)-8s: %(message)s'\n", - "logging.basicConfig(format=log_fmt)\n", - "\n", - "# Change to info once the notebook runs ok\n", - "#logging.getLogger().setLevel(logging.DEBUG)" + "from conf import LisaLogging\n", + "LisaLogging.setup()" ] }, { diff --git a/ipynb/utils/testenv_example.ipynb b/ipynb/utils/testenv_example.ipynb index cdbab0dee..f3b950e09 100644 --- a/ipynb/utils/testenv_example.ipynb +++ b/ipynb/utils/testenv_example.ipynb @@ -9,14 +9,10 @@ "outputs": [], "source": [ "import logging\n", - "reload(logging)\n", - "logging.basicConfig(\n", - " format='%(asctime)-9s %(levelname)-8s: %(message)s',\n", - " datefmt='%I:%M:%S')\n", + "from conf import LisaLogging\n", + "LisaLogging.setup()\n", "\n", - "# Enable logging at INFO level\n", - "logging.getLogger().setLevel(logging.INFO)\n", - "# Uncomment the follwing line to enabled devlib debugging statements\n", + "# Comment the follwing line to disable devlib debugging statements\n", "logging.getLogger('ssh').setLevel(logging.DEBUG)" ] }, diff --git a/ipynb/wlgen/rtapp_examples.ipynb b/ipynb/wlgen/rtapp_examples.ipynb index 768b28e69..a109d14a0 100644 --- a/ipynb/wlgen/rtapp_examples.ipynb +++ b/ipynb/wlgen/rtapp_examples.ipynb @@ -9,13 +9,8 @@ "outputs": [], "source": [ "import logging\n", - "reload(logging)\n", - "logging.basicConfig(\n", - " format='%(asctime)-9s %(levelname)-8s: %(message)s',\n", - " datefmt='%I:%M:%S')\n", - "\n", - "# Enable logging at INFO level\n", - "logging.getLogger().setLevel(logging.INFO)" + "from conf import LisaLogging\n", + "LisaLogging.setup()" ] }, { diff --git a/ipynb/wlgen/simple_rtapp.ipynb b/ipynb/wlgen/simple_rtapp.ipynb index a14398b81..866a0c4f3 100644 --- a/ipynb/wlgen/simple_rtapp.ipynb +++ b/ipynb/wlgen/simple_rtapp.ipynb @@ -9,13 +9,9 @@ "outputs": [], "source": [ "import logging\n", - "reload(logging)\n", - "logging.basicConfig(\n", - " format='%(asctime)-9s %(levelname)-8s: %(message)s',\n", - " datefmt='%I:%M:%S')\n", + "from conf import LisaLogging\n", + "LisaLogging.setup()\n", "\n", - "# Enable logging at INFO level\n", - "logging.getLogger().setLevel(logging.INFO)\n", "# Comment the follwing 
line to disable devlib debugging statements\n", "# logging.getLogger('ssh').setLevel(logging.DEBUG)" ] -- GitLab
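
Taken together, the series converges on a single usage pattern for notebooks
and tests. The following is a minimal sketch, not taken from the patches
above: it assumes it is run from the LISA root (so that libs/utils/conf.py is
importable, as in the updated notebooks) and only uses calls shown in this
series, i.e. LisaLogging.setup() with no arguments plus the standard logging
module for per-module tweaks:

    import logging
    from conf import LisaLogging

    # Load logging.conf and configure the root logger
    # (console + lisa.log by default)
    LisaLogging.setup()

    # Every LISA module logs through a logger named after the module
    # ('TestEnv', 'Executor', 'Workload', 'RTApp', ...); its verbosity can be
    # raised either by uncommenting the matching section in logging.conf or
    # at run time:
    logging.getLogger('Executor').setLevel(logging.DEBUG)

    # Library code follows the same pattern internally, e.g.:
    log = logging.getLogger('Executor')
    log.debug('debug messages from this module are now visible')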