diff --git a/libs/devlib b/libs/devlib index 680406bc376990d42629ff56b3d4d8dbbc25e0dd..05215e7e1b6077bace8305ed94e17ff5cd6cfc0d 160000 --- a/libs/devlib +++ b/libs/devlib @@ -1 +1 @@ -Subproject commit 680406bc376990d42629ff56b3d4d8dbbc25e0dd +Subproject commit 05215e7e1b6077bace8305ed94e17ff5cd6cfc0d diff --git a/libs/utils/__init__.py b/libs/utils/__init__.py index 075a73f28df91be20b8594084ce1ff1ba6d87a72..294c6a063c041e8e2322bb0e67e2e9c2d785421b 100644 --- a/libs/utils/__init__.py +++ b/libs/utils/__init__.py @@ -28,7 +28,7 @@ from perf_analysis import PerfAnalysis from report import Report -import android - from analysis_register import AnalysisRegister from analysis_module import AnalysisModule + +import android diff --git a/libs/utils/android/__init__.py b/libs/utils/android/__init__.py index 23b868f9893c4267ba31135a1babcc978d833327..3255d69b38f7b4830367008c0f545c941375d811 100644 --- a/libs/utils/android/__init__.py +++ b/libs/utils/android/__init__.py @@ -20,3 +20,24 @@ from screen import Screen from system import System from workload import Workload +from benchmark import LisaBenchmark + +# Initialization of Android Workloads +import os +import sys + +from glob import glob +from importlib import import_module + +# Add workloads dir to system path +workloads_dir = os.path.dirname(os.path.abspath(__file__)) +workloads_dir = os.path.join(workloads_dir, 'workloads') +sys.path.insert(0, workloads_dir) + +for filepath in glob(os.path.join(workloads_dir, '*.py')): + filename = os.path.splitext(os.path.basename(filepath))[0] + # Ignore __init__ files + if filename.startswith('__'): + continue + # Import workload module + import_module(filename) diff --git a/libs/utils/android/benchmark.py b/libs/utils/android/benchmark.py new file mode 100644 index 0000000000000000000000000000000000000000..0f804ef405a990482147bae095dde45db7c9f225 --- /dev/null +++ b/libs/utils/android/benchmark.py @@ -0,0 +1,289 @@ +# SPDX-License-Identifier: Apache-2.0 +# +# Copyright (C) 2015, ARM Limited and contributors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import argparse +import logging +import os +import select + +from subprocess import Popen, PIPE +from time import sleep + +from conf import LisaLogging +from android import System, Workload +from env import TestEnv + +from devlib.utils.misc import memoized +from devlib.utils.android import fastboot_command + +class LisaBenchmark(object): + """ + A base class for the execution of custom LISA benchmarks + + This class is intended to be subclassed in order to create a custom + benchmark execution for LISA. + It sets up the TestEnv and provides convenience methods for + test environment setup, execution and post-processing. + + Subclasses should provide a bm_conf to set up the TestEnv and + a set of optional callback methods to configure a test environment + and process collected data. + + Example users of this class can be found under LISA's tests/benchmarks + directory.
+ """ + + bm_conf = None + """Override this with a dictionary or JSON path to configure the TestEnv""" + + bm_name = None + """Override this with the name of the LISA benchmark to run""" + + bm_params = None + """Override this with the set of parameters for the LISA benchmark to run""" + + bm_collect = None + """Override this with the set of data to collect during test execution""" + + def benchmarkInit(self): + """ + Code executed before running the benchmark + """ + pass + + def benchmarkFinalize(self): + """ + Code executed after running the benchmark + """ + pass + +################################################################################ +# Private Interface + + @memoized + def _parseCommandLine(self): + + parser = argparse.ArgumentParser( + description='LISA Benchmark Configuration') + + # Bootup settings + parser.add_argument('--boot-image', type=str, + default=None, + help='Path of the Android boot.img to be used') + parser.add_argument('--boot-timeout', type=int, + default=20, + help='Timeout in [s] to wait after a reboot (default 20)') + + # Android settings + parser.add_argument('--android-device', type=str, + default=None, + help='Identifier of the Android target to use') + parser.add_argument('--android-home', type=str, + default=None, + help='Path used to configure ANDROID_HOME') + + # Test customization + parser.add_argument('--results-dir', type=str, + default=None, + help='Results folder, ' + 'if specified overrides test defaults') + parser.add_argument('--collect', type=str, + default=None, + help='Set of metrics to collect, ' + 'e.g. "energy systrace_30" to sample energy and collect a 30s systrace, ' + 'if specified overrides test defaults') + + # Measurements settings + parser.add_argument('--iio-channel-map', type=str, + default=None, + help='List of IIO channels to sample, ' + 'e.g. 
"ch0:0,ch3:1" to sample CHs 0 and 3, ' + 'if specified overrides test defaults') + + # Parse command line arguments + return parser.parse_args() + + + def _getBmConf(self): + if self.bm_conf is None: + msg = 'Benchmark subclasses must override the `bm_conf` attribute' + raise NotImplementedError(msg) + + # Override default configuration with command line parameters + if self.args.android_device: + self.bm_conf['device'] = self.args.android_device + if self.args.android_home: + self.bm_conf['ANDROID_HOME'] = self.args.android_home + if self.args.results_dir: + self.bm_conf['results_dir'] = self.args.results_dir + if self.args.collect: + self.bm_collect = self.args.collect + + # Override energy meter configuration + if self.args.iio_channel_map: + em = { + 'instrument' : 'acme', + 'channel_map' : {}, + } + for ch in self.args.iio_channel_map.split(','): + ch_name, ch_id = ch.split(':') + em['channel_map'][ch_name] = ch_id + self.bm_conf['emeter'] = em + self._log.info('Using ACME energy meter channels: %s', em) + + # Override EM if energy collection not required + if 'energy' not in self.bm_collect: + try: + self.bm_conf.pop('emeter') + except: + pass + + return self.bm_conf + + def _getWorkload(self): + if self.bm_name is None: + msg = 'Benchmark subclasses must override the `bm_name` attribute' + raise NotImplementedError(msg) + # Get a reference to the workload to run + wl = Workload.getInstance(self.te, self.bm_name) + if wl is None: + raise ValueError('Specified benchmark [{}] is not supported'\ + .format(self.bm_name)) + return wl + + def _getBmParams(self): + if self.bm_params is None: + msg = 'Benchmark subclasses must override the `bm_params` attribute' + raise NotImplementedError(msg) + return self.bm_params + + def _getBmCollect(self): + if self.bm_collect is None: + msg = 'Benchmark subclasses must override the `bm_collect` attribute' + self._log.warning(msg) + return '' + return self.bm_collect + + def __init__(self): + """ + Set up logging and trigger running experiments + """ + LisaLogging.setup() + self._log = logging.getLogger('Benchmark') + + self._log.info('=== CommandLine parsing...') + self.args = self._parseCommandLine() + + self._log.info('=== TestEnv setup...') + self.bm_conf = self._getBmConf() + self.te = TestEnv(self.bm_conf) + self.target = self.te.target + + self._log.info('=== Initialization...') + self.wl = self._getWorkload() + self.out_dir = self.te.res_dir + try: + self.benchmarkInit() + except: + self._log.warning('Benchmark initialization failed: execution aborted') + raise + + self._log.info('=== Execution...') + self.wl.run(out_dir=self.out_dir, + collect=self._getBmCollect(), + **self.bm_params) + + self._log.info('=== Finalization...') + self.benchmarkFinalize() + + def _wait_for_logcat_idle(self, seconds=1): + lines = 0 + + # Clear logcat + # os.system('{} logcat -s {} -c'.format(adb, DEVICE)); + self.target.clear_logcat() + + # Dump logcat output + logcat_cmd = 'adb -s {} logcat'.format(self.target.adb_name) + logcat = Popen(logcat_cmd, shell=True, stdout=PIPE) + logcat_poll = select.poll() + logcat_poll.register(logcat.stdout, select.POLLIN) + + # Monitor logcat until it's idle for the specified number of [s] + self._log.info('Waiting for system to be almost idle') + self._log.info(' i.e. 
at least %d[s] of no logcat messages', seconds) + while True: + poll_result = logcat_poll.poll(seconds * 1000) + if not poll_result: + break + lines = lines + 1 + line = logcat.stdout.readline(1024) + if lines % 1000 == 0: + self._log.debug(' still waiting...') + if lines > 1e6: + self._log.warning('device logcat seems quite busy, ' + 'continuing anyway... ') + break + + def reboot_target(self, disable_charge=True): + """ + Reboot the target if a "boot-image" has been specified + + If the user specifies a boot-image as a command line parameter, this + method will reboot the target with the specified kernel and wait + for the target to be up and running. + """ + + # Reboot the device, if a boot_image has been specified + if self.args.boot_image: + + self._log.warning('=== Rebooting...') + self._log.warning('Boot image to use: %s', self.args.boot_image) + + self._log.debug('Waiting 6[s] to enter bootloader...') + self.target.adb_reboot_bootloader() + sleep(6) + # self._fastboot('boot {}'.format(self.args.boot_image)) + cmd = 'boot {}'.format(self.args.boot_image) + fastboot_command(cmd, device=self.target.adb_name) + self._log.debug('Waiting {}[s] for boot to start...'\ + .format(self.args.boot_timeout)) + sleep(self.args.boot_timeout) + + else: + self._log.warning('Device NOT rebooted, using current image') + + # Restart ADB in root mode + self._log.warning('Restarting ADB in root mode...') + self.target.adb_root(force=True) + + # TODO add check for kernel SHA1 + self._log.warning('Skipping kernel SHA1 cross-check...') + + # Disable charge via USB + if disable_charge: + self._log.debug('Disabling charge over USB...') + self.target.charging_enabled = False + + # Log current kernel version + self._log.info('Running with kernel:') + self._log.info(' %s', self.target.kernel_version) + + # Wait for the system to complete the boot + self._wait_for_logcat_idle() + +# vim :set tabstop=4 shiftwidth=4 expandtab diff --git a/libs/utils/android/workload.py b/libs/utils/android/workload.py index 122c9a7c70ce4b840382c3cd827066082c8b5e02..8b419bbc899dffcf58ab31b550324e6e5528a284 100644 --- a/libs/utils/android/workload.py +++ b/libs/utils/android/workload.py @@ -15,107 +15,19 @@ # limitations under the License. # -import os -import sys import logging +import os +import re -from glob import glob -from inspect import isclass -from importlib import import_module - -from collections import namedtuple - +from . 
import System class Workload(object): """ Base class for Android related workloads """ - _availables = None - _AW = namedtuple('AndroidWorkload', - ['module_name', 'module', 'class_name', 'ctor']) - - @staticmethod - def get(te, name='YouTube'): - """ - Get a reference to the specified Android workload - """ - if Workload._availables is None: - Workload.availables(te.target) - # Build list of case insensitive workload names - log = logging.getLogger('Workload') - if name not in Workload._availables: - log.warning('Workload [%s] not available on target', name) - return None - return Workload._availables[name].ctor(te) - - @staticmethod - def availables(target): - """ - List the supported android workloads which are available on the target - """ - if Workload._availables: - return Workload._availables.keys() - - Workload._availables = {} - - log = logging.getLogger('Workload') - log.debug('Building list of available workloads...') - - # Add workloads dir to system path - workloads_dir = os.path.dirname(os.path.abspath(__file__)) - workloads_dir = os.path.join(workloads_dir, 'workloads') - log.debug('Workdir: %s', workloads_dir) - - sys.path.insert(0, workloads_dir) - log.debug('Syspath: %s', sys.path) - - for filepath in glob(os.path.join(workloads_dir, '*.py')): - filename = os.path.splitext(os.path.basename(filepath))[0] - log.debug('Filename: %s', filename) - - # Ignore __init__ files - if filename.startswith('__'): - continue - - # Import the module for inspection - module = import_module(filename) - for member in dir(module): - # Ignore the base class - if member == 'Workload': - continue - handler = getattr(module, member) - if handler and isclass(handler) and \ - issubclass(handler, Workload): - class_name = handler.__name__ - module_name = module.__name__ - # Check if a package is required and is available on target - aw = Workload._AW(module_name, module, class_name, handler) - if (Workload._is_available(target, aw)): - # Keep track of classes which are 'Android.Workload' - Workload._availables[class_name] = aw - - return Workload._availables.keys() - - @staticmethod - def _is_available(target, aw): - try: - package = getattr(aw.ctor, 'package') - except AttributeError: - # Assume workloads not requiring a package - # are always available - return True - - # Check for the package being available - count = target.execute('pm list packages | grep {} | wc -l'\ - .format(package)) - if int(count) >= 1: - return True - - log = logging.getLogger('Workload') - log.warning('Package [%s] not installed', package) - log.warning('Workload [%s] disabled', aw.class_name) - return False + _packages = None + _availables = {} def __init__(self, test_env): """ @@ -123,20 +35,105 @@ class Workload(object): test_env: target test environmen """ - self.te = test_env - self.target = test_env.target + self._te = test_env + self._target = test_env.target self._log = logging.getLogger('Workload') - wloads = Workload.availables(self.target) - self._log.info('Workloads available on target:') - self._log.info(' %s', wloads) + # Set of data reported in output of each run + self.trace_file = None + self.nrg_report = None def _adb(self, cmd): - return 'adb -s {} {}'.format(self.target.adb_name, cmd) + return 'adb -s {} {}'.format(self._target.adb_name, cmd) + + @classmethod + def _check_availables(cls, test_env): + """ + List the supported android workloads which are available on the target + """ + + _log = logging.getLogger('Workload') + + # Getting the list of installed packages + cls._packages = 
test_env.target.list_packages() + _log.debug('Packages:\n%s', cls._packages) + + _log.debug('Building list of available workloads...') + for sc in Workload.__subclasses__(): + _log.debug('Checking workload [%s]...', sc.__name__) + if sc.package in cls._packages: + cls._availables[sc.__name__.lower()] = sc + + _log.info('Supported workloads available on target:') + _log.info(' %s', ', '.join(cls._availables.keys())) + + @classmethod + def getInstance(cls, test_env, name): + """ + Get a reference to the specified Android workload + """ + + # Initialize list of available workloads + if cls._packages is None: + cls._check_availables(test_env) + + if name.lower() not in cls._availables: + msg = 'Workload [{}] not available on target'.format(name) + raise ValueError(msg) + return cls._availables[name.lower()](test_env) - def run(self, exp_dir, **kwargs): + def run(self, out_dir, collect='', + **kwargs): raise RuntimeError('Not implemeted') + def tracingStart(self): + if 'ftrace' in self.collect and 'systrace' in self.collect: + msg = 'ftrace and systrace cannot be used at the same time' + raise ValueError(msg) + # Start FTrace + if 'ftrace' in self.collect: + self.trace_file = os.path.join(self.out_dir, 'trace.dat') + self._log.info('FTrace START') + self._te.ftrace.start() + # Start Systrace (mutually exclusive with ftrace) + elif 'systrace' in self.collect: + self.trace_file = os.path.join(self.out_dir, 'trace.html') + # Get the systrace time + match = re.search(r'systrace_([0-9]+)', self.collect) + if match: + self._trace_time = match.group(1) + else: + # TODO: must implement a CTRL+C based systrace stopping + self._log.warning("Systrace time NOT defined, tracing for 10[s]") + self._trace_time = 10 + self._log.info('Systrace START') + self._systrace_output = System.systrace_start( + self._te, self.trace_file, self._trace_time) + # Initialize energy meter results + if 'energy' in self.collect and self._te.emeter: + self._te.emeter.reset() + self._log.info('Energy meter STARTED') + + def tracingStop(self): + # Collect energy meter results + if 'energy' in self.collect and self._te.emeter: + self.nrg_report = self._te.emeter.report(self.out_dir) + self._log.info('Energy meter STOPPED') + # Stop FTrace + if 'ftrace' in self.collect: + self._te.ftrace.stop() + self._log.info('FTrace STOP') + self._te.ftrace.get_trace(self.trace_file) + # Stop Systrace (mutually exclusive with ftrace) + elif 'systrace' in self.collect: + if not self._systrace_output: + self._log.warning('Systrace is not running!') + else: + self._log.info('Waiting for systrace report [%s]...', + self.trace_file) + self._systrace_output.wait() + # Dump a platform description + self._te.platform_dump(self.out_dir) # vim :set tabstop=4 shiftwidth=4 expandtab diff --git a/libs/utils/android/workloads/jankbench.py b/libs/utils/android/workloads/jankbench.py index 5caed549628b36afdcfad9f8685a3018fd90589c..2cc1f06dccc8708e8895e1cd3cd20779eddb815e 100644 --- a/libs/utils/android/workloads/jankbench.py +++ b/libs/utils/android/workloads/jankbench.py @@ -20,8 +20,8 @@ import os import logging from subprocess import Popen, PIPE -from android import Screen, System, Workload +from android import Screen, System, Workload # Available test workloads _jankbench = { @@ -66,28 +66,34 @@ class Jankbench(Workload): self._log = logging.getLogger('Jankbench') self._log.debug('Workload created') - def run(self, exp_dir, test_name, iterations, collect=''): + # Set of output data reported by Jankbench + self.db_file = None + + def run(self, out_dir, collect, + 
test_name, iterations): + + # Keep track of mandatory parameters + self.out_dir = out_dir + self.collect = collect + # Setup test id try: test_id = _jankbench[test_name] except KeyError: raise ValueError('Jankbench test [%s] not supported', test_name) - # Initialize energy meter results - nrg_report = None - # Make sure we exit the app if already open - System.menu(self.target) - System.back(self.target) + System.menu(self._target) + System.back(self._target) # Close and clear application - System.force_stop(self.target, self.package, clear=True) + System.force_stop(self._target, self.package, clear=True) # Set airplane mode - System.set_airplane_mode(self.target, on=True) + System.set_airplane_mode(self._target, on=True) # Force screen in PORTRAIT mode - Screen.set_orientation(self.target, portrait=True) + Screen.set_orientation(self._target, portrait=True) # Clear logcat os.system(self._adb('logcat -c')); @@ -98,12 +104,12 @@ class Jankbench(Workload): '--ei "com.android.benchmark.EXTRA_RUN_COUNT" {1}'\ .format(test_id, iterations) self._log.info(test_cmd) - self.target.execute(test_cmd); + self._target.execute(test_cmd); # Parse logcat output lines logcat_cmd = self._adb( 'logcat ActivityManager:* System.out:I *:S BENCH:*'\ - .format(self.target.adb_name)) + .format(self._target.adb_name)) self._log.info(logcat_cmd) self._log.debug('Iterations:') @@ -116,16 +122,14 @@ class Jankbench(Workload): # Benchmark start trigger match = JANKBENCH_BENCHMARK_START_RE.search(message) if match: - if 'energy' in collect and self.te.emeter: - self.te.emeter.reset() + self.tracingStart() self._log.debug('Benchmark started!') # Benchmark completed trigger match = JANKBENCH_BENCHMARK_DONE_RE.search(message) if match: - if 'energy' in collect and self.te.emeter: - nrg_report = self.te.emeter.report(exp_dir) self._log.debug('Benchmark done!') + self.tracingStop() break # Iteration completd @@ -144,18 +148,16 @@ class Jankbench(Workload): int(match.group('count_junk'))) # get results - db_file = os.path.join(exp_dir, JANKBENCH_DB_NAME) - self.target.pull(JANKBENCH_DB_PATH + JANKBENCH_DB_NAME, db_file) + self.db_file = os.path.join(out_dir, JANKBENCH_DB_NAME) + self._target.pull(JANKBENCH_DB_PATH + JANKBENCH_DB_NAME, self.db_file) - System.force_stop(self.target, self.package, clear=True) + System.force_stop(self._target, self.package, clear=True) # Go back to home screen - System.home(self.target) + System.home(self._target) # Reset initial setup - Screen.set_orientation(self.target, auto=True) - System.set_airplane_mode(self.target, on=False) - - return db_file, nrg_report + Screen.set_orientation(self._target, auto=True) + System.set_airplane_mode(self._target, on=False) # vim :set tabstop=4 shiftwidth=4 expandtab diff --git a/libs/utils/android/workloads/uibench.py b/libs/utils/android/workloads/uibench.py index 20cb6b210bcc2fe5edfb53ea09184b35077a9678..967ac57d14a3a7448616188570c83ea5ca56036d 100644 --- a/libs/utils/android/workloads/uibench.py +++ b/libs/utils/android/workloads/uibench.py @@ -20,9 +20,11 @@ import os import logging from subprocess import Popen, PIPE -from android import Screen, System, Workload from time import sleep +from android import Screen, System +from android.workload import Workload + class UiBench(Workload): """ @@ -55,11 +57,16 @@ class UiBench(Workload): self._log = logging.getLogger('UiBench') self._log.debug('Workload created') - def run(self, exp_dir, test_name, duration_s, collect=''): + # Set of output data reported by UiBench + self.db_file = None + + def run(self, 
out_dir, collect, + test_name, duration_s): activity = '.' + test_name + 'Activity' - # Initialize energy meter results - nrg_report = None + # Keep track of mandatory parameters + self.out_dir = out_dir + self.collect = collect # Press Back button to be sure we run the video from the start System.menu(self.target) @@ -108,8 +115,7 @@ class UiBench(Workload): # Benchmark start trigger match = UIBENCH_BENCHMARK_START_RE.search(message) if match: - if 'energy' in collect and self.te.emeter: - self.te.emeter.reset() + self.tracingStart() self._log.debug("Benchmark started!") break @@ -117,14 +123,13 @@ class UiBench(Workload): self._log.info('Benchmark [%s] started, waiting %d [s]', activity, duration_s) sleep(duration_s) - self._log.debug("Benchmark done!") - if 'energy' in collect and self.te.emeter: - nrg_report = self.te.emeter.report(exp_dir) + self._log.debug("Benchmark done!") + self.tracingStop() # Get frame stats - db_file = os.path.join(exp_dir, "framestats.txt") - System.gfxinfo_get(self.target, self.package, db_file) + self.db_file = os.path.join(out_dir, "framestats.txt") + System.gfxinfo_get(self.target, self.package, self.db_file) # Close and clear application System.force_stop(self.target, self.package, clear=True) @@ -136,6 +141,4 @@ class UiBench(Workload): Screen.set_orientation(self.target, auto=True) System.set_airplane_mode(self.target, on=False) - return db_file, nrg_report - # vim :set tabstop=4 shiftwidth=4 expandtab diff --git a/libs/utils/android/workloads/youtube.py b/libs/utils/android/workloads/youtube.py index 4aa33335977eeb6c680ebf8d603c61c8a84eff9c..d3196d70d310af3b13ff69fa614a8279902eb8d9 100644 --- a/libs/utils/android/workloads/youtube.py +++ b/libs/utils/android/workloads/youtube.py @@ -19,9 +19,11 @@ import re import os import logging -from android import Screen, Workload, System from time import sleep +from android import Screen, System +from android.workload import Workload + class YouTube(Workload): """ @@ -37,10 +39,15 @@ class YouTube(Workload): self._log = logging.getLogger('YouTube') self._log.debug('Workload created') - def run(self, exp_dir, video_url, video_duration_s, collect=''): + # Set of output data reported by YouTube + self.db_file = None + + def run(self, out_dir, collect, + video_url, video_duration_s): - # Initialize energy meter results - nrg_report = None + # Keep track of mandatory parameters + self.out_dir = out_dir + self.collect = collect # Unlock device screen (assume no password required) System.menu(self.target) @@ -63,21 +70,15 @@ class YouTube(Workload): # Allow the activity to start sleep(1) - # Start energy collection - if 'energy' in collect and self.te.emeter: - self.te.emeter.reset() - # Wait until the end of the video + self.tracingStart() self._log.info('Play video for %d [s]', video_duration_s) sleep(video_duration_s) - - # Stop energy collection - if 'energy' in collect and self.te.emeter: - nrg_report = self.te.emeter.report(exp_dir) + self.tracingStop() # Get frame stats - db_file = os.path.join(exp_dir, "framestats.txt") - System.gfxinfo_get(self.target, self.package, db_file) + self.db_file = os.path.join(out_dir, "framestats.txt") + System.gfxinfo_get(self.target, self.package, self.db_file) # Close the app without clearing the local data to # avoid the dialog to select the account at next start @@ -89,6 +90,4 @@ class YouTube(Workload): # Switch back to screen auto rotation Screen.set_orientation(self.target, auto=True) - return db_file, nrg_report - # vim :set tabstop=4 shiftwidth=4 expandtab diff --git 
a/logging.conf b/logging.conf index 8cc20549e5950dfe820b51f0c6ceac9ba878f454..b46e850bde38e1e8555a3768bc84c34ebd3e27db 100644 --- a/logging.conf +++ b/logging.conf @@ -19,39 +19,80 @@ propagate=0 # # For example, to enable debugging just for the TestEnv module, you need to # uncomment the lovver_TestEnv section and set: -# [loggers] -# keys=root,TestEnv -# -# -# [logger_TestEnv] -# qualname=TestEnv -# level=DEBUG -# handlers=consoleHandler,fileHandler -# propagate=0 -# -# [logger_LisaTest] -# qualname=LisaTest -# level=DEBUG -# handlers=consoleHandler,fileHandler -# propagate=0 -# -# [logger_Executor] -# qualname=Executor -# level=DEBUG -# handlers=consoleHandler,fileHandler -# propagate=0 -# -# [logger_Workload] -# qualname=Workload -# level=DEBUG -# handlers=consoleHandler,fileHandler -# propagate=0 -# -# [logger_RTApp] -# qualname=RTApp -# level=DEBUG -# handlers=consoleHandler,fileHandler -# propagate=0 +[loggers] +keys=root,Target,AndroidTarget,android,LinuxTarget,ssh,TestEnv,LisaTest,Executor,Workload,RTApp,Benchmark,local_connection + +[logger_Target] +qualname=Target +level=INFO +handlers=consoleHandler,fileHandler +propagate=0 + +[logger_TestEnv] +qualname=TestEnv +level=INFO +handlers=consoleHandler,fileHandler +propagate=0 + +[logger_LisaTest] +qualname=LisaTest +level=INFO +handlers=consoleHandler,fileHandler +propagate=0 + +[logger_Executor] +qualname=Executor +level=INFO +handlers=consoleHandler,fileHandler +propagate=0 + +[logger_Workload] +qualname=Workload +level=INFO +handlers=consoleHandler,fileHandler +propagate=0 + +[logger_RTApp] +qualname=RTApp +level=INFO +handlers=consoleHandler,fileHandler +propagate=0 + +[logger_Benchmark] +qualname=Benchmark +level=INFO +handlers=consoleHandler,fileHandler +propagate=0 + +[logger_AndroidTarget] +qualname=AndroidTarget +level=INFO +handlers=consoleHandler,fileHandler +propagate=0 + +[logger_android] +qualname=android +level=INFO +handlers=consoleHandler,fileHandler +propagate=0 + +[logger_LinuxTarget] +qualname=LinuxTarget +level=INFO +handlers=consoleHandler,fileHandler +propagate=0 + +[logger_ssh] +qualname=ssh +level=INFO +handlers=consoleHandler,fileHandler +propagate=0 + +[logger_local_connection] +qualname=Local +level=INFO +handlers=consoleHandler,fileHandler +propagate=0 ################################################################################ diff --git a/tests/benchmarks/android_jankbench.py b/tests/benchmarks/android_jankbench.py new file mode 100755 index 0000000000000000000000000000000000000000..160373035319002236673fc76b4cfdbc3f3a5fe9 --- /dev/null +++ b/tests/benchmarks/android_jankbench.py @@ -0,0 +1,195 @@ +#!/usr/bin/python + +import os + +from time import sleep + +# The workload class MUST be loaded before the LisaBenchmark +from android import Workload +from android import LisaBenchmark + +from devlib.exception import TargetError + +class JankbenchTest(LisaBenchmark): + + bm_conf = { + + # Target platform and board + "platform" : 'android', + + # Define devlib modules to load + "modules" : [ + 'cpufreq', + ], + + # FTrace events to collect for all the tests configuration which have + # the "ftrace" flag enabled + "ftrace" : { + "events" : [ + "sched_switch", + "sched_overutilized", + "sched_contrib_scale_f", + "sched_load_avg_cpu", + "sched_load_avg_task", + "sched_tune_tasks_update", + "sched_boost_cpu", + "sched_boost_task", + "sched_energy_diff", + "cpu_frequency", + "cpu_idle", + "cpu_capacity", + ], + "buffsize" : 10 * 1024, + }, + + # Default EnergyMeter Configuration + "emeter" : { + "instrument" 
: "acme", + "channel_map" : { + "Device0" : 0, + } + }, + + # Tools required by the experiments + "tools" : [ 'trace-cmd' ], + + # Default results folder + "results_dir" : "AndroidJankbench", + + } + + # Android Workload to run + bm_name = 'Jankbench' + + # Default products to be collected + bm_collect = 'ftrace energy' + + def benchmarkInit(self): + self.setupWorkload() + self.setupGovernor() + if self.reboot: + self.reboot_target() + + def benchmarkFinalize(self): + if self.delay_after_s: + self._log.info("Waiting %d[s] before continuing...", + self.delay_after_s) + sleep(self.delay_after_s) + + def __init__(self, governor, test, iterations, + reboot=False, delay_after_s=0): + self.reboot = reboot + self.governor = governor + self.test = test + self.iterations = iterations + self.delay_after_s = delay_after_s + super(JankbenchTest, self).__init__() + + def setupWorkload(self): + # Create a results folder for each "governor/test" + self.out_dir = os.path.join(self.te.res_dir, self.governor, self.test) + try: + os.stat(self.out_dir) + except: + os.makedirs(self.out_dir) + # Setup workload parameters + self.bm_params = { + 'test_name' : self.test, + 'iterations' : self.iterations, + } + + def setupGovernor(self): + try: + self.target.cpufreq.set_all_governors(self.governor); + except TargetError: + self._log.warning('Governor [%s] not available on target', + self.governor) + raise + + # Setup schedutil parameters + if self.governor == 'schedutil': + rate_limit_us = 2000 + # Different schedutil versions have different tunables + tunables = self.target.cpufreq.list_governor_tunables(0) + if 'rate_limit_us' in tunables: + tunables = {'rate_limit_us' : str(rate_limit_us)} + else: + assert ('up_rate_limit_us' in tunables and + 'down_rate_limit_us' in tunables) + tunables = { + 'up_rate_limit_us' : str(rate_limit_us), + 'down_rate_limit_us' : str(rate_limit_us) + } + + try: + for cpu_id in range(self.te.platform['cpus_count']): + self.target.cpufreq.set_governor_tunables( + cpu_id, 'schedutil', **tunables) + except TargetError as e: + self._log.warning('Failed to set schedutil parameters: {}'\ + .format(e)) + raise + self._log.info('Set schedutil.rate_limit_us=%d', rate_limit_us) + + # Setup ondemand parameters + if self.governor == 'ondemand': + try: + for cpu_id in range(self.te.platform['cpus_count']): + tunables = self.target.cpufreq.get_governor_tunables(cpu_id) + self.target.cpufreq.set_governor_tunables( + cpu_id, 'ondemand', + **{'sampling_rate' : tunables['sampling_rate_min']}) + except TargetError as e: + self._log.warning('Failed to set ondemand parameters: {}'\ + .format(e)) + raise + self._log.info('Set ondemand.sampling_rate to minimum supported') + + # Report configured governor + governors = self.target.cpufreq.get_all_governors() + self._log.info('Using governors: %s', governors) + + +# Run the benchmark in each of the supported governors + +iterations = 1 + +governors = [ + 'performance', + 'powersave', + 'ondemand', + 'interactive', + 'sched', + 'schedutil' +] + +tests = [ + 'list_view', + 'image_list_view', + 'shadow_grid', + 'low_hitrate_text', + 'high_hitrate_text', + 'edit_text' +] + +# Reboot device only the first time +do_reboot = True +tests_remaining = len(governors) * len(tests) +tests_completed = 0 +for governor in governors: + for test in tests: + tests_remaining -= 1 + delay_after_s = 30 if tests_remaining else 0 + try: + JankbenchTest(governor, test, iterations, + do_reboot, delay_after_s) + tests_completed += 1 + except: + # A test configuration failed, continue with 
other tests + pass + do_reboot = False + +# We want to collect data from at least one governor +assert(tests_completed >= 1) + +# vim :set tabstop=4 shiftwidth=4 expandtab
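The patch above defines a plugin contract for Android workloads: any module dropped into libs/utils/android/workloads/ is auto-imported by android/__init__.py, any Workload subclass it defines is discovered via Workload.__subclasses__(), listed as available when its `package` attribute matches a package installed on the target, and instantiated through Workload.getInstance(). The sketch below is not part of the patch; it is a minimal, hypothetical module written against the new API only to illustrate that contract. The ExampleApp class, the com.example.app package and the "am start" activity name are made-up placeholders.

# libs/utils/android/workloads/exampleapp.py (illustrative sketch, not part of this patch)
import logging

from time import sleep

from android import System
from android.workload import Workload

class ExampleApp(Workload):
    """
    Hypothetical Android workload showing the contract expected by the new
    Workload base class
    """

    # Package that must be installed on the target for this workload to be
    # reported as available by Workload._check_availables()
    package = 'com.example.app'

    def __init__(self, test_env):
        super(ExampleApp, self).__init__(test_env)
        self._log = logging.getLogger('ExampleApp')

        # Set of output data reported by this workload
        self.db_file = None

    def run(self, out_dir, collect, duration_s):
        # Keep track of the mandatory parameters consumed by
        # tracingStart()/tracingStop() in the base class
        self.out_dir = out_dir
        self.collect = collect

        # Wake the device and start from the home screen
        System.menu(self._target)
        System.home(self._target)

        # Launch the application (activity name is a placeholder)
        self._target.execute('am start -n {}/.MainActivity'.format(self.package))
        sleep(1)

        # Collect ftrace/systrace/energy data around the measured region
        self.tracingStart()
        sleep(duration_s)
        self.tracingStop()

        # Close and clear the application
        System.force_stop(self._target, self.package, clear=True)

A LisaBenchmark subclass would then select this workload by setting bm_name = 'ExampleApp' and bm_params = {'duration_s': 30}, in the same way tests/benchmarks/android_jankbench.py does for Jankbench.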