diff --git a/libs/wlgen/wlgen/perf_bench.py b/libs/wlgen/wlgen/perf_bench.py index df87bfa6756f9f24718ab893a8b8c1a8daa1df77..e0cdcce7818833105aba6afdd26585a989fd131d 100644 --- a/libs/wlgen/wlgen/perf_bench.py +++ b/libs/wlgen/wlgen/perf_bench.py @@ -35,7 +35,7 @@ class PerfMessaging(Workload): # TODO: Assume perf is pre-installed on target #target.setup('perf') - super(PerfMessaging, self).__init__(target, name, None) + super(PerfMessaging, self).__init__(target, name) # perf "sched" executor self.wtype = 'perf_bench_messaging' @@ -46,22 +46,26 @@ class PerfMessaging(Workload): def conf(self, group = 1, - loop = 10, + loop = 500, pipe = '', thread = '', cpus=None, cgroup=None, - exc_id=0): + exc_id=0, + run_dir=None): if pipe is not '': pipe = '--pipe' if thread is not '': thread = '--thread' - super(PerfMessaging, self).conf('custom', - {'group': str(group), 'loop': str(loop), 'pipe': pipe, 'thread': thread}, - 0, cpus, cgroup, exc_id) - + super(PerfMessaging, self).conf( + 'custom', + params={'group': str(group), + 'loop': str(loop), + 'pipe': pipe, + 'thread': thread}, + duration=0, cpus=cpus, exc_id=exc_id, run_dir=run_dir) self.command = '{0:s}/perf bench sched messaging {1:s} {2:s} --group {3:s} --loop {4:s}'\ .format(self.target.executables_directory, @@ -120,7 +124,10 @@ class PerfPipe(Workload): # TODO: Assume perf is pre-installed on target #target.setup('perf') - super(PerfPipe, self).__init__(target, name, None) + # Setup logging + self.logger = logging.getLogger('perf_bench') + + super(PerfPipe, self).__init__(target, name) # perf "sched" executor self.wtype = 'perf_bench_pipe' diff --git a/libs/wlgen/wlgen/rta.py b/libs/wlgen/wlgen/rta.py index ceaa8b6fc4937b2108f6add2215dbc8ae1d61368..4531bbc76f0b46201014bc44cbd2d807d12eaefc 100644 --- a/libs/wlgen/wlgen/rta.py +++ b/libs/wlgen/wlgen/rta.py @@ -43,11 +43,20 @@ class Phase(_Phase): pass class RTA(Workload): + """ + Class for creating RT-App workloads + """ def __init__(self, target, name, calibration=None): + """ + :param target: Devlib target to run workload on. + :param name: Human-readable name for the workload + :param calibration: CPU calibration specification. Can be obtained from + :meth:`calibrate`. 
+        """
         # Setup logging
         self._log = logging.getLogger('RTApp')
 
@@ -58,7 +67,7 @@ class RTA(Workload):
         # TODO: Assume rt-app is pre-installed on target
         # self.target.setup('rt-app')
 
-        super(RTA, self).__init__(target, name, calibration)
+        super(RTA, self).__init__(target, name)
 
         # rt-app executor
         self.wtype = 'rtapp'
@@ -77,6 +86,12 @@ class RTA(Workload):
 
     @staticmethod
     def calibrate(target):
+        """
+        Calibrate RT-App on each CPU in the system
+
+        :param target: Devlib target to run calibration on
+        :returns: Dict mapping CPU numbers to RT-App calibration values
+        """
         pload_regexp = re.compile(r'pLoad = ([0-9]+)ns')
         pload = {}
 
@@ -144,12 +159,13 @@ class RTA(Workload):
             return
         self._log.debug('Pulling logfiles to [%s]...', destdir)
         for task in self.tasks.keys():
-            logfile = "'{0:s}/*{1:s}*.log'"\
-                .format(self.run_dir, task)
+            logfile = self.target.path.join(self.run_dir,
+                                            '*{}*.log'.format(task))
             self.target.pull(logfile, destdir)
         self._log.debug('Pulling JSON to [%s]...', destdir)
-        self.target.pull('{}/{}'.format(self.run_dir, self.json), destdir)
-        logfile = '{}/output.log'.format(destdir)
+        self.target.pull(self.target.path.join(self.run_dir, self.json),
+                         destdir)
+        logfile = self.target.path.join(destdir, 'output.log')
         self._log.debug('Saving output on [%s]...', logfile)
         with open(logfile, 'w') as ofile:
             for line in self.output['executor'].split('\n'):
@@ -483,32 +499,41 @@ class RTA(Workload):
         workloads. The classes supported so far are detailed hereafter.
 
         Custom workloads
-        ----------------
-        When 'kind' is 'custom' the tasks generated by this workload are the
-        ones defined in a provided rt-app JSON configuration file.
-        In this case the 'params' parameter must be used to specify the
-        complete path of the rt-app JSON configuration file to use.
-
+            When 'kind' is 'custom' the tasks generated by this workload are the
+            ones defined in a provided rt-app JSON configuration file.
+            In this case the 'params' parameter must be used to specify the
+            complete path of the rt-app JSON configuration file to use.
 
         Profile based workloads
-        -----------------------
-        When 'kind' is 'profile' the tasks generated by this workload have a
-        profile which is defined by a sequence of phases and they are defined
-        according to the following grammar:
+            When ``kind`` is "profile", ``params`` is a dictionary mapping task
+            names to task specifications. The easiest way to create these task
+            specifications is using :meth:`RTATask.get`.
+
+            For example, the following configures an RTA workload with a single
+            task, named 't1', using the default parameters for a Periodic RTATask:
+
+            ::
+
+                wl = RTA(...)
+                wl.conf(kind='profile', params={'t1': Periodic().get()})
 
-          params := {task, ...}
-          task := NAME : {SCLASS, PRIO, [phase, ...]}
-          phase := (PTIME, PRIOD, DCYCLE)
+        :param kind: Either 'custom' or 'profile' - see above.
+        :param params: RT-App parameters - see above.
+        :param duration: Maximum duration of the workload in seconds. Any
+                         remaining tasks are killed by rt-app when this time has
+                         elapsed.
+        :param cpus: CPUs to restrict this workload to, using ``taskset``.
 
-          where the terminals are:
-            NAME : string, the task name (max 16 chars)
-            SCLASS : string, the scheduling class (OTHER, FIFO, RR)
-            PRIO : int, the priority of the task
-            PTIME : float, length of the current phase in [s]
-            PERIOD : float, task activation interval in [ms]
-            DCYCLE : int, task running interval in [0..100]% within each period
+        :param sched: Global RT-App scheduler configuration. Dict with fields:
+
+            policy
+                The default scheduler policy. Choose from 'OTHER', 'FIFO', 'RR',
+                and 'DEADLINE'.
+
+        :param run_dir: Target dir to store output and config files in.
+
+        .. TODO: document or remove loadref
         """
 
         if not sched:
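To make the calibration flow documented above concrete, here is a minimal sketch of how the two pieces fit together, assuming ``target`` is an already-connected devlib target with rt-app deployed (the variable names are illustrative only)::

    from wlgen import RTA

    # Measure per-CPU calibration values once; this typically needs a
    # root-capable target with a suitable cpufreq governor, otherwise a
    # dummy dict (as the self-tests below use) can be substituted.
    calibration = RTA.calibrate(target)   # e.g. {0: 750, 1: 750, 2: 360, 3: 360}

    # The calibration dict is then passed when constructing the workload
    rtapp = RTA(target, name='example', calibration=calibration)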
@@ -536,12 +561,25 @@ class RTA(Workload):
         self.test_label = '{0:s}_{1:02d}'.format(self.name, self.exc_id)
         return self.test_label
 
-class _TaskBase(object):
+class RTATask(object):
+    """
+    Base class for conveniently constructing params to :meth:`RTA.conf`
+
+    This class represents an RT-App task which may contain multiple phases. It
+    implements ``__add__`` so that using ``+`` on two tasks concatenates their
+    phases. For example ``Ramp() + Periodic()`` would yield an ``RTATask`` that
+    executes the default phases for ``Ramp`` followed by the default phases for
+    ``Periodic``.
+    """
 
     def __init__(self):
         self._task = {}
 
     def get(self):
+        """
+        Return a dict that can be passed as an element of the ``params`` field
+        to :meth:`RTA.conf`.
+        """
         return self._task
 
     def __add__(self, next_phases):
@@ -549,35 +587,32 @@
         return self
 
 
-class Ramp(_TaskBase):
+class Ramp(RTATask):
+    """
+    Configure a ramp load.
+
+    This class defines a task whose load is a ramp with a configured number
+    of steps according to the input parameters.
+
+    :param start_pct: the initial load percentage.
+    :param end_pct: the final load percentage.
+    :param delta_pct: the load increase/decrease at each step, in percentage
+                      points.
+    :param time_s: the duration in seconds of each load step.
+    :param period_ms: the period used to define the load in [ms]
+    :param delay_s: the delay in seconds before ramp start
+    :param loops: number of times to repeat the ramp, with the specified
+                  delay in between
+
+    :param sched: the scheduler configuration for this task
+    :type sched: dict
+
+    :param cpus: the list of CPUs on which task can run
+    :type cpus: list
+    """
     def __init__(self, start_pct=0, end_pct=100, delta_pct=10, time_s=1,
                  period_ms=100, delay_s=0, loops=1, sched=None, cpus=None):
-        """
-        Configure a ramp load.
-
-        This class defines a task which load is a ramp with a configured number
-        of steps according to the input parameters.
-
-        Args:
-            start_pct (int, [0-100]): the initial load [%], (default 0[%])
-            end_pct (int, [0-100]): the final load [%], (default 100[%])
-            delta_pct (int, [0-100]): the load increase/decrease [%],
-                          default: 10[%]
-                          increase if start_prc < end_prc
-                          decrease if start_prc > end_prc
-            time_s (float): the duration in [s] of each load step
-                          default: 1.0[s]
-            period_ms (float): the period used to define the load in [ms]
-                          default: 100.0[ms]
-            delay_s (float): the delay in [s] before ramp start
-                          default: 0[s]
-            loops (int): number of time to repeat the ramp, with the
-                          specified delay in between
-                          default: 0
-            sched (dict): the scheduler configuration for this task
-            cpus (list): the list of CPUs on which task can run
-        """
         super(Ramp, self).__init__()
 
         self._task['cpus'] = cpus
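The task composition described in the ``RTATask`` docstring above can be used as in the following sketch, which mirrors the composition self-test added later in this patch; ``rtapp`` is assumed to be an already-constructed ``RTA`` instance::

    from wlgen import Periodic, Ramp

    # Three independent task specifications...
    light = Periodic(duty_cycle_pct=10, duration_s=1.0, period_ms=10)
    ramp = Ramp(start_pct=10, end_pct=90, delta_pct=20, time_s=1, period_ms=50)
    heavy = Periodic(duty_cycle_pct=90, duration_s=0.1, period_ms=100)

    # ...concatenated into a single task: the phases of 'ramp' and 'heavy'
    # are appended to those of 'light'
    lrh_task = light + ramp + heavy

    rtapp.conf(kind='profile', params={'task_ramp': lrh_task.get()})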
@@ -611,73 +646,67 @@
         self._task['phases'] = phases
 
 class Step(Ramp):
+    """
+    Configure a step load.
+
+    This class defines a task whose load is a step with a configured initial
+    and final load. Using the ``loops`` param, this can be used to create a
+    workload that alternates between two load values.
+
+    :param start_pct: the initial load percentage.
+    :param end_pct: the final load percentage.
+    :param time_s: the duration in seconds of each load step.
+    :param period_ms: the period used to define the load in [ms]
+    :param delay_s: the delay in seconds before ramp start
+    :param loops: number of times to repeat the step, with the specified
+                  delay in between.
+
+    :param sched: the scheduler configuration for this task
+    :type sched: dict
+
+    :param cpus: the list of CPUs on which task can run
+    :type cpus: list
+    """
     def __init__(self, start_pct=0, end_pct=100, time_s=1, period_ms=100,
                  delay_s=0, loops=1, sched=None, cpus=None):
-        """
-        Configure a step load.
-
-        This class defines a task which load is a step with a configured
-        initial and final load.
-
-        Args:
-            start_pct (int, [0-100]): the initial load [%]
-                          default 0[%])
-            end_pct (int, [0-100]): the final load [%]
-                          default 100[%]
-            time_s (float): the duration in [s] of the start and end load
-                          default: 1.0[s]
-            period_ms (float): the period used to define the load in [ms]
-                          default 100.0[ms]
-            delay_s (float): the delay in [s] before ramp start
-                          default 0[s]
-            loops (int): number of time to repeat the ramp, with the
-                          specified delay in between
-                          default: 0
-            sched (dict): the scheduler configuration for this task
-            cpus (list): the list of CPUs on which task can run
-        """
         delta_pct = abs(end_pct - start_pct)
         super(Step, self).__init__(start_pct, end_pct, delta_pct, time_s,
                                    period_ms, delay_s, loops, sched, cpus)
 
-class Pulse(_TaskBase):
+class Pulse(RTATask):
+    """
+    Configure a pulse load.
+
+    This class defines a task whose load is a pulse with a configured
+    initial and final load.
+
+    The main difference with the 'step' class is that a pulse workload is
+    by definition a 'step down', i.e. the workload switches from an initial
+    load to a final one which is always lower than the initial one.
+    Moreover, a pulse load does not generate a sleep phase in case of 0[%]
+    load, i.e. the task ends as soon as the non-null initial load has
+    completed.
+
+    :param start_pct: the initial load percentage.
+    :param end_pct: the final load percentage. Must be lower than the
+                    ``start_pct`` value. If ``end_pct`` is 0, the task ends
+                    after the ``start_pct`` period has completed.
+    :param time_s: the duration in seconds of each load step.
+    :param period_ms: the period used to define the load in [ms]
+    :param delay_s: the delay in seconds before ramp start
+    :param loops: number of times to repeat the pulse, with the specified
+                  delay in between.
+
+    :param sched: the scheduler configuration for this task
+    :type sched: dict
+
+    :param cpus: the list of CPUs on which task can run
+    :type cpus: list
+    """
     def __init__(self, start_pct=100, end_pct=0, time_s=1, period_ms=100,
                  delay_s=0, loops=1, sched=None, cpus=None):
-        """
-        Configure a pulse load.
-
-        This class defines a task which load is a pulse with a configured
-        initial and final load.
-
-        The main difference with the 'step' class is that a pulse workload is
-        by definition a 'step down', i.e. the workload switch from an finial
-        load to a final one which is always lower than the initial one.
-        Moreover, a pulse load does not generate a sleep phase in case of 0[%]
-        load, i.e. the task ends as soon as the non null initial load has
-        completed.
-
-        Args:
-            start_pct (int, [0-100]): the initial load [%]
-                          default: 0[%]
-            end_pct (int, [0-100]): the final load [%]
-                          default: 100[%]
-                          NOTE: must be lower than start_pct value
-            time_s (float): the duration in [s] of the start and end load
-                          default: 1.0[s]
-                          NOTE: if end_pct is 0, the task end after the
-                          start_pct period completed
-            period_ms (float): the period used to define the load in [ms]
-                          default: 100.0[ms]
-            delay_s (float): the delay in [s] before ramp start
-                          default: 0[s]
-            loops (int): number of time to repeat the ramp, with the
-                          specified delay in between
-                          default: 0
-            sched (dict): the scheduler configuration for this task
-            cpus (list): the list of CPUs on which task can run
-        """
         super(Pulse, self).__init__()
 
         if end_pct >= start_pct:
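As the new ``Step`` docstring notes, the ``loops`` parameter can produce a load that alternates between two values. A minimal sketch, assuming ``Step`` is exported from the ``wlgen`` package like ``Periodic`` and ``Ramp``, and that ``rtapp`` is an existing ``RTA`` instance::

    from wlgen import Step

    # Two one-second phases (20% then 80% load) repeated three times,
    # giving a square wave that alternates between the two load values
    square = Step(start_pct=20, end_pct=80, time_s=1, period_ms=100, loops=3)

    rtapp.conf(kind='profile', params={'square': square.get()})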
@@ -709,30 +738,25 @@
 
 class Periodic(Pulse):
+    """
+    Configure a periodic load. This is the simplest type of RTA task.
 
-    def __init__(self, duty_cycle_pct=50, duration_s=1, period_ms=100,
-                 delay_s=0, sched=None, cpus=None):
-        """
-        Configure a periodic load.
+    This class defines a task whose load is periodic with a configured
+    period and duty-cycle.
 
-        This class defines a task which load is periodic with a configured
-        period and duty-cycle.
+    :param duty_cycle_pct: the load percentage.
+    :param duration_s: the total duration in seconds of the task.
+    :param period_ms: the period used to define the load in milliseconds.
+    :param delay_s: the delay in seconds before starting the workload.
 
-        This class is a specialization of the 'pulse' class since a periodic
-        load is generated as a sequence of pulse loads.
+    :param sched: the scheduler configuration for this task
+    :type sched: dict
 
-        Args:
-            cuty_cycle_pct (int, [0-100]): the pulses load [%]
-                          default: 50[%]
-            duration_s (float): the duration in [s] of the entire workload
-                          default: 1.0[s]
-            period_ms (float): the period used to define the load in [ms]
-                          default: 100.0[ms]
-            delay_s (float): the delay in [s] before ramp start
-                          default: 0[s]
-            sched (dict): the scheduler configuration for this task
+    :param cpus: the list of CPUs on which task can run
+    :type cpus: list
+    """
 
-        """
+    def __init__(self, duty_cycle_pct=50, duration_s=1, period_ms=100,
+                 delay_s=0, sched=None, cpus=None):
         super(Periodic, self).__init__(duty_cycle_pct, 0, duration_s,
                                        period_ms, delay_s, 1, sched, cpus)
-
diff --git a/libs/wlgen/wlgen/workload.py b/libs/wlgen/wlgen/workload.py
index 6e65ca9cb1144aa5b799aca9bd3bc374e178bf56..6155ee5a8846cbeaee580024a3850c4c0b263695 100644
--- a/libs/wlgen/wlgen/workload.py
+++ b/libs/wlgen/wlgen/workload.py
@@ -24,11 +24,24 @@
 from time import sleep
 import logging
 
 class Workload(object):
+    """
+    Base class for workload specifications
+
+    To use this class, you'll need to instantiate it, then call :meth:`conf` on
+    the instance.
+
+    :param target: Devlib target to run workload on. May be None, in which case
+                   an RT-App configuration file can be generated but the
+                   workload cannot be run, and calibration features will be
+                   missing.
+    :param name: Human-readable name for the workload
+    :param calibration: CPU calibration specification. Can be obtained from
+                        :meth:`RTA.calibrate`.
+ """ def __init__(self, target, - name, - calibration=None): + name): # Target device confguration self.target = target @@ -42,23 +55,13 @@ class Workload(object): # The dictionary of tasks descriptors generated by this workload self.tasks = {} - # CPU load calibration values, measured on each core - self.calibration = calibration - # The cpus on which the workload will be executed - # NOTE: for the time being we support just a single CPU self.cpus = None - # The cgroup on which the workload will be executed + # The cgroup on which the workload will be executed # NOTE: requires cgroups to be properly configured and associated # tools deployed on the target self.cgroup = None - self.cgroup_cmd = '' - - # taskset configuration to constraint workload execution on a specified - # set of CPUs - self.taskset = None - self.taskset_cmd = '' # The command to execute a workload (defined by a derived class) self.command = None @@ -102,6 +105,21 @@ class Workload(object): self.steps[step](kwords) def setCallback(self, step, func): + """ + Add a callback to be called during an execution stage. + + Intended for use by subclasses. Only one callback can exist for each + stage. Available callback stages are: + + "postrun" + Called after the workload has finished executing, unless it's being + run in the background. Receives a ``params`` dictionary with + ``params["destdir"]`` set to the host directory to store workload + output in. + + :param step: Name of the step at which to call the callback + :param func: Callback function + """ self._log.debug('Setup step [%s] callback to [%s] function', step, func.__name__) self.steps[step] = func @@ -121,6 +139,7 @@ class Workload(object): sched={'policy': 'OTHER'}, run_dir=None, exc_id=0): + """Configure workload. See documentation for subclasses""" self.cpus = cpus self.sched = sched @@ -137,6 +156,7 @@ class Workload(object): # Initialize run folder if self.run_dir is None: self.run_dir = self.target.working_directory + self.target.execute('mkdir -p {}'.format(self.run_dir)) # Configure a profile workload if kind == 'profile': @@ -174,8 +194,9 @@ class Workload(object): :type cgroup: str :param cpus: the CPUs on which to run the workload. - NOTE: if specified it overrides the CPUs specified at - configuration time + + .. note:: if specified it overrides the CPUs specified at + configuration time :type cpus: list(int) :param background: run the workload in background. 
In this case the @@ -184,8 +205,8 @@ class Workload(object): collection :type background: bool - :param out_dir: output directory where to store the collected trace (if - any) + :param out_dir: output directory where to store the collected trace or + other workload report (if any) :type out_dir: str :param as_root: run the workload as root on the target @@ -214,15 +235,19 @@ class Workload(object): # Prepend eventually required taskset command if cpus or self.cpus: cpus_mask = self.getCpusMask(cpus if cpus else self.cpus) - self.taskset_cmd = '{}/taskset 0x{:X}'\ + taskset_cmd = '{}/taskset 0x{:X}'\ .format(self.target.executables_directory, cpus_mask) _command = '{} {}'\ - .format(self.taskset_cmd, _command) + .format(taskset_cmd, _command) - if self.cgroup and hasattr(self.target, 'cgroups'): - # Get a reference to the CGroup to use - _command = self.target.cgroups.run_into_cmd(self.cgroup, _command) + if self.cgroup: + if hasattr(self.target, 'cgroups'): + _command = self.target.cgroups.run_into_cmd(self.cgroup, + _command) + else: + raise ValueError('To run workload in a cgroup, add "cgroups" ' + 'devlib module to target/test configuration') # Start FTrace (if required) if ftrace: @@ -276,53 +301,6 @@ class Workload(object): def getOutput(self, step='executor'): return self.output[step] - def getTasks(self, dataframe=None, task_names=None, - name_key='comm', pid_key='pid'): - # """ Helper function to get PIDs of specified tasks - # - # This method requires a Pandas dataset in input to be used to - # fiter our the PIDs of all the specified tasks. - # In a dataset is not provided, previouslt filtered PIDs are - # returned. If a list of task names is not provided, the workload - # defined task names is used instead. - # The specified dataframe must provide at least two columns - # reporting the task name and the task PID. The default values of - # this colums could be specified using the provided parameters. 
- # - # :param task_names: The list of tasks to get the PID of (by default - # the workload defined tasks) - # :param dataframe: A Pandas datafram containing at least 'pid' and - # 'task name' columns - # If None, the previously filtered PIDs are - # returned - # :param name_key: The name of the dataframe columns containing - # task names - # :param pid_key: The name of the dataframe columns containing - # task PIDs - # """ - if dataframe is None: - return self.tasks - if task_names is None: - task_names = self.tasks.keys() - self._log.debug('Lookup dataset for tasks...') - for task_name in task_names: - results = dataframe[dataframe[name_key] == task_name]\ - [[name_key,pid_key]] - if len(results)==0: - self._log.error(' task %16s NOT found', task_name) - continue - (name, pid) = results.head(1).values[0] - if name != task_name: - self._log.error(' task %16s NOT found', task_name) - continue - if task_name not in self.tasks: - self.tasks[task_name] = {} - pids = list(results[pid_key].unique()) - self.tasks[task_name]['pid'] = pids - self._log.info(' task %16s found, pid: %s', - task_name, self.tasks[task_name]['pid']) - return self.tasks - def listAll(self, kill=False): # Show all the instances for the current executor tasks = self.target.run('ps | grep {0:s}'.format(self.executor)) @@ -337,4 +315,3 @@ class Workload(object): return self._log.info('Killing all [%s] instances:', self.executor) self.listAll(True) - diff --git a/tests/lisa/test_wlgen.py b/tests/lisa/test_wlgen.py new file mode 100644 index 0000000000000000000000000000000000000000..4cd05661833f2b418a8a71fa6ab31b59b93a5b27 --- /dev/null +++ b/tests/lisa/test_wlgen.py @@ -0,0 +1,355 @@ +# SPDX-License-Identifier: Apache-2.0 +# +# Copyright (C) 2016, ARM Limited and contributors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from collections import OrderedDict +import json +import os +import shutil +from unittest import TestCase + +from devlib import LocalLinuxTarget, Platform + +from wlgen import RTA, Periodic, Ramp +from wlgen import PerfMessaging + +dummy_calibration = {} + +class TestTarget(LocalLinuxTarget): + """ + Devlib target for self-testing LISA + + Uses LocalLinuxTarget configured to disallow using root. + Adds facility to record the commands that were executed for asserting LISA + behaviour. + """ + def __init__(self): + self.execute_calls = [] + super(TestTarget, self).__init__(platform=Platform(), + load_default_modules=False, + connection_settings={'unrooted': True}) + + def execute(self, *args, **kwargs): + self.execute_calls.append((args, kwargs)) + return super(TestTarget, self).execute(*args, **kwargs) + + @property + def executed_commands(self): + return [args[0] if args else kwargs['command'] + for args, kwargs in self.execute_calls] + + def clear_execute_calls(self): + self.execute_calls = [] + +class LisaSelfBase(TestCase): + """ + Base class for LISA self-tests + + Creates and sets up a TestTarget. + + Provides directory paths to use for output files. 
Deletes those paths if + they already exist, to try and provide a clean test environment. This + doesn't create those paths, tests should create them if necessary. + """ + + tools = [] + """Tools to install on the 'target' before each test""" + + @property + def target_run_dir(self): + """Unique directory to use for creating files on the 'target'""" + return os.path.join(self.target.working_directory, + 'lisa_target_{}'.format(self.__class__.__name__)) + + @property + def host_out_dir(self): + """Unique directory to use for creating files on the host""" + return os.path.join( + os.getenv('LISA_HOME'), 'results', + 'lisa_selftest_out_{}'.format(self.__class__.__name__)) + + def setUp(self): + self.target = TestTarget() + + tools_path = os.path.join(os.getenv('LISA_HOME'), + 'tools', self.target.abi) + self.target.setup([os.path.join(tools_path, tool) + for tool in self.tools]) + + if self.target.directory_exists(self.target_run_dir): + self.target.remove(self.target_run_dir) + + if os.path.isdir(self.host_out_dir): + shutil.rmtree(self.host_out_dir) + + self.target.clear_execute_calls() + +class RTABase(LisaSelfBase): + """ + Common functionality for testing RTA + + Doesn't have "Test" in the name so that nosetests doesn't try to run it + directly + """ + + tools = ['rt-app'] + + def get_expected_command(self, rta_wload): + """Return the rt-app command we should execute when `run` is called""" + rta_path = os.path.join(self.target.executables_directory, 'rt-app') + json_path = os.path.join(rta_wload.run_dir, rta_wload.json) + return '{} {} 2>&1'.format(rta_path, json_path) + + def setUp(self): + super(RTABase, self).setUp() + + # Can't calibrate rt-app because: + # - Need to set performance governor + # - Need to use SCHED_FIFO + high priority + # We probably don't have permissions so use a dummy calibration. + self.calibration = {c: 100 + for c in range(len(self.target.cpuinfo.cpu_names))} + + os.makedirs(self.host_out_dir) + + def assert_output_file_exists(self, path): + """Assert that a file was created in host_out_dir""" + path = os.path.join(self.host_out_dir, path) + self.assertTrue(os.path.isfile(path), + 'No output file {} from rt-app'.format(path)) + +class TestRTAProfile(RTABase): + def test_profile_periodic_smoke(self): + """ + Smoketest Periodic rt-app workload + + Creates a workload using Periodic, tests that the JSON has the expected + content, then tests that it can be run. 
+ """ + rtapp = RTA(self.target, name='test', calibration=self.calibration) + + rtapp.conf( + kind = 'profile', + params = { + 'task_p20': Periodic( + period_ms = 100, + duty_cycle_pct = 20, + duration_s = 5, + ).get(), + }, + run_dir=self.target_run_dir + ) + + with open(rtapp.json) as f: + conf = json.load(f) + + [phase] = conf['tasks']['task_p20']['phases'].values() + self.assertDictEqual(phase, { + 'loop': 50, + 'run': 20000, + 'timer': { + 'period': 100000, + 'ref': 'task_p20' + } + }) + rtapp.run(out_dir=self.host_out_dir) + + rtapp_cmds = [c for c in self.target.executed_commands if 'rt-app' in c] + self.assertListEqual(rtapp_cmds, [self.get_expected_command(rtapp)]) + + self.assert_output_file_exists('output.log') + self.assert_output_file_exists('rt-app-task_p20-0.log') + self.assert_output_file_exists('test_00.json') + +class TestRTAComposition(RTABase): + def test_composition(self): + """ + Test RTA task composition with __add__ + + Creates a composed workload by +-ing RTATask objects, tests that the + JSON has the expected content, then tests running the workload + """ + rtapp = RTA(self.target, name='test', calibration=self.calibration) + + light = Periodic(duty_cycle_pct=10, duration_s=1.0, period_ms=10) + + start_pct = 10 + end_pct = 90 + delta_pct = 20 + num_ramp_phases = ((end_pct - start_pct) / delta_pct) + 1 + ramp = Ramp(start_pct=start_pct, end_pct=end_pct, delta_pct=delta_pct, + time_s=1, period_ms=50) + + heavy = Periodic(duty_cycle_pct=90, duration_s=0.1, period_ms=100) + + lrh_task = light + ramp + heavy + + rtapp.conf( + kind = 'profile', + params = { + 'task_ramp': lrh_task.get() + }, + run_dir=self.target_run_dir + ) + + with open(rtapp.json) as f: + conf = json.load(f, object_pairs_hook=OrderedDict) + + phases = conf['tasks']['task_ramp']['phases'].values() + + exp_phases = [ + # Light phase: + { + "loop": 100, + "run": 1000, + "timer": { + "period": 10000, + "ref": "task_ramp" + } + }, + # Ramp phases: + { + "loop": 20, + "run": 5000, + "timer": { + "period": 50000, + "ref": "task_ramp" + } + }, + { + "loop": 20, + "run": 15000, + "timer": { + "period": 50000, + "ref": "task_ramp" + } + }, + { + "loop": 20, + "run": 25000, + "timer": { + "period": 50000, + "ref": "task_ramp" + } + }, + { + "loop": 20, + "run": 35000, + "timer": { + "period": 50000, + "ref": "task_ramp" + } + }, + { + "loop": 20, + "run": 45000, + "timer": { + "period": 50000, + "ref": "task_ramp" + } + }, + # Heavy phase: + { + "loop": 1, + "run": 90000, + "timer": { + "period": 100000, + "ref": "task_ramp" + } + }] + + self.assertListEqual(phases, exp_phases) + + rtapp.run(out_dir=self.host_out_dir) + + rtapp_cmds = [c for c in self.target.executed_commands if 'rt-app' in c] + self.assertListEqual(rtapp_cmds, [self.get_expected_command(rtapp)]) + + self.assert_output_file_exists('output.log') + self.assert_output_file_exists('rt-app-task_ramp-0.log') + self.assert_output_file_exists('test_00.json') + + +class TestRTACustom(RTABase): + def test_custom_smoke(self): + """ + Test RTA custom workload + + Creates an rt-app workload using 'custom' and checks that the json + roughly matches the file we provided. If we have root, attempts to run + the workload. 
+ """ + + json_path = os.path.join(os.getenv('LISA_HOME'), + 'assets', 'mp3-short.json') + rtapp = RTA(self.target, name='test', calibration=self.calibration) + + # Configure this RTApp instance to: + rtapp.conf(kind='custom', params=json_path, duration=5, + run_dir=self.target_run_dir) + + with open(rtapp.json) as f: + conf = json.load(f) + + # Convert to str because unicode + tasks = set([str(k) for k in conf['tasks'].keys()]) + self.assertSetEqual( + tasks, + set(['AudioTick', 'AudioOut', 'AudioTrack', + 'mp3.decoder', 'OMXCall'])) + + # Would like to try running the workload but mp3-short.json has nonzero + # 'priority' fields, and we probably don't have permission for that + # unless we're root. + if self.target.is_rooted: + rtapp.run(out_dir=self.host_out_dir) + + rtapp_cmds = [c for c in self.target.executed_commands + if 'rt-app' in c] + self.assertListEqual(rtapp_cmds, [self.get_expected_command(rtapp)]) + + self.assert_output_file_exists('output.log') + self.assert_output_file_exists('test_00.json') + +class TestHackBench(LisaSelfBase): + tools = ['perf'] + + def test_hackbench_smoke(self): + """ + Test PerfMessaging hackbench workload + + Runs a 'hackbench' workload and tests that the expected output was + produced. + """ + perf = PerfMessaging(self.target, 'hackbench') + perf.conf(group=1, loop=100, pipe=True, thread=True, + run_dir=self.target_run_dir) + + os.makedirs(self.host_out_dir) + perf.run(out_dir=self.host_out_dir) + + try: + with open(os.path.join('.', 'performance.json'), 'r') as fh: + perf_json = json.load(fh) + except IOError: + raise AssertionError( + "PerfMessaging didn't create performance report file") + + for field in ['ctime', 'performance']: + msg = 'PerfMessaging performance report missing {} field'\ + .format(field) + self.assertIn(field, perf_json, msg) +
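For reference, a minimal end-to-end sketch of the API exercised by these self-tests, assuming ``target`` is a connected devlib target with rt-app deployed; the dummy calibration mirrors the one used in ``RTABase.setUp`` and the directory paths are placeholders::

    import os
    from wlgen import RTA, Periodic

    # Dummy per-CPU calibration, as used by the self-tests; for real
    # measurements use RTA.calibrate(target) instead.
    calibration = {cpu: 100 for cpu in range(len(target.cpuinfo.cpu_names))}

    rtapp = RTA(target, name='example', calibration=calibration)
    rtapp.conf(
        kind='profile',
        params={'task_p20': Periodic(period_ms=100, duty_cycle_pct=20,
                                     duration_s=5).get()},
        run_dir=os.path.join(target.working_directory, 'example'),
    )

    out_dir = './example_results'   # host directory for logs and JSON
    os.makedirs(out_dir)            # created up front, as the self-tests do
    rtapp.run(out_dir=out_dir)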
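The ``postrun`` hook documented on ``Workload.setCallback`` is intended for subclasses that need to retrieve results from the target once the workload finishes. A sketch under that assumption; the subclass name, method name and report file are hypothetical::

    import os
    from wlgen import Workload

    class MyBench(Workload):                # hypothetical subclass
        def __init__(self, target, name):
            super(MyBench, self).__init__(target, name)
            # Register a hook that runs after the workload completes
            # (unless it was started in the background)
            self.setCallback('postrun', self._postrun)

        def _postrun(self, params):
            # params['destdir'] is the host directory passed to run(out_dir=...)
            destdir = params['destdir']
            if destdir is None:
                return
            # e.g. pull a report produced on the target (self.run_dir is
            # available once conf() has been called)
            report = self.target.path.join(self.run_dir, 'report.json')
            self.target.pull(report, os.path.join(destdir, 'report.json'))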