diff --git a/README.md b/README.md index dca0bbc0e8b572a9217ede8b407b75cc3a990138..ca9f60f8ff49c9ded2a058095408c882d3164130 100644 --- a/README.md +++ b/README.md @@ -157,7 +157,7 @@ just run these few steps: 3. Run the EAS RFC test using the standard nosetest command: ```sh - $ nosetests -v tests/eas/rfc.py:EAS + $ nosetests -v tests/eas/rfc.py ``` 4. Wait for the test to complete and than you can report the results with: @@ -278,7 +278,23 @@ Specifically it demonstrates how to: the TRAPpy library 7. visualize some simple performance metrics for the tasks -## 4. Regression test example 1 +## 4. Get familiar with the tests execution engine + +Tests are usually performed on data collected while running a properly +defined set of experiments. An experiment is defined by a specific **target +configuration** which is used to run a certain **workload mix**. +Thus, to run experiments we need the support of a module which +configures a target and executes a workload on it in order to collect the data +required for a test. + +The __Executor__ module provides a simple yet effective way to collect all the +data required for a test. This notebook: +[utils/executor_example.ipynb](http://localhost:8888/notebooks/utils/executor_example.ipynb) +is a simple example of how to use the Executor to collect experimental data for +a predefined set of target configurations by running a specified set of +workloads. + +## 5. Regression test example 1 One of the main aims of LISA is to become a repository for regression tests on scheduler and power management behavior. A common pattern for @@ -313,7 +329,7 @@ This notebook is a good example of using LISA to build a new set of experiments which can then be transformed into a standalone regression test. -## 5. Regression test example 2 +## 6. 
Regression test example 2 Once a new set of tests have been defined and verified, perhaps by using a notebook to develop them, they can be transformed into a standalone diff --git a/ipynb/utils/executor_example.ipynb b/ipynb/utils/executor_example.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..26dfe7e94b7f27b5ea6235cfeacc0b2cfae7b7c0 --- /dev/null +++ b/ipynb/utils/executor_example.ipynb @@ -0,0 +1,410 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "import logging\n", + "reload(logging)\n", + "log_fmt = '%(asctime)-9s %(levelname)-8s: %(message)s'\n", + "logging.basicConfig(format=log_fmt)\n", + "\n", + "# Change to info once the notebook runs ok\n", + "#logging.getLogger().setLevel(logging.DEBUG)" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": { + "collapsed": false + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Populating the interactive namespace from numpy and matplotlib\n" + ] + } + ], + "source": [ + "%pylab inline\n", + "\n", + "import datetime\n", + "import devlib\n", + "import os\n", + "import json\n", + "import pandas as pd\n", + "import re\n", + "import subprocess\n", + "import trappy\n", + "from trappy.plotter.Utils import get_trace_event_data\n", + "\n", + "import matplotlib.gridspec as gridspec\n", + "import matplotlib.pyplot as plt\n", + "\n", + "# Support to access the remote target\n", + "import devlib\n", + "from env import TestEnv\n", + "\n", + "from executor import Executor" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Target Configuration" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "# Setup a target configuration\n", + "my_target_conf = {\n", + " \n", + " # Target platform and board\n", + " \"platform\" : 'linux',\n", + " \"board\" : 'aboard',\n", + " \n", + " # Target board IP/MAC address\n", + " \"host\" : '192.168.0.1',\n", + " \n", + " # Login credentials\n", + " \"username\" : 'root',\n", + " \"password\" : 'test0000',\n", + "\n", + "}" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Tests Configuration" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": { + "collapsed": false, + "scrolled": false + }, + "outputs": [], + "source": [ + "my_tests_conf = {\n", + "\n", + " # This is the output folder where all the results will be collected\n", + " \"id\" : \"ChromeOS_Profiling\",\n", + "\n", + " # Platform configurations to test\n", + " \"confs\" : [\n", + " {\n", + " \"tag\" : \"base\",\n", + " \"flags\" : \"ftrace\",\n", + " \"sched_features\" : \"NO_ENERGY_AWARE\",\n", + " \"cpufreq\" : {\n", + " \"governor\" : \"performance\",\n", + " },\n", + " },\n", + " {\n", + " \"tag\" : \"eas\",\n", + " \"flags\" : \"ftrace\",\n", + " \"sched_features\" : \"ENERGY_AWARE\",\n", + " \"cpufreq\" : {\n", + " \"governor\" : \"performance\",\n", + " },\n", + " },\n", + " ],\n", + " \n", + " # Workloads to run (on each platform configuration)\n", + " \"wloads\" : {\n", + " \"perf\" : {\n", + " \"type\" : \"perf_bench\",\n", + " \"conf\" : {\n", + " \"class\" : \"messaging\",\n", + " \"params\" : {\n", + " \"group\" : 1,\n", + " \"loop\" : 10,\n", + " \"pipe\" : True,\n", + " \"thread\": True,\n", + " }\n", + " }\n", + " },\n", + " \"rta\" : {\n", + " \"type\" : \"rt-app\",\n", + " \"loadref\" : \"big\",\n", + " \"conf\" 
: {\n", + " \"class\" : \"profile\",\n", + " \"params\" : {\n", + " \"p20\" : {\n", + " \"kind\" : \"periodic\",\n", + " \"params\" : {\n", + " \"duty_cycle_pct\" : 20,\n", + " },\n", + " },\n", + " },\n", + " },\n", + " },\n", + " },\n", + " \n", + " # Number of iterations for each workload\n", + " \"iterations\" : 1,\n", + " \n", + " # FTrace events to collect and functions to profile for all the\n", + " # tests configuration which have the \"ftrace\" flag enabled\n", + " \"ftrace\" : {\n", + " \"events\" : [\n", + " \"sched_switch\",\n", + " \"sched_wakeup\",\n", + " \"sched_wakeup_new\",\n", + " \"cpu_frequency\",\n", + " ],\n", + " \"buffsize\" : 80 * 1024,\n", + " },\n", + " \n", + " # Tools required by the experiments\n", + " \"tools\" : [ 'trace-cmd', 'perf' ],\n", + " \n", + " # Modules required by these experiments\n", + " \"modules\" : [ 'bl', 'cpufreq' ],\n", + "\n", + "}" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Tests execution" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": { + "collapsed": false + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "04:04:11 INFO : Target - Loading custom (inline) test configuration\n", + "04:04:11 INFO : Target - Using base path: /home/derkling/Code/schedtest\n", + "04:04:11 INFO : Target - Loading custom (inline) target configuration\n", + "04:04:11 INFO : Target - Loading custom (inline) test configuration\n", + "04:04:11 INFO : Target - Devlib modules to load: ['bl', 'cpufreq']\n", + "04:04:11 INFO : Target - Connecting linux target with: {'username': 'root', 'host': '192.168.0.1', 'password': 'test0000'}\n", + "04:04:16 INFO : Target - Initializing target workdir [/root/devlib-target]\n", + "04:04:20 INFO : Target topology: [[0, 1], [2, 3]]\n", + "04:04:23 INFO : Platform - Loading default EM [/home/derkling/Code/schedtest/libs/utils/platforms/aboard.json]...\n", + "04:04:24 INFO : FTrace - Enabled events:\n", + "04:04:24 INFO : FTrace - ['sched_switch', 'sched_wakeup', 'sched_wakeup_new', 'cpu_frequency']\n", + "04:04:24 INFO : FTrace - None\n", + "04:04:24 INFO : TestEnv - Set results folder to:\n", + "04:04:24 INFO : TestEnv - /home/derkling/Code/schedtest/results/ChromeOS_Profiling\n", + "04:04:24 INFO : TestEnv - Experiment results available also in:\n", + "04:04:24 INFO : TestEnv - /home/derkling/Code/schedtest/results_latest\n", + "04:04:24 INFO : \n", + "04:04:24 INFO : ################################################################################\n", + "04:04:24 INFO : Executor - Experiments configuration\n", + "04:04:24 INFO : ################################################################################\n", + "04:04:24 INFO : Executor - Configured to run:\n", + "04:04:24 INFO : Executor - 2 targt configurations:\n", + "04:04:24 INFO : Executor - base, eas\n", + "04:04:24 INFO : Executor - 2 workloads (1 iterations each)\n", + "04:04:24 INFO : Executor - rta, perf\n", + "04:04:24 INFO : Executor - Total: 4 experiments\n", + "04:04:24 INFO : Executor - Results will be collected under:\n", + "04:04:24 INFO : Executor - /home/derkling/Code/schedtest/results/ChromeOS_Profiling\n" + ] + } + ], + "source": [ + "executor = Executor(my_target_conf, my_tests_conf)" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": { + "collapsed": false + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "04:04:24 INFO : \n", + "04:04:24 INFO : 
################################################################################\n", + "04:04:24 INFO : Executor - Experiments execution\n", + "04:04:24 INFO : ################################################################################\n", + "04:04:24 INFO : \n", + "04:04:24 INFO : ================================================================================\n", + "04:04:24 INFO : TargetConfig - configuring target for [base] experiments\n", + "04:04:26 INFO : CPUFreq - Configuring all CPUs to use [performance] governor\n", + "04:04:27 INFO : WlGen - Setup new workload rta\n", + "04:04:27 INFO : RTApp - Workload duration defined by longest task\n", + "04:04:27 INFO : RTApp - Default policy: SCHED_OTHER\n", + "04:04:27 INFO : RTApp - ------------------------\n", + "04:04:27 INFO : RTApp - task [task_p20], sched: using default policy\n", + "04:04:27 INFO : RTApp - | calibration CPU: 2\n", + "04:04:27 INFO : RTApp - | loops count: 1\n", + "04:04:27 INFO : RTApp - + phase_000001: duration 1.000000 [s] (10 loops)\n", + "04:04:27 INFO : RTApp - | period 100000 [us], duty_cycle 20 %\n", + "04:04:27 INFO : RTApp - | run_time 20000 [us], sleep_time 80000 [us]\n", + "04:04:28 INFO : ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n", + "04:04:28 INFO : Executor - Experiment 1/4, [base:rta] 1/1\n", + "04:04:28 WARNING : Executor - FTrace events collection enabled\n", + "04:04:34 INFO : WlGen - WlGen [start]: /root/devlib-target/bin/rt-app /root/devlib-target/run_dir/rta_00.json\n", + "04:04:40 INFO : WlGen - Setup new workload perf\n", + "04:04:41 INFO : ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n", + "04:04:41 INFO : Executor - Experiment 2/4, [base:perf] 1/1\n", + "04:04:41 WARNING : Executor - FTrace events collection enabled\n", + "04:04:47 INFO : WlGen - WlGen [start]: /root/devlib-target/bin/perf bench sched messaging --pipe --thread --group 1 --loop 10\n", + "04:04:47 INFO : PerfBench - Completion time: 0.016000, Performance 62.500000\n", + "04:04:52 INFO : \n", + "04:04:52 INFO : ================================================================================\n", + "04:04:52 INFO : TargetConfig - configuring target for [eas] experiments\n", + "04:04:54 INFO : CPUFreq - Configuring all CPUs to use [performance] governor\n", + "04:04:54 INFO : WlGen - Setup new workload rta\n", + "04:04:54 INFO : RTApp - Workload duration defined by longest task\n", + "04:04:54 INFO : RTApp - Default policy: SCHED_OTHER\n", + "04:04:54 INFO : RTApp - ------------------------\n", + "04:04:54 INFO : RTApp - task [task_p20], sched: using default policy\n", + "04:04:54 INFO : RTApp - | calibration CPU: 2\n", + "04:04:54 INFO : RTApp - | loops count: 1\n", + "04:04:54 INFO : RTApp - + phase_000001: duration 1.000000 [s] (10 loops)\n", + "04:04:54 INFO : RTApp - | period 100000 [us], duty_cycle 20 %\n", + "04:04:54 INFO : RTApp - | run_time 20000 [us], sleep_time 80000 [us]\n", + "04:04:55 INFO : ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n", + "04:04:55 INFO : Executor - Experiment 3/4, [eas:rta] 1/1\n", + "04:04:55 WARNING : Executor - FTrace events collection enabled\n", + "04:05:00 INFO : WlGen - WlGen [start]: /root/devlib-target/bin/rt-app /root/devlib-target/run_dir/rta_00.json\n", + "04:05:06 INFO : WlGen - Setup new workload perf\n", + "04:05:06 INFO : ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n", + "04:05:06 INFO : Executor - Experiment 4/4, [eas:perf] 
1/1\n", + "04:05:06 WARNING : Executor - FTrace events collection enabled\n", + "04:05:12 INFO : WlGen - WlGen [start]: /root/devlib-target/bin/perf bench sched messaging --pipe --thread --group 1 --loop 10\n", + "04:05:12 INFO : PerfBench - Completion time: 0.021000, Performance 47.619048\n", + "04:05:17 INFO : \n", + "04:05:17 INFO : ################################################################################\n", + "04:05:17 INFO : Executor - Experiments execution completed\n", + "04:05:17 INFO : ################################################################################\n", + "04:05:17 INFO : Executor - Results available in:\n", + "04:05:17 INFO : Executor - /home/derkling/Code/schedtest/results/ChromeOS_Profiling\n" + ] + } + ], + "source": [ + "executor.run()" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": { + "collapsed": false + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[01;34m/home/derkling/Code/schedtest/results/ChromeOS_Profiling\u001b[00m\r\n", + "├── \u001b[01;34mperf_bench_messaging:base:perf\u001b[00m\r\n", + "│   ├── \u001b[01;34m1\u001b[00m\r\n", + "│   │   ├── output.log\r\n", + "│   │   ├── performance.json\r\n", + "│   │   └── trace.dat\r\n", + "│   ├── kernel.config\r\n", + "│   ├── kernel.version\r\n", + "│   └── platform.json\r\n", + "├── \u001b[01;34mperf_bench_messaging:eas:perf\u001b[00m\r\n", + "│   ├── \u001b[01;34m1\u001b[00m\r\n", + "│   │   ├── output.log\r\n", + "│   │   ├── performance.json\r\n", + "│   │   └── trace.dat\r\n", + "│   ├── kernel.config\r\n", + "│   ├── kernel.version\r\n", + "│   └── platform.json\r\n", + "├── \u001b[01;34mrtapp:base:rta\u001b[00m\r\n", + "│   ├── \u001b[01;34m1\u001b[00m\r\n", + "│   │   ├── output.log\r\n", + "│   │   ├── rta_00.json\r\n", + "│   │   ├── rt-app-task_p20-0.log\r\n", + "│   │   └── trace.dat\r\n", + "│   ├── kernel.config\r\n", + "│   ├── kernel.version\r\n", + "│   └── platform.json\r\n", + "├── \u001b[01;34mrtapp:base:single\u001b[00m\r\n", + "│   ├── \u001b[01;34m1\u001b[00m\r\n", + "│   │   ├── output.log\r\n", + "│   │   ├── rt-app-task_p20-0.log\r\n", + "│   │   ├── single_00.json\r\n", + "│   │   └── trace.dat\r\n", + "│   ├── kernel.config\r\n", + "│   ├── kernel.version\r\n", + "│   └── platform.json\r\n", + "├── \u001b[01;34mrtapp:eas:rta\u001b[00m\r\n", + "│   ├── \u001b[01;34m1\u001b[00m\r\n", + "│   │   ├── output.log\r\n", + "│   │   ├── rta_00.json\r\n", + "│   │   ├── rt-app-task_p20-0.log\r\n", + "│   │   └── trace.dat\r\n", + "│   ├── kernel.config\r\n", + "│   ├── kernel.version\r\n", + "│   └── platform.json\r\n", + "└── \u001b[01;34mrtapp:eas:single\u001b[00m\r\n", + " ├── \u001b[01;34m1\u001b[00m\r\n", + " │   ├── output.log\r\n", + " │   ├── rt-app-task_p20-0.log\r\n", + " │   ├── single_00.json\r\n", + " │   └── trace.dat\r\n", + " ├── kernel.config\r\n", + " ├── kernel.version\r\n", + " └── platform.json\r\n", + "\r\n", + "12 directories, 40 files\r\n" + ] + } + ], + "source": [ + "!tree {executor.te.res_dir}" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 2", + "language": "python", + "name": "python2" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 2 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython2", + "version": "2.7.9" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} diff --git a/libs/utils/__init__.py b/libs/utils/__init__.py index 
6f42c3f9ac0662dcf12e2305506d9b7332e91fde..489f265a56ea0778330392a08932c9325eb8e253 100644 --- a/libs/utils/__init__.py +++ b/libs/utils/__init__.py @@ -18,6 +18,7 @@ """Initialization for utils module""" from env import TestEnv +from executor import Executor from energy import EnergyMeter from conf import JsonConf diff --git a/libs/utils/executor.py b/libs/utils/executor.py new file mode 100644 index 0000000000000000000000000000000000000000..2f3eaad24009f425e3c30415e172f8ec95009663 --- /dev/null +++ b/libs/utils/executor.py @@ -0,0 +1,545 @@ +# SPDX-License-Identifier: Apache-2.0 +# +# Copyright (C) 2015, ARM Limited and contributors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from bart.common.Analyzer import Analyzer +import collections +import datetime +import gzip +import json +import os +import re +import time +import trappy + +# Configure logging +import logging +reload(logging) +logging.basicConfig( + format='%(asctime)-9s %(levelname)-8s: %(message)s', + level=logging.INFO, + datefmt='%I:%M:%S') + +# Add support for Test Environment configuration +from env import TestEnv + +# Add JSON parsing support +from conf import JsonConf + +import wlgen + +class Executor(): + + def __init__(self, target_conf=None, tests_conf=None): + """ + Tests Executor + + A tests executor is a module which supports the execution of a + configured set of experiments. Each experiment is composed of: + - a target configuration + - a workload to execute + + The executor module can be configured to run a set of workloads + (wloads) in each different target configuration of a specified set + (confs). These wloads and confs can be specified by the "tests_config" + input dictionary. + + All the results generated by each experiment will be collected in a result + folder which is named according to this template: + results/<test_id>/<wltype>:<conf>:<wload>/<run_id> + where: + - <test_id> : the "tid" defined by the tests_config, or a timestamp + based folder in case "tid" is not specified + - <wltype> : the class of workload executed, e.g. 
rtapp or sched_perf + - <conf> : the identifier of one of the specified configurations + - <wload> : the identifier of one of the specified workloads + - <run_id> : the progressive execution number from 1 up to the + specified iterations + """ + + # Initialize globals + self._cgroup = None + + # Setup test configuration + if isinstance(tests_conf, dict): + logging.info('%14s - Loading custom (inline) test configuration', + 'Target') + self._tests_conf = tests_conf + elif isinstance(tests_conf, str): + logging.info('%14s - Loading custom (file) test configuration', + 'Target') + json_conf = JsonConf(tests_conf) + self._tests_conf = json_conf.load() + else: + raise ValueError('tests_conf must be either a dictionary or a filepath') + + # Check for mandatory configurations + if 'confs' not in self._tests_conf or not self._tests_conf['confs']: + raise ValueError( + 'Configuration error: missing \'confs\' definitions') + if 'wloads' not in self._tests_conf or not self._tests_conf['wloads']: + raise ValueError( + 'Configuration error: missing \'wloads\' definitions') + + # Setup devlib to access the configured target + self.te = TestEnv(target_conf, tests_conf) + self.target = self.te.target + + # Compute total number of experiments + self._exp_count = self._tests_conf['iterations'] \ + * len(self._tests_conf['wloads']) \ + * len(self._tests_conf['confs']) + + self._print_section('Executor', 'Experiments configuration') + + logging.info('%14s - Configured to run:', 'Executor') + + logging.info('%14s - %3d target configurations:', + 'Executor', len(self._tests_conf['confs'])) + target_confs = [conf['tag'] for conf in self._tests_conf['confs']] + target_confs = ', '.join(target_confs) + logging.info('%14s - %s', 'Executor', target_confs) + + logging.info('%14s - %3d workloads (%d iterations each)', + 'Executor', len(self._tests_conf['wloads']), + self._tests_conf['iterations']) + wload_confs = ', '.join(self._tests_conf['wloads']) + logging.info('%14s - %s', 'Executor', wload_confs) + + logging.info('%14s - Total: %d experiments', + 'Executor', self._exp_count) + + logging.info('%14s - Results will be collected under:', 'Executor') + logging.info('%14s - %s', 'Executor', self.te.res_dir) + + def run(self): + self._print_section('Executor', 'Experiments execution') + + # Run all the configured experiments + exp_idx = 1 + for tc in self._tests_conf['confs']: + # TARGET: configuration + if not self._target_configure(tc): + continue + for wl_idx in self._tests_conf['wloads']: + # TEST: configuration + wload = self._wload_init(tc, wl_idx) + for itr_idx in range(1, self._tests_conf['iterations']+1): + # WORKLOAD: execution + self._wload_run(exp_idx, tc, wl_idx, wload, itr_idx) + exp_idx += 1 + + self._print_section('Executor', 'Experiments execution completed') + logging.info('%14s - Results available in:', 'Executor') + logging.info('%14s - %s', 'Executor', self.te.res_dir) + + +################################################################################ +# Target Configuration +################################################################################ + + def _cgroups_init(self, tc): + if 'cgroups' not in tc: + return True + logging.info(r'%14s - Initialize CGroups support...', 'CGroups') + errors = False + for kind in tc['cgroups']['conf']: + logging.info(r'%14s - Setup [%s] controller...', + 'CGroups', kind) + controller = self.target.cgroups.controller(kind) + if not controller: + logging.warning(r'%14s - CGroups controller [%s] NOT available', + 'CGroups', kind) + errors = True + return not errors + + def 
_setup_kernel(self, tc): + # Deploy kernel on the device + self.te.install_kernel(tc, reboot=True) + # Setup the rootfs for the experiments + self._setup_rootfs(tc) + + def _setup_sched_features(self, tc): + if 'sched_features' not in tc: + logging.debug('%14s - Configuration not provided', 'SchedFeatures') + return + feats = tc['sched_features'].split(",") + for feat in feats: + self.target.execute('echo {} > /sys/kernel/debug/sched_features'.format(feat)) + + def _setup_rootfs(self, tc): + # Initialize CGroups if required + self._cgroups_init(tc) + # Setup target folder for experiments execution + self.te.run_dir = os.path.join( + self.target.working_directory, TGT_RUN_DIR) + # Create run folder as tmpfs + logging.debug('%14s - Setup RT-App run folder [%s]...', + 'TargetSetup', self.te.run_dir) + self.target.execute('[ -d {0} ] || mkdir {0}'\ + .format(self.te.run_dir), as_root=True) + self.target.execute( + 'grep schedtest /proc/mounts || '\ + ' mount -t tmpfs -o size=1024m {} {}'\ + .format('schedtest', self.te.run_dir), + as_root=True) + + def _setup_cpufreq(self, tc): + if 'cpufreq' not in tc: + logging.warning(r'%14s - governor not specified, '\ + 'using currently configured governor', + 'CPUFreq') + return + + cpufreq = tc['cpufreq'] + logging.info(r'%14s - Configuring all CPUs to use [%s] governor', + 'CPUFreq', cpufreq['governor']) + + if cpufreq['governor'] == 'ondemand': + try: + sampling_rate = cpufreq['params']['sampling_rate'] + except KeyError: + sampling_rate = 20000 + self.target.execute( + 'for CPU in /sys/devices/system/cpu/cpu[0-9]*; do '\ + ' echo {} > $CPU/cpufreq/scaling_governor; '\ + ' if [ -e $CPU/cpufreq/ondemand/sampling_rate ]; then'\ + ' echo {} > $CPU/cpufreq/ondemand/sampling_rate;'\ + ' else'\ + ' echo {} > $CPU/../cpufreq/ondemand/sampling_rate;'\ + ' fi;'\ + 'done'\ + .format('ondemand', sampling_rate, sampling_rate)) + else: + self.target.execute( + 'for CPU in /sys/devices/system/cpu/cpu[0-9]*; do '\ + ' echo {} > $CPU/cpufreq/scaling_governor; '\ + 'done'\ + .format(cpufreq['governor'])) + + def _setup_cgroups(self, tc): + if 'cgroups' not in tc: + return True + # Setup default CGroup to run tasks into + if 'default' in tc['cgroups']: + self._cgroup = tc['cgroups']['default'] + # Configure each required controller + if 'conf' not in tc['cgroups']: + return True + errors = False + for kind in tc['cgroups']['conf']: + controller = self.target.cgroups.controller(kind) + if not controller: + logging.warning(r'%14s - Configuration error: '\ + '[%s] contoller NOT supported', + 'CGroups', kind) + errors = True + continue + self._setup_controller(tc, controller) + return not errors + + def _setup_controller(self, tc, controller): + kind = controller.kind + # Configure each required groups for that controller + errors = False + for name in tc['cgroups']['conf'][controller.kind]: + group = controller.cgroup(name) + if not group: + logging.warning(r'%14s - Configuration error: '\ + '[%s/%s] cgroup NOT available', + 'CGroups', kind, name) + errors = True + continue + self._setup_group(tc, group) + return not errors + + def _setup_group(self, tc, group): + kind = group.controller.kind + name = group.name + # Configure each required attribute + group.set(**tc['cgroups']['conf'][kind][name]) + + def _target_configure(self, tc): + self._print_header('TargetConfig', + r'configuring target for [{}] experiments'\ + .format(tc['tag'])) + self._setup_kernel(tc) + self._setup_sched_features(tc) + self._setup_cpufreq(tc) + return self._setup_cgroups(tc) + + def 
_target_conf_flag(self, tc, flag): + if 'flags' not in tc: + has_flag = False + else: + has_flag = flag in tc['flags'] + logging.debug('%14s - Check if target conf [%s] has flag [%s]: %s', + 'TargetConf', tc['tag'], flag, has_flag) + return has_flag + + +################################################################################ +# Workload Setup and Execution +################################################################################ + + def _wload_cpus(self, wl_idx, wlspec): + if not 'cpus' in wlspec['conf']: + return None + cpus = wlspec['conf']['cpus'] + + if type(cpus) == int: + return list(cpus) + if cpus.startswith('littles'): + if 'first' in cpus: + return [ self.target.bl.littles_online[0] ] + if 'last' in cpus: + return [ self.target.bl.littles_online[-1] ] + return self.target.bl.littles_online + if cpus.startswith('bigs'): + if 'first' in cpus: + return [ self.target.bl.bigs_online[0] ] + if 'last' in cpus: + return [ self.target.bl.bigs_online[-1] ] + return self.target.bl.bigs_online + raise ValueError('Configuration error - ' + 'unsupported [{}] \'cpus\' value for [{}] '\ + 'workload specification'\ + .format(cpus, wl_idx)) + + def _wload_task_idxs(self, wl_idx, tasks): + if type(tasks) == int: + return range(tasks) + if tasks == 'cpus': + return range(len(self.target.core_names)) + if tasks == 'little': + return range(len([t + for t in self.target.core_names + if t == self.target.little_core])) + if tasks == 'big': + return range(len([t + for t in self.target.core_names + if t == self.target.big_core])) + raise ValueError('Configuration error - ' + 'unsupported \'tasks\' value for [{}] '\ + 'RT-App workload specification'\ + .format(wl_idx)) + + def _wload_rtapp(self, wl_idx, wlspec, cpus): + conf = wlspec['conf'] + logging.debug(r'%14s - Configuring [%s] rt-app...', + 'RTApp', conf['class']) + + # Setup a default "empty" task name prefix + if 'prefix' not in conf: + conf['prefix'] = 'task_' + + # Setup a default loadref CPU + loadref = None + if 'loadref' in wlspec: + loadref = wlspec['loadref'] + + if conf['class'] == 'profile': + params = {} + # Load each task specification + for task_name in conf['params']: + task = conf['params'][task_name] + task_name = conf['prefix'] + task_name + if task['kind'] not in wlgen.RTA.__dict__: + logging.error(r'%14s - RTA task of kind [%s] not supported', + 'RTApp', task['kind']) + raise ValueError('Configuration error - ' + 'unsupported \'kind\' value for task [{}] '\ + 'in RT-App workload specification'\ + .format(task)) + task_ctor = getattr(wlgen.RTA, task['kind']) + params[task_name] = task_ctor(**task['params']) + rtapp = wlgen.RTA(self.target, + wl_idx, calibration = self.te.calibration()) + rtapp.conf(kind='profile', params=params, loadref=loadref, + cpus=cpus, run_dir=self.te.run_dir) + return rtapp + + if conf['class'] == 'periodic': + task_idxs = self._wload_task_idxs(wl_idx, conf['tasks']) + params = {} + for idx in task_idxs: + task = conf['prefix'] + str(idx) + params[task] = wlgen.RTA.periodic(**conf['params']) + rtapp = wlgen.RTA(self.target, + wl_idx, calibration = self.te.calibration()) + rtapp.conf(kind='profile', params=params, loadref=loadref, + cpus=cpus, run_dir=self.te.run_dir) + return rtapp + + if conf['class'] == 'custom': + rtapp = wlgen.RTA(self.target, + wl_idx, calibration = self.te.calib) + rtapp.conf(kind='custom', + params=conf['json'], + duration=conf['duration'], + loadref=loadref, + cpus=cpus, run_dir=self.te.run_dir) + return rtapp + + raise ValueError('Configuration error - ' + 
'unsupported \'class\' value for [{}] '\ + 'RT-App workload specification'\ + .format(wl_idx)) + + def _wload_perf_bench(self, wl_idx, wlspec, cpus): + conf = wlspec['conf'] + logging.debug(r'%14s - Configuring perf_message...', + 'PerfMessage') + + if conf['class'] == 'messaging': + perf_bench = wlgen.PerfMessaging(self.target, wl_idx) + perf_bench.conf(**conf['params']) + return perf_bench + + if conf['class'] == 'pipe': + perf_bench = wlgen.PerfPipe(self.target, wl_idx) + perf_bench.conf(**conf['params']) + return perf_bench + + raise ValueError('Configuration error - '\ + 'unsupported \'class\' value for [{}] '\ + 'perf bench workload specification'\ + .format(wl_idx)) + + def _wload_conf(self, wl_idx, wlspec): + + # CPUS: setup execution on CPUs if required by configuration + cpus = self._wload_cpus(wl_idx, wlspec) + + if wlspec['type'] == 'rt-app': + return self._wload_rtapp(wl_idx, wlspec, cpus) + if wlspec['type'] == 'perf_bench': + return self._wload_perf_bench(wl_idx, wlspec, cpus) + + + raise ValueError('Configuration error - ' + 'unsupported \'type\' value for [{}] '\ + 'workload specification'\ + .format(wl_idx)) + + def _wload_init(self, tc, wl_idx): + tc_idx = tc['tag'] + + # Configure the test workload + wlspec = self._tests_conf['wloads'][wl_idx] + wload = self._wload_conf(wl_idx, wlspec) + + # Keep track of platform configuration + self.te.test_dir = '{}/{}:{}:{}'\ + .format(self.te.res_dir, wload.wtype, tc_idx, wl_idx) + os.system('mkdir -p ' + self.te.test_dir) + self.te.platform_dump(self.te.test_dir) + + # Keep track of kernel configuration and version + config = self.target.config + with gzip.open(os.path.join(self.te.test_dir, 'kernel.config'), 'wb') as fh: + fh.write(config.text) + output = self.target.execute('{} uname -a'\ + .format(self.target.busybox)) + with open(os.path.join(self.te.test_dir, 'kernel.version'), 'w') as fh: + fh.write(output) + + return wload + + def _wload_run_init(self, run_idx): + self.te.out_dir = '{}/{}'\ + .format(self.te.test_dir, run_idx) + logging.debug(r'%14s - out_dir [%s]', 'Executor', self.te.out_dir) + os.system('mkdir -p ' + self.te.out_dir) + + logging.debug(r'%14s - cleanup target output folder', 'Executor') + + target_dir = self.target.working_directory + logging.debug('%14s - setup target directory [%s]', + 'Executor', target_dir) + + def _wload_run(self, exp_idx, tc, wl_idx, wload, run_idx): + tc_idx = tc['tag'] + + self._print_title('Executor', 'Experiment {}/{}, [{}:{}] {}/{}'\ + .format(exp_idx, self._exp_count, + tc_idx, wl_idx, + run_idx, self._tests_conf['iterations'])) + + # Setup local results folder + self._wload_run_init(run_idx) + + # FTRACE: start (if a configuration has been provided) + if self.te.ftrace and self._target_conf_flag(tc, 'ftrace'): + logging.warning('%14s - FTrace events collection enabled', 'Executor') + self.te.ftrace.start() + + # ENERGY: start sampling + if self.te.emeter: + self.te.emeter.reset() + + # WORKLOAD: Run the configured workload + wload.run(out_dir=self.te.out_dir, cgroup=self._cgroup) + + # ENERGY: collect measurements + if self.te.emeter: + self.te.emeter.report(self.te.out_dir) + + # FTRACE: stop and collect measurements + if self.te.ftrace and self._target_conf_flag(tc, 'ftrace'): + self.te.ftrace.stop() + self.te.ftrace.get_trace(self.te.out_dir + '/trace.dat') + self.te.ftrace.get_stats(self.te.out_dir + '/trace_stat.json') + +################################################################################ +# Utility Functions 
+################################################################################ + + def _print_section(self, tag, message): + logging.info('') + logging.info(FMT_SECTION) + logging.info(r'%14s - %s', tag, message) + logging.info(FMT_SECTION) + + def _print_header(self, tag, message): + logging.info('') + logging.info(FMT_HEADER) + logging.info(r'%14s - %s', tag, message) + + def _print_title(self, tag, message): + logging.info(FMT_TITLE) + logging.info(r'%14s - %s', tag, message) + + +################################################################################ +# Globals +################################################################################ + +# Regular expression for comments +JSON_COMMENTS_RE = re.compile( + '(^)?[^\S\n]*/(?:\*(.*?)\*/[^\S\n]*|/[^\n]*)($)?', + re.DOTALL | re.MULTILINE +) + +# Target specific paths +TGT_RUN_DIR = 'run_dir' + +# Logging formatters +FMT_SECTION = r'{:#<80}'.format('') +FMT_HEADER = r'{:=<80}'.format('') +FMT_TITLE = r'{:~<80}'.format('') + +# vim :set tabstop=4 shiftwidth=4 expandtab diff --git a/libs/utils/test.py b/libs/utils/test.py new file mode 100644 index 0000000000000000000000000000000000000000..f616d2ada87e24cc6219e6b50cd106c3ee19e29b --- /dev/null +++ b/libs/utils/test.py @@ -0,0 +1,101 @@ +# SPDX-License-Identifier: Apache-2.0 +# +# Copyright (C) 2015, ARM Limited and contributors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import logging +import os +import unittest + +from conf import JsonConf +from executor import Executor + +class LisaTest(unittest.TestCase): + """A base class for LISA defined tests""" + + def __init__(self, conf_file, *args, **kwargs): + """ + Base class to run LISA test experiments + """ + + self.logger = logging.getLogger('test') + self.logger.setLevel(logging.INFO) + if 'loglevel' in kwargs: + self.logger.setLevel(kwargs['loglevel']) + kwargs.pop('loglevel') + + + self.conf_file = conf_file + self.logger.info("%14s - Using configuration:", + "LisaTest") + self.logger.info("%14s - %s", + "LisaTest", self.conf_file) + + self.logger.debug("%14s - Load test specific configuration...", "LisaTest") + json_conf = JsonConf(self.conf_file) + self.conf = json_conf.load() + + self.logger.debug("%14s - Checking tests configuration...", "LisaTest") + self._checkConf() + + super(LisaTest, self).__init__(*args, **kwargs) + + self._runExperiments() + + def _runExperiments(self): + """ + Default experiments execution engine + """ + + self.logger.info("%14s - Setup tests execution engine...", "LisaTest") + self.executor = Executor(tests_conf = self.conf_file) + + # Alias executor objects to make less verbose tests code + self.te = self.executor.te + self.target = self.executor.target + + # Execute pre-experiments code defined by the test + self._experimentsInit() + + self.logger.info("%14s - Experiments execution...", "LisaTest") + self.executor.run() + + # Execute post-experiments code defined by the test + self._experimentsFinalize() + + def _checkConf(self): + """ + Check for mandatory configuration options + """ + assert 'confs' in self.conf, \ + "Configuration file missing target configurations ('confs' attribute)" + assert self.conf['confs'], \ + "Configuration file with empty set of target configurations ('confs' attribute)" + assert 'wloads' in self.conf, \ + "Configuration file missing workload configurations ('wloads' attribute)" + assert self.conf['wloads'], \ + "Configuration file with empty set of workloads ('wloads' attribute)" + + def _experimentsInit(self): + """ + Code executed before running the experiments + """ + + def _experimentsFinalize(self): + """ + Code executed after running the experiments + """ + +# vim :set tabstop=4 shiftwidth=4 expandtab diff --git a/libs/wlgen/wlgen/perf_bench.py b/libs/wlgen/wlgen/perf_bench.py index c9e85636e19d40418150a9b3e2f8c4e71b3a7d86..85463d0a5013e1f316275b6edd240ff197260ee1 100644 --- a/libs/wlgen/wlgen/perf_bench.py +++ b/libs/wlgen/wlgen/perf_bench.py @@ -29,6 +29,8 @@ class PerfMessaging(Workload): target, name): + self.logger = logging.getLogger('perf_bench') + # TODO: Assume perf is pre-installed on target #target.setup('perf') @@ -67,7 +69,7 @@ class PerfMessaging(Workload): self.params['custom']['group'], self.params['custom']['loop']) - logging.debug('Command line: {}'.format(self.command)) + self.logger.debug('%14s - Command line: %s', 'PerfBench', self.command) # Set and return the test label self.test_label = '{0:s}_{1:02d}'.format(self.name, self.exc_id) @@ -84,7 +86,8 @@ class PerfMessaging(Workload): return logfile = '{}/output.log'.format(destdir) - logging.debug('Saving output on [%s]...', logfile) + self.logger.debug('%14s - Saving output on [%s]...', + 'PerfBench', logfile) with open(logfile, 'w') as ofile: for line in self.getOutput().split('\n'): ofile.write(line+'\n') @@ -97,11 +100,12 @@ class PerfMessaging(Workload): "performance" : perf } - logging.info('Completion time: %.6f, Performance %.6f', - ctime, perf) + 
self.logger.info('%14s - Completion time: %.6f, Performance %.6f', + 'PerfBench', ctime, perf) perfile = '{}/performance.json'.format(destdir) - logging.debug('Saving performance into [%s]...', perfile) + self.logger.debug('%14s - Saving performance into [%s]...', + 'PerfBench', perfile) with open(perfile, 'w') as ofile: json.dump(results, ofile, sort_keys=True, indent=4) @@ -138,7 +142,8 @@ class PerfPipe(Workload): .format(self.target.executables_directory, self.params['custom']['loop']) - logging.debug('Command line: {}'.format(self.command)) + self.logger.debug('%14s - Command line: %s', + 'PerfBench', self.command) # Set and return the test label self.test_label = '{0:s}_{1:02d}'.format(self.name, self.exc_id) @@ -165,7 +170,8 @@ class PerfPipe(Workload): return logfile = '{}/output.log'.format(destdir) - logging.debug('Saving output on [%s]...', logfile) + self.logger.debug('%14s - Saving output on [%s]...', + 'PerfBench', logfile) with open(logfile, 'w') as ofile: for line in self.getOutput().split('\n'): ofile.write(line+'\n') @@ -183,12 +189,13 @@ class PerfPipe(Workload): "ops/sec" : ops } - logging.info('Completion time: %.6f, Performance %.6f', - ctime, perf) + self.logger.info('%14s - Completion time: %.6f, Performance %.6f', + 'PerfBench', ctime, perf) # Reporting performance metric perfile = '{}/performance.json'.format(destdir) - logging.debug('Saving performance into [%s]...', perfile) + self.logger.debug('%14s - Saving performance into [%s]...', + 'PerfBench', perfile) with open(perfile, 'w') as ofile: json.dump(results, ofile, sort_keys=True, indent=4) diff --git a/libs/wlgen/wlgen/rta.py b/libs/wlgen/wlgen/rta.py index 4d3f2c766bf87b9320ac617ca109a210d04114a5..7adff85f44977cd631a08a2bed12fb19ea57ee02 100644 --- a/libs/wlgen/wlgen/rta.py +++ b/libs/wlgen/wlgen/rta.py @@ -108,15 +108,18 @@ class RTA(Workload): destdir = params['destdir'] if destdir is None: return - self.logger.debug('Pulling logfiles to [%s]...', destdir) + self.logger.debug('%14s - Pulling logfiles to [%s]...', + 'RTApp', destdir) for task in self.tasks.keys(): logfile = "'{0:s}/*{1:s}*.log'"\ .format(self.run_dir, task) self.target.pull(logfile, destdir) - self.logger.debug('Pulling JSON to [%s]...', destdir) + self.logger.debug('%14s - Pulling JSON to [%s]...', + 'RTApp', destdir) self.target.pull('{}/{}'.format(self.run_dir, self.json), destdir) logfile = '{}/output.log'.format(destdir) - self.logger.debug('Saving output on [%s]...', logfile) + self.logger.debug('%14s - Saving output on [%s]...', + 'RTApp', logfile) with open(logfile, 'w') as ofile: for line in self.output['executor'].split('\n'): ofile.write(line+'\n') @@ -199,20 +202,24 @@ class RTA(Workload): if self.pload is not None: if loadref and loadref.upper() == 'LITTLE': target_cpu = self._getFirstLittle() - self.logger.debug('ref on LITTLE cpu: %d', target_cpu) + self.logger.debug('%14s - ref on LITTLE cpu: %d', + 'RTApp', target_cpu) else: target_cpu = self._getFirstBig() - self.logger.debug('ref on big cpu: %d', target_cpu) + self.logger.debug('%14s - ref on big cpu: %d', + 'RTApp', target_cpu) return target_cpu # These options are selected only when RTApp has not been # already calibrated if self.cpus is None: target_cpu = self._getFirstBig() - self.logger.debug('ref on cpu: %d', target_cpu) + self.logger.debug('%14s - ref on cpu: %d', + 'RTApp', target_cpu) else: target_cpu = self._getFirstBiggest(self.cpus) - self.logger.debug('ref on (possible) biggest cpu: %d', target_cpu) + self.logger.debug('%14s - ref on (possible) biggest cpu: 
%d', + 'RTApp', target_cpu) return target_cpu def getCalibrationConf(self, target_cpu=0): @@ -269,10 +276,11 @@ class RTA(Workload): global_conf['calibration'] = calibration if self.duration is not None: global_conf['duration'] = self.duration - self.logger.warn('Limiting workload duration to %d [s]', - global_conf['duration']) + self.logger.warn('%14s - Limiting workload duration to %d [s]', + 'RTApp', global_conf['duration']) else: - self.logger.info('Workload duration defined by longest task') + self.logger.info('%14s - Workload duration defined by longest task', + 'RTApp') # Setup default scheduling class if 'policy' in self.sched: @@ -282,7 +290,8 @@ class RTA(Workload): .format(policy)) global_conf['default_policy'] = 'SCHED_' + self.sched['policy'] - self.logger.info('Default policy: %s', global_conf['default_policy']) + self.logger.info('%14s - Default policy: %s', + 'RTApp', global_conf['default_policy']) # Setup global configuration self.rta_profile['global'] = global_conf @@ -312,28 +321,31 @@ class RTA(Workload): # Initialize task phases task_conf['phases'] = {} - self.logger.info('------------------------') - self.logger.info('task [%s], %s', tid, sched_descr) + self.logger.info('%14s - ------------------------', 'RTApp') + self.logger.info('%14s - task [%s], %s', 'RTApp', tid, sched_descr) if 'delay' in task.keys(): if task['delay'] > 0: task['delay'] = int(task['delay'] * 1e6) task_conf['phases']['p000000'] = {} task_conf['phases']['p000000']['delay'] = task['delay'] - self.logger.info(' | start delay: %.6f [s]', - task['delay'] / 1e6) + self.logger.info('%14s - | start delay: %.6f [s]', + 'RTApp', task['delay'] / 1e6) - self.logger.info(' | calibration CPU: %d', target_cpu) + self.logger.info('%14s - | calibration CPU: %d', + 'RTApp', target_cpu) if 'loops' not in task.keys(): task['loops'] = 1 task_conf['loop'] = task['loops'] - self.logger.info(' | loops count: %d', task['loops']) + self.logger.info('%14s - | loops count: %d', + 'RTApp', task['loops']) # Setup task affinity if 'cpus' in task and task['cpus']: task_conf['cpus'] = ranges_to_list(task['cpus']) - self.logger.info(' | CPUs affinity: %s', task['cpus']) + self.logger.info('%14s - | CPUs affinity: %s', + 'RTApp', task['cpus']) # Setup task configuration self.rta_profile['tasks'][tid] = task_conf @@ -350,8 +362,8 @@ class RTA(Workload): # A duty-cycle of 0[%] translates on a 'sleep' phase if duty_cycle == 0: - self.logger.info(' + phase_%06d: sleep %.6f [s]', - pid, duration/1e6) + self.logger.info('%14s - + phase_%06d: sleep %.6f [s]', + 'RTApp', pid, duration/1e6) task_phase = { 'loop': 1, @@ -361,8 +373,8 @@ class RTA(Workload): # A duty-cycle of 100[%] translates on a 'run-only' phase elif duty_cycle == 100: - self.logger.info(' + phase_%06d: batch %.6f [s]', - pid, duration/1e6) + self.logger.info('%14s - + phase_%06d: batch %.6f [s]', + 'RTApp', pid, duration/1e6) task_phase = { 'loop': 1, @@ -381,14 +393,14 @@ class RTA(Workload): running_time = period - sleep_time self.logger.info( - ' + phase_%06d: duration %.6f [s] (%d loops)', - pid, duration/1e6, cloops) + '%14s - + phase_%06d: duration %.6f [s] (%d loops)', + 'RTApp', pid, duration/1e6, cloops) self.logger.info( - ' | period %6d [us], duty_cycle %3d %%', - period, duty_cycle) + '%14s - | period %6d [us], duty_cycle %3d %%', + 'RTApp', period, duty_cycle) self.logger.info( - ' | run_time %6d [us], sleep_time %6d [us]', - running_time, sleep_time) + '%14s - | run_time %6d [us], sleep_time %6d [us]', + 'RTApp', running_time, sleep_time) task_phase = { 
'loop': cloops, diff --git a/libs/wlgen/wlgen/workload.py b/libs/wlgen/wlgen/workload.py index ccb3871847fe45d7095d972b89cb81c9a279a558..bdabab2cad57e84ad66833f37139d267f1defa4d 100644 --- a/libs/wlgen/wlgen/workload.py +++ b/libs/wlgen/wlgen/workload.py @@ -86,26 +86,26 @@ class Workload(object): # Map of task/s parameters self.params = {} - logging.info('Setup new workload %s', self.name) + logging.info('%14s - Setup new workload %s', 'WlGen', self.name) def __callback(self, step, **kwords): if step not in self.steps.keys(): raise ValueError('Callbacks for [%s] step not supported', step) if self.steps[step] is None: return - logging.debug('Callback [%s]...', step) + logging.debug('%14s - Callback [%s]...', 'WlGen', step) self.steps[step](kwords) def setCallback(self, step, func): - logging.debug('Setup step [%s] callback to [%s] function', - step, func.__name__) + logging.debug('%14s - Setup step [%s] callback to [%s] function', + 'WlGen', step, func.__name__) self.steps[step] = func def getCpusMask(self, cpus=None): mask = 0x0 for cpu in (cpus or self.target.list_online_cpus()): mask |= (1 << cpu) - # logging.debug('0x{0:X}'.format(mask)) + # logging.debug('%14s - 0x{0:X}'.format(mask), 'WlGen') return mask def conf(self, @@ -135,16 +135,16 @@ class Workload(object): # Configure a profile workload if kind == 'profile': - logging.debug('Configuring a profile-based workload...') + logging.debug('%14s - Configuring a profile-based workload...', 'WlGen') self.params['profile'] = params # Configure a custom workload elif kind == 'custom': - logging.debug('Configuring custom workload...') + logging.debug('%14s - Configuring custom workload...', 'WlGen') self.params['custom'] = params else: - logging.error('%s is not a supported RTApp workload kind', kind) + logging.error('%14s - %s is not a supported RTApp workload kind', 'WlGen', kind) raise ValueError('RTApp workload kind not supported') def run(self, @@ -157,7 +157,7 @@ class Workload(object): self.cgroup = cgroup if self.command is None: - logging.error('Error: empty executor command') + logging.error('%14s - Error: empty executor command', 'WlGen') # Prepend eventually required taskset command if self.cpus: @@ -182,13 +182,13 @@ class Workload(object): # Start task in background if required if background: - logging.debug('Executor [background]: %s', self.command) + logging.debug('%14s - WlGen [background]: %s', 'WlGen', self.command) results = self.target.execute(self.command, background=True, as_root=as_root) self.output['executor'] = results return results - logging.info('Executor [start]: %s', self.command) + logging.info('%14s - WlGen [start]: %s', 'WlGen', self.command) # Run command and wait for it to complete results = self.target.execute(self.command, @@ -203,14 +203,16 @@ class Workload(object): ftrace_dat = out_dir + '/' + self.test_label + '.dat' dirname = os.path.dirname(ftrace_dat) if not os.path.exists(dirname): - logging.debug('Create ftrace results folder [%s]', dirname) + logging.debug('%14s - Create ftrace results folder [%s]', + 'WlGen', dirname) os.makedirs(dirname) - logging.info('Pulling trace file into [%s]...', ftrace_dat) + logging.info('%14s - Pulling trace file into [%s]...', + 'WlGen', ftrace_dat) ftrace.get_trace(ftrace_dat) self.__callback('postrun', destdir=out_dir) - logging.info('Executor [end]: %s', self.command) + logging.debug('%14s - [end]: %s', 'WlGen', self.command) return ftrace_dat @@ -245,23 +247,23 @@ class Workload(object): return self.tasks if task_names is None: task_names = self.tasks.keys() 
- logging.debug('Lookup dataset for tasks...') + logging.debug('%14s - Lookup dataset for tasks...', 'WlGen') for task_name in task_names: results = dataframe[dataframe[name_key] == task_name]\ [[name_key,pid_key]] if len(results)==0: - logging.error(' task %16s NOT found', task_name) + logging.error('%14s - task %16s NOT found', 'WlGen', task_name) continue (name, pid) = results.head(1).values[0] if name != task_name: - logging.error(' task %16s NOT found', task_name) + logging.error('%14s - task %16s NOT found', 'WlGen', task_name) continue if task_name not in self.tasks: self.tasks[task_name] = {} pids = list(results[pid_key].unique()) self.tasks[task_name]['pid'] = pids - logging.info(' task %16s found, pid: %s', - task_name, self.tasks[task_name]['pid']) + logging.info('%14s - task %16s found, pid: %s', + 'WlGen', task_name, self.tasks[task_name]['pid']) return self.tasks def listAll(self, kill=False): @@ -269,13 +271,15 @@ tasks = self.target.run('ps | grep {0:s}'.format(self.executor)) for task in tasks: task = task.split() - logging.info('%5s: %s (%s)', task[1], task[8], task[0]) + logging.info('%14s - %5s: %s (%s)', + 'WlGen', task[1], task[8], task[0]) if kill: self.target.run('kill -9 {0:s}'.format(task[1])) def killAll(self): if self.executor is None: return - logging.info('Killing all [%s] instances:', self.executor) + logging.info('%14s - Killing all [%s] instances:', + 'WlGen', self.executor) self.listAll(True) diff --git a/tests/eas/rfc_eas.config b/tests/eas/rfc.config similarity index 100% rename from tests/eas/rfc_eas.config rename to tests/eas/rfc.config diff --git a/tests/eas/rfc.py b/tests/eas/rfc.py index c5841c45b9a4471eeb0452022cb469cbe0a168ec..9a8ceab0ad1c9df6867420945658a39198b9dc72 100644 --- a/tests/eas/rfc.py +++ b/tests/eas/rfc.py @@ -15,620 +15,26 @@ # limitations under the License. 
# -from bart.common.Analyzer import Analyzer -import collections -import datetime -import gzip -import json -import os -import re -import time -import trappy - -# Configure logging import logging -reload(logging) -logging.basicConfig( - format='%(asctime)-9s %(levelname)-8s: %(message)s', - level=logging.INFO, - datefmt='%I:%M:%S') - -# Add UnitTest support -import unittest - -# Add support for Test Environment configuration -from env import TestEnv - -# Add JSON parsing support -from conf import JsonConf - -import wlgen - -# Target specific paths -TGT_RUN_DIR = 'run_dir' - -################################################################################ -# Base RFC class -################################################################################ - -class TestBase(unittest.TestCase): - - @classmethod - def setUpTest(cls, tests_config): - - # Initialize globals - cls.kernel = None - cls.dtb = None - cls.cgroup = None - - cls.print_section('Main', 'Experiments configuration') - - # Load test specific configuration - tests_config = os.path.join('tests/eas', tests_config) - logging.info('%14s - Loading EAS RFC tests configuration [%s]...', - 'Main', tests_config) - json_conf = JsonConf(tests_config) - cls.conf = json_conf.load() - - - # Check for mandatory configurations - if 'confs' not in cls.conf or not cls.conf['confs']: - raise ValueError( - 'Configuration error: missing \'conf\' definitions') - if 'wloads' not in cls.conf or not cls.conf['wloads']: - raise ValueError( - 'Configuration error: missing \'wloads\' definitions') - - # Setup devlib to access the configured target - cls.env = TestEnv(test_conf = cls.conf) - - # Compute total number of experiments - cls.exp_count = cls.conf['iterations'] \ - * len(cls.conf['wloads']) \ - * len(cls.conf['confs']) - - cls.print_section('Main', 'Experiments execution') +import os - # Run all the configured experiments - exp_idx = 1 - for tc in cls.conf['confs']: - # TARGET: configuration - if not cls.target_configure(tc): - continue - for wl_idx in cls.conf['wloads']: - # TEST: configuration - wload = cls.wload_init(tc, wl_idx) - for itr_idx in range(1, cls.conf['iterations']+1): - # WORKLOAD: execution - cls.wload_run(exp_idx, tc, wl_idx, wload, itr_idx) - exp_idx += 1 +from test import LisaTest - cls.print_section('Main', 'Experiments post-processing') +TESTS_DIRECTORY = os.path.dirname(os.path.realpath(__file__)) +TESTS_CONF = os.path.join(TESTS_DIRECTORY, "rfc.config") +class RFC(LisaTest): + """Tests for the Energy-Aware Scheduler""" -################################################################################ -# Test cases -################################################################################ + def __init__(self, *args, **kwargs): + super(RFC, self).__init__(TESTS_CONF, *args, **kwargs) - def test_execution_complete(self): - """Check that data have been collected from the target""" + def test_energy_regression(self): + """Check that there is not regression on energy""" # TODO -################################################################################ -# Utility methods -################################################################################ - - @classmethod - def load_conf(cls, filename): - """ Parse a JSON file - First remove comments and then use the json module package - Comments look like : - // ... - or - /* - ... 
- */ - """ - if not os.path.isfile(filename): - raise RuntimeError( - 'Missing configuration file: {}'.format(filename) - ) - logging.debug('loading JSON...') - - with open(filename) as f: - content = ''.join(f.readlines()) - - ## Looking for comments - match = JSON_COMMENTS_RE.search(content) - while match: - # single line comment - content = content[:match.start()] + content[match.end():] - match = JSON_COMMENTS_RE.search(content) - - # Return json file - conf = json.loads(content, parse_int=int) - logging.debug('Target config: %s', conf) - - return conf - - @classmethod - def print_section(cls, tag, message): - logging.info('') - logging.info(FMT_SECTION) - logging.info(r'%14s - %s', tag, message) - logging.info(FMT_SECTION) - - @classmethod - def print_header(cls, tag, message): - logging.info('') - logging.info(FMT_HEADER) - logging.info(r'%14s - %s', tag, message) - - @classmethod - def print_title(cls, tag, message): - logging.info(FMT_TITLE) - logging.info(r'%14s - %s', tag, message) - - @classmethod - def cgroups_init(cls, tc): - if 'cgroups' not in tc: - return True - logging.info(r'%14s - Initialize CGroups support...', 'CGroups') - errors = False - for kind in tc['cgroups']['conf']: - logging.info(r'%14s - Setup [%s] controller...', - 'CGroups', kind) - controller = cls.env.target.cgroups.controller(kind) - if not controller: - logging.warning(r'%14s - CGroups controller [%s] NOT available', - 'CGroups', kind) - errors = True - return not errors - - @classmethod - def setup_kernel(cls, tc): - # Deploy kernel on the device - cls.env.install_kernel(tc, reboot=True) - # Setup the rootfs for the experiments - cls.setup_rootfs(tc) - - @classmethod - def setup_sched_features(cls, tc): - if 'sched_features' not in tc: - logging.debug('%14s - Configuration not provided', 'SchedFeatures') - return - feats = tc['sched_features'].split(",") - for feat in feats: - cls.env.target.execute('echo {} > /sys/kernel/debug/sched_features'.format(feat)) - - @classmethod - def setup_rootfs(cls, tc): - # Initialize CGroups if required - cls.cgroups_init(tc) - # Setup target folder for experiments execution - cls.env.run_dir = os.path.join( - cls.env.target.working_directory, TGT_RUN_DIR) - # Create run folder as tmpfs - logging.debug('%14s - Setup RT-App run folder [%s]...', - 'TargetSetup', cls.env.run_dir) - cls.env.target.execute('[ -d {0} ] || mkdir {0}'\ - .format(cls.env.run_dir), as_root=True) - cls.env.target.execute( - 'grep schedtest /proc/mounts || '\ - ' mount -t tmpfs -o size=1024m {} {}'\ - .format('schedtest', cls.env.run_dir), - as_root=True) - - @classmethod - def setup_cpufreq(cls, tc): - if 'cpufreq' not in tc: - logging.warning(r'%14s - governor not specified, '\ - 'using currently configured governor', - 'CPUFreq') - return - - cpufreq = tc['cpufreq'] - logging.info(r'%14s - Configuring all CPUs to use [%s] governor', - 'CPUFreq', cpufreq['governor']) - - if cpufreq['governor'] == 'ondemand': - try: - sampling_rate = cpufreq['params']['sampling_rate'] - except KeyError: - sampling_rate = 20000 - cls.env.target.execute( - 'for CPU in /sys/devices/system/cpu/cpu[0-9]*; do '\ - ' echo {} > $CPU/cpufreq/scaling_governor; '\ - ' if [ -e $CPU/cpufreq/ondemand/sampling_rate ]; then'\ - ' echo {} > $CPU/cpufreq/ondemand/sampling_rate;'\ - ' else'\ - ' echo {} > $CPU/../cpufreq/ondemand/sampling_rate;'\ - ' fi;'\ - 'done'\ - .format('ondemand', sampling_rate, sampling_rate)) - else: - cls.env.target.execute( - 'for CPU in /sys/devices/system/cpu/cpu[0-9]*; do '\ - ' echo {} > 
$CPU/cpufreq/scaling_governor; '\ - 'done'\ - .format(cpufreq['governor'])) - - @classmethod - def setup_cgroups(cls, tc): - if 'cgroups' not in tc: - return True - # Setup default CGroup to run tasks into - if 'default' in tc['cgroups']: - cls.cgroup = tc['cgroups']['default'] - # Configure each required controller - if 'conf' not in tc['cgroups']: - return True - errors = False - for kind in tc['cgroups']['conf']: - controller = cls.env.target.cgroups.controller(kind) - if not controller: - logging.warning(r'%14s - Configuration error: '\ - '[%s] contoller NOT supported', - 'CGroups', kind) - errors = True - continue - cls.setup_controller(tc, controller) - return not errors - - @classmethod - def setup_controller(cls, tc, controller): - kind = controller.kind - # Configure each required groups for that controller - errors = False - for name in tc['cgroups']['conf'][controller.kind]: - group = controller.cgroup(name) - if not group: - logging.warning(r'%14s - Configuration error: '\ - '[%s/%s] cgroup NOT available', - 'CGroups', kind, name) - errors = True - continue - cls.setup_group(tc, group) - return not errors - - @classmethod - def setup_group(cls, tc, group): - kind = group.controller.kind - name = group.name - # Configure each required attribute - group.set(**tc['cgroups']['conf'][kind][name]) - - @classmethod - def target_configure(cls, tc): - cls.print_header('TargetConfig', - r'configuring target for [{}] experiments'\ - .format(tc['tag'])) - cls.setup_kernel(tc) - cls.setup_sched_features(tc) - cls.setup_cpufreq(tc) - return cls.setup_cgroups(tc) - - @classmethod - def target_conf_flag(cls, tc, flag): - if 'flags' not in tc: - has_flag = False - else: - has_flag = flag in tc['flags'] - logging.debug('%14s - Check if target conf [%s] has flag [%s]: %s', - 'TargetConf', tc['tag'], flag, has_flag) - return has_flag - - # def cleanup(cls): - # target.execute('umount ' + wl_logs, as_root=True) - # target.execute('rmdir ' + wl_logs, as_root=True) - - @classmethod - def wload_rtapp_task_idxs(cls, wl_idx, tasks): - if type(tasks) == int: - return range(tasks) - if tasks == 'cpus': - return range(len(cls.env.target.core_names)) - if tasks == 'little': - return range(len([t - for t in cls.env.target.core_names - if t == cls.env.target.little_core])) - if tasks == 'big': - return range(len([t - for t in cls.env.target.core_names - if t == cls.env.target.big_core])) - raise ValueError('Configuration error - ' - 'unsupported \'tasks\' value for [{}] '\ - 'RT-App workload specification'\ - .format(wl_idx)) - - @classmethod - def wload_cpus(cls, wl_idx, wlspec): - if not 'cpus' in wlspec['conf']: - return None - cpus = wlspec['conf']['cpus'] - - if type(cpus) == int: - return list(cpus) - if cpus.startswith('littles'): - if 'first' in cpus: - return [ cls.env.target.bl.littles_online[0] ] - if 'last' in cpus: - return [ cls.env.target.bl.littles_online[-1] ] - return cls.env.target.bl.littles_online - if cpus.startswith('bigs'): - if 'first' in cpus: - return [ cls.env.target.bl.bigs_online[0] ] - if 'last' in cpus: - return [ cls.env.target.bl.bigs_online[-1] ] - return cls.env.target.bl.bigs_online - raise ValueError('Configuration error - ' - 'unsupported [{}] \'cpus\' value for [{}] '\ - 'workload specification'\ - .format(cpus, wl_idx)) - - @classmethod - def wload_rtapp(cls, wl_idx, wlspec, cpus): - conf = wlspec['conf'] - logging.debug(r'%14s - Configuring [%s] rt-app...', - 'RTApp', conf['class']) - - # Setup a default "empty" task name prefix - if 'prefix' not in conf: - 
conf['prefix'] = 'task_' - - # Setup a default loadref CPU - loadref = None - if 'loadref' in wlspec: - loadref = wlspec['loadref'] - - if conf['class'] == 'profile': - params = {} - # Load each task specification - for task_name in conf['params']: - task = conf['params'][task_name] - task_name = conf['prefix'] + task_name - if task['kind'] not in wlgen.RTA.__dict__: - logging.error(r'%14s - RTA task of kind [%s] not supported', - 'RTApp', task['kind']) - raise ValueError('Configuration error - ' - 'unsupported \'kind\' value for task [{}] '\ - 'in RT-App workload specification'\ - .format(task)) - task_ctor = getattr(wlgen.RTA, task['kind']) - params[task_name] = task_ctor(**task['params']) - rtapp = wlgen.RTA(cls.env.target, - wl_idx, calibration = cls.env.calibration()) - rtapp.conf(kind='profile', params=params, loadref=loadref, - cpus=cpus, run_dir=cls.env.run_dir) - return rtapp - - if conf['class'] == 'periodic': - task_idxs = cls.wload_rtapp_task_idxs(wl_idx, conf['tasks']) - params = {} - for idx in task_idxs: - task = conf['prefix'] + str(idx) - params[task] = wlgen.RTA.periodic(**conf['params']) - rtapp = wlgen.RTA(cls.env.target, - wl_idx, calibration = cls.env.calibration()) - rtapp.conf(kind='profile', params=params, loadref=loadref, - cpus=cpus, run_dir=cls.env.run_dir) - return rtapp - - if conf['class'] == 'custom': - rtapp = wlgen.RTA(cls.env.target, - wl_idx, calibration = cls.env.calib) - rtapp.conf(kind='custom', - params=conf['json'], - duration=conf['duration'], - loadref=loadref, - cpus=cpus, run_dir=cls.env.run_dir) - return rtapp - - raise ValueError('Configuration error - ' - 'unsupported \'class\' value for [{}] '\ - 'RT-App workload specification'\ - .format(wl_idx)) - - @classmethod - def wload_perf_bench(cls, wl_idx, wlspec, cpus): - conf = wlspec['conf'] - logging.debug(r'%14s - Configuring perf_message...', - 'PerfMessage') - - if conf['class'] == 'messaging': - perf_bench = wlgen.PerfMessaging(cls.env.target, wl_idx) - perf_bench.conf(**conf['params']) - return perf_bench - - if conf['class'] == 'pipe': - perf_bench = wlgen.PerfPipe(cls.env.target, wl_idx) - perf_bench.conf(**conf['params']) - return perf_bench - - raise ValueError('Configuration error - '\ - 'unsupported \'class\' value for [{}] '\ - 'perf bench workload specification'\ - .format(wl_idx)) - - @classmethod - def wload_conf(cls, wl_idx, wlspec): - - # CPUS: setup execution on CPUs if required by configuration - cpus = cls.wload_cpus(wl_idx, wlspec) - - if wlspec['type'] == 'rt-app': - return cls.wload_rtapp(wl_idx, wlspec, cpus) - if wlspec['type'] == 'perf_bench': - return cls.wload_perf_bench(wl_idx, wlspec, cpus) - - - raise ValueError('Configuration error - ' - 'unsupported \'type\' value for [{}] '\ - 'workload specification'\ - .format(wl_idx)) - - @classmethod - def wload_init(cls, tc, wl_idx): - tc_idx = tc['tag'] - - # Configure the test workload - wlspec = cls.conf['wloads'][wl_idx] - wload = cls.wload_conf(wl_idx, wlspec) - - # Keep track of platform configuration - cls.env.test_dir = '{}/{}:{}:{}'\ - .format(cls.env.res_dir, wload.wtype, tc_idx, wl_idx) - os.system('mkdir -p ' + cls.env.test_dir) - cls.env.platform_dump(cls.env.test_dir) - - # Keep track of kernel configuration and version - config = cls.env.target.config - with gzip.open(os.path.join(cls.env.test_dir, 'kernel.config'), 'wb') as fh: - fh.write(config.text) - output = cls.env.target.execute('{} uname -a'\ - .format(cls.env.target.busybox)) - with open(os.path.join(cls.env.test_dir, 'kernel.version'), 'w') as fh: 
- fh.write(output) - - return wload - - @classmethod - def wload_run(cls, exp_idx, tc, wl_idx, wload, run_idx): - tc_idx = tc['tag'] - - cls.print_title('MultiRun', 'Experiment {}/{}, [{}:{}] {}/{}'\ - .format(exp_idx, cls.exp_count, - tc_idx, wl_idx, - run_idx, cls.conf['iterations'])) - - # Setup local results folder - cls.wload_run_init(run_idx) - - # FTRACE: start (if a configuration has been provided) - if cls.env.ftrace and cls.target_conf_flag(tc, 'ftrace'): - logging.warning('%14s - Starting FTrace', 'MultiRun') - cls.env.ftrace.start() - - # ENERGY: start sampling - if cls.env.emeter: - cls.env.emeter.reset() - - # WORKLOAD: Run the configured workload - wload.run(out_dir=cls.env.out_dir, cgroup=cls.cgroup) - - # ENERGY: collect measurements - if cls.env.emeter: - cls.env.emeter.report(cls.env.out_dir) - - # FTRACE: stop and collect measurements - if cls.env.ftrace and cls.target_conf_flag(tc, 'ftrace'): - cls.env.ftrace.stop() - cls.env.ftrace.get_trace(cls.env.out_dir + '/trace.dat') - cls.env.ftrace.get_stats(cls.env.out_dir + '/trace_stat.json') - - @classmethod - def wload_run_init(cls, run_idx): - cls.env.out_dir = '{}/{}'\ - .format(cls.env.test_dir, run_idx) - logging.debug(r'%14s - out_dir [%s]', 'MultiRun', cls.env.out_dir) - os.system('mkdir -p ' + cls.env.out_dir) - - logging.debug(r'%14s - cleanup target output folder', 'MultiRun') - - target_dir = cls.env.target.working_directory - logging.debug('%14s - setup target directory [%s]', - 'MultiRun', target_dir) - # cls.env.target.execute('rm {}/output.txt'\ - # .format(target_dir), as_root=True) - - -################################################################################ -# Specific RFC test cases -################################################################################ - -class EAS(TestBase): - - @classmethod - def setUpClass(cls): - super(EAS, cls).setUpTest('rfc_eas.config') - - -class SFreq(TestBase): - - @classmethod - def setUpClass(cls): - super(SFreq, cls).setUpTest('rfc_sfreq.config') - - -class STune(TestBase): - - @classmethod - def setUpClass(cls): - super(STune, cls).setUpTest('rfc_stune.config') - - def test_boosted_utilization_signal(self): - """The boosted utilization signal is appropriately boosted - - The margin should match the formula - (sched_load_scale - utilization) * boost""" - - for tc in self.conf["confs"]: - test_id = tc["tag"] - - wload_idx = self.conf["wloads"].keys()[0] - run_dir = os.path.join(self.env.res_dir, - "rtapp:{}:{}".format(test_id, wload_idx), - "1") - - ftrace_events = ["sched_boost_task"] - ftrace = trappy.FTrace(run_dir, scope="custom", - events=ftrace_events) - - first_task_params = self.conf["wloads"][wload_idx]["conf"]["params"] - first_task_name = first_task_params.keys()[0] - rta_task_name = "task_{}".format(first_task_name) - - sbt_dfr = ftrace.sched_boost_task.data_frame - boost_task_rtapp = sbt_dfr[sbt_dfr.comm == rta_task_name] - - # Avoid the first period as the task starts with a very - # high load and it overutilizes the CPU - rtapp_period = first_task_params[first_task_name]["params"]["period_ms"] - task_start = boost_task_rtapp.index[0] - after_first_period = task_start + rtapp_period - boost_task_rtapp = boost_task_rtapp.ix[after_first_period:] - - sched_load_scale = 1024 - boost = tc["cgroups"]["conf"]["schedtune"]["/stune"]["boost"] / 100. 
-            utilization = boost_task_rtapp["utilization"]
-            expected_margin = (sched_load_scale - utilization) * boost
-            expected_margin = expected_margin.astype(int)
-            boost_task_rtapp["expected_margin"] = expected_margin
-            ftrace.add_parsed_event("boost_task_rtapp", boost_task_rtapp)
-
-            analyzer = Analyzer(ftrace, {})
-            statement = "boost_task_rtapp:margin == boost_task_rtapp:expected_margin"
-            error_msg = "task was not boosted to the expected margin: {}".\
-                        format(boost)
-            self.assertTrue(analyzer.assertStatement(statement), msg=error_msg)
-
-
-################################################################################
-# Globals
-################################################################################
-
-# Regular expression for comments
-JSON_COMMENTS_RE = re.compile(
-    '(^)?[^\S\n]*/(?:\*(.*?)\*/[^\S\n]*|/[^\n]*)($)?',
-    re.DOTALL | re.MULTILINE
-)
-
-# Logging formatters
-FMT_SECTION = r'{:#<80}'.format('')
-FMT_HEADER = r'{:=<80}'.format('')
-FMT_TITLE = r'{:~<80}'.format('')
-
-if __name__ == '__main__':
-    unittest.main()
+    def test_performance_regression(self):
+        """Check that there is no performance regression"""
+        # TODO
 
 # vim :set tabstop=4 shiftwidth=4 expandtab
diff --git a/tests/eas/rfc_sfreq.config b/tests/sfreq/smoke_test.config
similarity index 100%
rename from tests/eas/rfc_sfreq.config
rename to tests/sfreq/smoke_test.config
diff --git a/tests/sfreq/smoke_test.py b/tests/sfreq/smoke_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..02531df6ef75a1788a6e7eee2fcba3326a7db80c
--- /dev/null
+++ b/tests/sfreq/smoke_test.py
@@ -0,0 +1,36 @@
+# SPDX-License-Identifier: Apache-2.0
+#
+# Copyright (C) 2015, ARM Limited and contributors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import logging
+import os
+
+from test import LisaTest
+
+TESTS_DIRECTORY = os.path.dirname(os.path.realpath(__file__))
+TESTS_CONF = os.path.join(TESTS_DIRECTORY, "smoke_test.config")
+
+class SFreq(LisaTest):
+    """Tests for SchedFreq framework"""
+
+    def __init__(self, *args, **kwargs):
+        super(SFreq, self).__init__(TESTS_CONF, *args, **kwargs)
+
+    def test_regression(self):
+        """Check that there is no energy regression"""
+        # TODO
+
+# vim :set tabstop=4 shiftwidth=4 expandtab
diff --git a/tests/eas/rfc_stune.config b/tests/stune/smoke_test_ramp.config
similarity index 84%
rename from tests/eas/rfc_stune.config
rename to tests/stune/smoke_test_ramp.config
index a9ec08701d884174f1a8d8860233808d8ff95f93..6fff26a981848f36868b8b168b563bf0ca6997f8 100644
--- a/tests/eas/rfc_stune.config
+++ b/tests/stune/smoke_test_ramp.config
@@ -4,7 +4,7 @@
     "exclude_modules" : [ ],
 
     /* Binary tools required by the experiments */
-    "tools" : ["rt-app"],
+    "tools" : [ "rt-app" ],
 
     /* FTrace configuration */
     "ftrace" : {
@@ -21,9 +21,9 @@
             "sched_boost_task",
             "sched_energy_diff",
             "cpu_frequency",
-            "cpu_capacity"
+            "cpu_capacity",
         ],
-        "buffsize" : 10240
+        "buffsize" : 10240,
     },
 
     /* Set of platform configurations to test */
@@ -37,10 +37,10 @@
             "conf" : {
                 "schedtune" : {
                     "/" : {"boost" : 0 },
-                    "/stune" : {"boost" : 0 }
+                    "/stune" : {"boost" : 0 },
                 }
             },
-            "default" : "/"
+            "default" : "/",
         }
     },
     {
@@ -52,10 +52,10 @@
             "conf" : {
                 "schedtune" : {
                     "/" : {"boost" : 0 },
-                    "/stune" : {"boost" : 15 }
+                    "/stune" : {"boost" : 15 },
                 }
             },
-            "default" : "/stune"
+            "default" : "/stune",
        }
    },
    {
@@ -67,10 +67,10 @@
             "conf" : {
                 "schedtune" : {
                     "/" : {"boost" : 0 },
-                    "/stune" : {"boost" : 30 }
+                    "/stune" : {"boost" : 30 },
                 }
             },
-            "default" : "/stune"
+            "default" : "/stune",
        }
    },
    {
@@ -82,10 +82,10 @@
             "conf" : {
                 "schedtune" : {
                     "/" : {"boost" : 0 },
-                    "/stune" : {"boost" : 60 }
+                    "/stune" : {"boost" : 60 },
                 }
             },
-            "default" : "/stune"
+            "default" : "/stune",
        }
    }
 
@@ -105,20 +105,18 @@
                         "start_pct" : 5,
                         "end_pct" : 60,
                         "delta_pct" : 5,
-                        "time_s" : 1
+                        "time_s" : 1,
                     }
                 }
             }
         },
-        "loadref" : "LITTLE"
+        "loadref" : "LITTLE",
        }
    },
 
     /* Number of iterations for each workload */
     "iterations" : 1,
 
-    /* This must be the last entry */
-    "__last__" : ""
 }
 
 // vim :set tabstop=4 shiftwidth=4 expandtab
diff --git a/tests/stune/smoke_test_ramp.py b/tests/stune/smoke_test_ramp.py
new file mode 100644
index 0000000000000000000000000000000000000000..7ab1bc0b85d1733d3161b5c62dee0c5784700cb3
--- /dev/null
+++ b/tests/stune/smoke_test_ramp.py
@@ -0,0 +1,81 @@
+# SPDX-License-Identifier: Apache-2.0
+#
+# Copyright (C) 2015, ARM Limited and contributors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import logging
+import os
+
+from test import LisaTest
+
+import trappy
+from bart.common.Analyzer import Analyzer
+
+TESTS_DIRECTORY = os.path.dirname(os.path.realpath(__file__))
+TESTS_CONF = os.path.join(TESTS_DIRECTORY, "smoke_test_ramp.config")
+
+class STune(LisaTest):
+    """Tests for SchedTune framework"""
+
+    def __init__(self, *args, **kwargs):
+        super(STune, self).__init__(TESTS_CONF, *args, **kwargs)
+
+    def test_boosted_utilization_signal(self):
+        """The boosted utilization signal is appropriately boosted
+
+        The margin should match the formula
+        (sched_load_scale - util) * boost"""
+
+        for tc in self.conf["confs"]:
+            test_id = tc["tag"]
+
+            wload_idx = self.conf["wloads"].keys()[0]
+            run_dir = os.path.join(self.te.res_dir,
+                                   "rtapp:{}:{}".format(test_id, wload_idx),
+                                   "1")
+
+            ftrace_events = ["sched_boost_task"]
+            ftrace = trappy.FTrace(run_dir, scope="custom",
+                                   events=ftrace_events)
+
+            first_task_params = self.conf["wloads"][wload_idx]["conf"]["params"]
+            first_task_name = first_task_params.keys()[0]
+            rta_task_name = "task_{}".format(first_task_name)
+
+            sbt_dfr = ftrace.sched_boost_task.data_frame
+            boost_task_rtapp = sbt_dfr[sbt_dfr.comm == rta_task_name]
+
+            # Avoid the first period as the task starts with a very
+            # high load and it overutilizes the CPU
+            rtapp_period = first_task_params[first_task_name]["params"]["period_ms"]
+            task_start = boost_task_rtapp.index[0]
+            after_first_period = task_start + rtapp_period
+            boost_task_rtapp = boost_task_rtapp.ix[after_first_period:]
+
+            sched_load_scale = 1024
+            boost = tc["cgroups"]["conf"]["schedtune"]["/stune"]["boost"] / 100.
+            util = boost_task_rtapp["util"]
+            expected_margin = (sched_load_scale - util) * boost
+            expected_margin = expected_margin.astype(int)
+            boost_task_rtapp["expected_margin"] = expected_margin
+            ftrace.add_parsed_event("boost_task_rtapp", boost_task_rtapp)
+
+            analyzer = Analyzer(ftrace, {})
+            statement = "boost_task_rtapp:margin == boost_task_rtapp:expected_margin"
+            error_msg = "task was not boosted to the expected margin: {}".\
+                        format(boost)
+            self.assertTrue(analyzer.assertStatement(statement), msg=error_msg)
+
+# vim :set tabstop=4 shiftwidth=4 expandtab
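
For reference, the margin check performed by `STune.test_boosted_utilization_signal` can be reproduced on made-up sample values, without a target, an rt-app run, or a trace. The snippet below is only an illustrative sketch: the `util` and `margin` numbers are invented, and it uses plain pandas instead of the TRAPpy `FTrace` and BART `Analyzer` objects that the test itself drives.

```python
# Toy illustration of the SchedTune margin check in
# STune.test_boosted_utilization_signal. The util/margin samples are
# invented; a real run takes them from the sched_boost_task ftrace events.
import pandas as pd

SCHED_LOAD_SCALE = 1024
boost = 15 / 100.   # e.g. the 15% "/stune" boost configuration

df = pd.DataFrame({
    "util":   [224, 512, 800],
    "margin": [120,  76,  33],
})

# Expected margin: (SCHED_LOAD_SCALE - util) * boost, truncated to an integer
df["expected_margin"] = ((SCHED_LOAD_SCALE - df["util"]) * boost).astype(int)

# Equivalent of the Analyzer statement
# "boost_task_rtapp:margin == boost_task_rtapp:expected_margin"
assert (df["margin"] == df["expected_margin"]).all()
```

The `astype(int)` step matches the integer truncation used in the test's expected-margin computation, which is why an exact equality check (rather than an approximate one) is asserted. Note also that the test expects the results of each experiment under a `rtapp:<conf tag>:<wload name>/<iteration>` directory inside the test environment's results folder, so the configuration tags and workload names in `smoke_test_ramp.config` directly determine which traces get checked.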