diff --git a/.travis.yml b/.travis.yml index 8922ed4223e1b6ab0798e30c3439e381b5d2067b..e1fe75428bfe7449cfb33fbfa2db17a446b67117 100644 --- a/.travis.yml +++ b/.travis.yml @@ -18,7 +18,7 @@ sudo: required language: python install: - - pip install --upgrade Cython trappy bart-py devlib psutil wrapt ipython jupyter + - pip install --upgrade Cython trappy bart-py devlib psutil wrapt ipython jupyter matplotlib script: - cd $TRAVIS_BUILD_DIR - source init_env && lisa-test tests/lisa/ diff --git a/ipynb/tests/Generic_EAS_Tests.ipynb b/ipynb/tests/Generic_EAS_Tests.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..fb64d8015bd78c265b2d3658ad3808b2b9b1c229 --- /dev/null +++ b/ipynb/tests/Generic_EAS_Tests.ipynb @@ -0,0 +1,697 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "collapsed": true + }, + "source": [ + "# Run Generic Automated EAS tests\n", + "This is a starting-point notebook for running tests from the generic EAS suite in `tests/eas/generic.py`. The test classes that are imported here provide helper methods to aid analysis of the cause of failure. You can use Python's `help` built-in to find those methods (or you can just read the docstrings in the code).\n", + "\n", + "These tests make estimation of the energy efficiency of task placements, without directly examining the behaviour of cpufreq or cpuidle. Several test classes are provided, the only difference between them being the workload that is used." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Setup" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": { + "collapsed": false + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2017-01-23 18:50:17,847 INFO : root : Using LISA logging configuration:\n", + "2017-01-23 18:50:17,847 INFO : root : /home/brendan/sources/lisa/logging.conf\n" + ] + } + ], + "source": [ + "%load_ext autoreload\n", + "%autoreload 2\n", + "%matplotlib inline\n", + "\n", + "import logging\n", + "from conf import LisaLogging\n", + "LisaLogging.setup()#level=logging.WARNING)\n", + "\n", + "import pandas as pd\n", + "\n", + "from perf_analysis import PerfAnalysis\n", + "\n", + "import trappy\n", + "from trappy import ILinePlot\n", + "from trappy.stats.grammar import Parser" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Run test workload\n", + "\n", + "If you simply want to run all the tests and get pass/fail results, use this command in the LISA shell: `lisa-test tests/eas/generic.py`. This notebook is intended as a starting point for analysing what scheduler behaviour was judged to be faulty.\n", + "\n", + "Target configuration is taken from `$LISA_HOME/target.config` - you'll need to edit that file to provide connection details for the target you want to test." + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "from tests.eas.generic import TwoBigTasks, TwoBigThreeSmall, RampUp, RampDown, EnergyModelWakeMigration, OneSmallTask" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "By default we'll run the EnergyModelWakeMigration test, which runs a workload alternating between high and low-intensity. All the other test classes shown above have the same interface, but run different workloads. 
To run the tests on different workloads, change this line below:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + " Test EAS for tasks alternating beetween 10% and 50%\n", + " \n" + ] + } + ], + "source": [ + "t = EnergyModelWakeMigration(methodName=\"test_task_placement\")\n", + "print t.__doc__" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false, + "scrolled": true + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2017-01-23 18:50:18,176 INFO : LisaTest : Setup tests execution engine...\n", + "2017-01-23 18:50:18,178 INFO : TestEnv : Using base path: /home/brejac01/sources/lisa\n", + "2017-01-23 18:50:18,178 INFO : TestEnv : Loading default (file) target configuration\n", + "2017-01-23 18:50:18,179 INFO : TestEnv : Loading target configuration [/home/brejac01/sources/lisa/target.config]...\n", + "2017-01-23 18:50:18,181 INFO : TestEnv : Loading custom (inline) test configuration\n", + "2017-01-23 18:50:18,182 INFO : TestEnv : Devlib modules to load: ['bl', u'cpuidle', 'cgroups', 'cpufreq']\n", + "2017-01-23 18:50:18,183 INFO : TestEnv : Connecting linux target:\n", + "2017-01-23 18:50:18,184 INFO : TestEnv : username : brendan\n", + "2017-01-23 18:50:18,185 INFO : TestEnv : host : 192.168.2.2\n", + "2017-01-23 18:50:18,186 INFO : TestEnv : password : password\n", + "2017-01-23 18:50:18,187 INFO : TestEnv : Connection settings:\n", + "2017-01-23 18:50:18,188 INFO : TestEnv : {'username': u'brendan', 'host': u'192.168.2.2', 'password': u'password'}\n", + "2017-01-23 18:50:25,803 INFO : TestEnv : Initializing target workdir:\n", + "2017-01-23 18:50:25,805 INFO : TestEnv : /home/brendan/devlib-target\n", + "2017-01-23 18:50:29,349 INFO : CGroups : Available controllers:\n", + "2017-01-23 18:50:31,076 INFO : CGroups : schedtune : 
/home/brendan/devlib-target/cgroups/devlib_cgh2\n", + "2017-01-23 18:50:31,749 INFO : TestEnv : Topology:\n", + "2017-01-23 18:50:31,751 INFO : TestEnv : [[0, 3, 4, 5], [1, 2]]\n", + "2017-01-23 18:50:34,373 INFO : TestEnv : Loading default EM:\n", + "2017-01-23 18:50:34,374 INFO : TestEnv : /home/brejac01/sources/lisa/libs/utils/platforms/juno.json\n", + "2017-01-23 18:50:36,104 INFO : TestEnv : Enabled tracepoints:\n", + "2017-01-23 18:50:36,106 INFO : TestEnv : sched_overutilized\n", + "2017-01-23 18:50:36,107 INFO : TestEnv : sched_energy_diff\n", + "2017-01-23 18:50:36,108 INFO : TestEnv : sched_load_avg_task\n", + "2017-01-23 18:50:36,109 INFO : TestEnv : sched_load_avg_cpu\n", + "2017-01-23 18:50:36,110 INFO : TestEnv : sched_migrate_task\n", + "2017-01-23 18:50:36,111 INFO : TestEnv : sched_switch\n", + "2017-01-23 18:50:36,112 INFO : TestEnv : cpu_frequency\n", + "2017-01-23 18:50:36,114 INFO : EnergyMeter : HWMON module not enabled\n", + "2017-01-23 18:50:36,115 WARNING : EnergyMeter : Energy sampling disabled by configuration\n", + "2017-01-23 18:50:36,116 INFO : TestEnv : Set results folder to:\n", + "2017-01-23 18:50:36,117 INFO : TestEnv : /home/brejac01/sources/lisa/results/20170123_185036\n", + "2017-01-23 18:50:36,118 INFO : TestEnv : Experiment results available also in:\n", + "2017-01-23 18:50:36,119 INFO : TestEnv : /home/brejac01/sources/lisa/results_latest\n", + "2017-01-23 18:50:36,121 INFO : Executor : Loading custom (inline) test configuration\n", + "2017-01-23 18:50:36,121 INFO : Executor : \n", + "2017-01-23 18:50:36,123 INFO : Executor : ################################################################################\n", + "2017-01-23 18:50:36,124 INFO : Executor : Experiments configuration\n", + "2017-01-23 18:50:36,125 INFO : Executor : ################################################################################\n", + "2017-01-23 18:50:36,126 INFO : Executor : Configured to run:\n", + "2017-01-23 18:50:36,127 INFO : Executor : 1 
target configurations:\n", + "2017-01-23 18:50:36,128 INFO : Executor : energy_aware\n", + "2017-01-23 18:50:36,129 INFO : Executor : 1 workloads (1 iterations each)\n", + "2017-01-23 18:50:36,130 INFO : Executor : wake_migration\n", + "2017-01-23 18:50:36,131 INFO : Executor : Total: 1 experiments\n", + "2017-01-23 18:50:36,132 INFO : Executor : Results will be collected under:\n", + "2017-01-23 18:50:36,133 INFO : Executor : /home/brejac01/sources/lisa/results/20170123_185036\n", + "2017-01-23 18:50:36,134 INFO : Executor : rt-app workloads found, installing tool on target\n", + "2017-01-23 18:50:38,252 INFO : LisaTest : Experiments execution...\n", + "2017-01-23 18:50:38,254 INFO : Executor : \n", + "2017-01-23 18:50:38,255 INFO : Executor : ################################################################################\n", + "2017-01-23 18:50:38,256 INFO : Executor : Experiments execution\n", + "2017-01-23 18:50:38,257 INFO : Executor : ################################################################################\n", + "2017-01-23 18:50:38,258 INFO : Executor : \n", + "2017-01-23 18:50:38,259 INFO : Executor : ================================================================================\n", + "2017-01-23 18:50:38,260 INFO : Executor : configuring target for [energy_aware] experiments\n", + "2017-01-23 18:50:39,976 INFO : Executor : Set scheduler feature: ENERGY_AWARE\n", + "2017-01-23 18:50:40,881 WARNING : Executor : cpufreq governor not specified, using currently configured governor\n", + "2017-01-23 18:50:40,883 INFO : Workload : Setup new workload wake_migration\n", + "2017-01-23 18:50:40,884 INFO : Workload : Workload duration defined by longest task\n", + "2017-01-23 18:50:40,885 INFO : Workload : Default policy: SCHED_OTHER\n", + "2017-01-23 18:50:40,886 INFO : Workload : ------------------------\n", + "2017-01-23 18:50:40,888 INFO : Workload : task [task_wmig0], sched: using default policy\n", + "2017-01-23 18:50:40,889 INFO : Workload : | 
calibration CPU: 1\n", + "2017-01-23 18:50:40,890 INFO : Workload : | loops count: 2\n", + "2017-01-23 18:50:40,891 INFO : Workload : + phase_000001: duration 1.000000 [s] (10 loops)\n", + "2017-01-23 18:50:40,892 INFO : Workload : | period 100000 [us], duty_cycle 10 %\n", + "2017-01-23 18:50:40,893 INFO : Workload : | run_time 10000 [us], sleep_time 90000 [us]\n", + "2017-01-23 18:50:40,894 INFO : Workload : + phase_000002: duration 1.000000 [s] (10 loops)\n", + "2017-01-23 18:50:40,895 INFO : Workload : | period 100000 [us], duty_cycle 50 %\n", + "2017-01-23 18:50:40,896 INFO : Workload : | run_time 50000 [us], sleep_time 50000 [us]\n", + "2017-01-23 18:50:40,897 INFO : Workload : ------------------------\n", + "2017-01-23 18:50:40,897 INFO : Workload : task [task_wmig1], sched: using default policy\n", + "2017-01-23 18:50:40,898 INFO : Workload : | calibration CPU: 1\n", + "2017-01-23 18:50:40,899 INFO : Workload : | loops count: 2\n", + "2017-01-23 18:50:40,900 INFO : Workload : + phase_000001: duration 1.000000 [s] (10 loops)\n", + "2017-01-23 18:50:40,901 INFO : Workload : | period 100000 [us], duty_cycle 10 %\n", + "2017-01-23 18:50:40,902 INFO : Workload : | run_time 10000 [us], sleep_time 90000 [us]\n", + "2017-01-23 18:50:40,903 INFO : Workload : + phase_000002: duration 1.000000 [s] (10 loops)\n", + "2017-01-23 18:50:40,904 INFO : Workload : | period 100000 [us], duty_cycle 50 %\n", + "2017-01-23 18:50:40,905 INFO : Workload : | run_time 50000 [us], sleep_time 50000 [us]\n", + "2017-01-23 18:50:41,966 INFO : Executor : ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n", + "2017-01-23 18:50:41,968 INFO : Executor : Experiment 0/1, [energy_aware:wake_migration] 1/1\n", + "2017-01-23 18:50:41,976 WARNING : Executor : FTrace events collection enabled\n", + "2017-01-23 18:50:48,009 INFO : Workload : Workload execution START:\n", + "2017-01-23 18:50:48,011 INFO : Workload : /home/brendan/devlib-target/bin/rt-app 
/home/brendan/devlib-target/run_dir/wake_migration_00.json 2>&1\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2017-01-23 18:50:57,357 INFO : Executor : Collected FTrace binary trace:\n", + "2017-01-23 18:50:57,359 INFO : Executor : /rtapp:energy_aware:wake_migration/1/trace.dat\n", + "2017-01-23 18:50:57,360 INFO : Executor : Collected FTrace function profiling:\n", + "2017-01-23 18:50:57,361 INFO : Executor : /rtapp:energy_aware:wake_migration/1/trace_stat.json\n", + "2017-01-23 18:50:57,362 INFO : Executor : --------------------------------------------------------------------------------\n", + "2017-01-23 18:50:57,363 INFO : Executor : \n", + "2017-01-23 18:50:57,365 INFO : Executor : ################################################################################\n", + "2017-01-23 18:50:57,366 INFO : Executor : Experiments execution completed\n", + "2017-01-23 18:50:57,367 INFO : Executor : ################################################################################\n", + "2017-01-23 18:50:57,368 INFO : Executor : Results available in:\n", + "2017-01-23 18:50:57,369 INFO : Executor : /home/brejac01/sources/lisa/results/20170123_185036\n" + ] + } + ], + "source": [ + "t.setUpClass()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "experiment = t.executor.experiments[0]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Examine trace\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "`get_power_df` and `get_expected_power_df` look at the ftrace results from the workload estimation and judge the energy efficiency of the system, considering *only task placement* (assuming perfect load-tracking/prediction, cpuidle, and cpufreq systems). 
The energy estimation doesn't take every single wakeup and idle period into account, but simply estimates an average power usage over the time that each task spent attached to each CPU during each phase of the rt-app workload.\n", + "\n", + "These return DataFrames estimating the energy usage of the system under each task placement. `estimated_power` will represent this estimation for the scheduling pattern that we actually observed, while `expected_power` will represent our estimation of how much power an **optimal** scheduling pattern would use.\n", + "\n", + "Check the docstrings for these functions (and other functions in the test class) for more detail." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false, + "scrolled": false + }, + "outputs": [], + "source": [ + "# print t.get_power_df.__doc__\n", + "estimated_power = t.get_power_df(experiment)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false, + "scrolled": true + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2017-01-23 18:50:58,352 INFO : EnergyModel : EnergyModel - Searching 36 configurations for optimal task placement...\n", + "2017-01-23 18:50:58,355 INFO : EnergyModel : EnergyModel - Done\n", + "2017-01-23 18:50:58,356 INFO : EnergyModel : EnergyModel - Searching 36 configurations for optimal task placement...\n", + "2017-01-23 18:50:58,361 INFO : EnergyModel : EnergyModel - Done\n", + "2017-01-23 18:50:58,363 INFO : EnergyModel : EnergyModel - Searching 36 configurations for optimal task placement...\n", + "2017-01-23 18:50:58,367 INFO : EnergyModel : EnergyModel - Done\n", + "2017-01-23 18:50:58,369 INFO : EnergyModel : EnergyModel - Searching 36 configurations for optimal task placement...\n", + "2017-01-23 18:50:58,374 INFO : EnergyModel : EnergyModel - Done\n", + "2017-01-23 18:50:58,375 INFO : EnergyModel : EnergyModel - Searching 36 configurations for optimal 
task placement...\n", + "2017-01-23 18:50:58,378 INFO : EnergyModel : EnergyModel - Done\n", + "2017-01-23 18:50:58,380 INFO : EnergyModel : EnergyModel - Searching 36 configurations for optimal task placement...\n", + "2017-01-23 18:50:58,381 INFO : EnergyModel : EnergyModel - Done\n" + ] + } + ], + "source": [ + "# print t.get_expected_power_df.__doc__\n", + "expected_power = t.get_expected_power_df(experiment)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Plot Schedule" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "trace = t.get_trace(experiment)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false, + "scrolled": false + }, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + "
\n", + "\n", + "\n", + "\n", + " \n", + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "trappy.plotter.plot_trace(trace.ftrace)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Plot estimated ideal and estimated power usage" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This plot shows how the power estimation for the observed scheduling pattern varies from the estimated power for an ideal schedule.\n", + "\n", + "Where the plotted value for the observed power is higher than the plotted ideal power, the system was wasting power (e.g. a low-intensity task was unnecessarily placed on a high-power CPU). Where the observed value is *lower* than the ideal value, this means the system was *too* efficient (e.g. a high-intensity task was placed on a low-power CPU that could not accomadate its compute requirements)." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + " \n", + " " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "df = pd.concat([\n", + " expected_power.sum(axis=1), estimated_power.sum(axis=1)], \n", + " axis=1, keys=['ideal_power', 'observed_power']).fillna(method='ffill')\n", + "\n", + "ILinePlot(df, column=df.columns.tolist(), drawstyle='steps-post').view()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Plot CPU frequency" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2017-01-23 18:50:59,019 WARNING : Analysis : Events [cpu_frequency] not found, plot DISABLED!\n" + ] + } + ], + "source": [ + "trace.analysis.frequency.plotClusterFrequencies()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Assertions\n", + "These are the assertions used to generate pass/fail results s. They aren't very useful in this interactive context - it's much more interesting to examine plots like the one above and see whether the behaviour was desirable or not. These are intended for automated regression testing. Nonetheless, let's see what the results would be for this run." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "`test_slack` checks the \"slack\" reported by the rt-app workload. If this slack was negative, this means the workload didn't receive enough CPU capacity. In a real system this would represent lacking interactive performance." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "test_slack failed:\n", + "task task_wmig0 missed 10.0% of activations\n", + "\tCheck trace file: /home/brejac01/sources/lisa/results/20170123_185036/rtapp:energy_aware:wake_migration/1/trace.dat\n" + ] + } + ], + "source": [ + "try:\n", + " t.test_slack()\n", + "except AssertionError as e:\n", + " print \"test_slack failed:\"\n", + " print e\n", + "else:\n", + " print \"test_slack passed\"" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "`test_task_placement` checks that the task placement was energy efficient, taking advantage of lower-power CPUs whenever possible." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2017-01-23 18:50:59,076 INFO : EnergyModel : EnergyModel - Searching 36 configurations for optimal task placement...\n", + "2017-01-23 18:50:59,079 INFO : EnergyModel : EnergyModel - Done\n", + "2017-01-23 18:50:59,080 INFO : EnergyModel : EnergyModel - Searching 36 configurations for optimal task placement...\n", + "2017-01-23 18:50:59,083 INFO : EnergyModel : EnergyModel - Done\n", + "2017-01-23 18:50:59,084 INFO : EnergyModel : EnergyModel - Searching 36 configurations for optimal task placement...\n", + "2017-01-23 18:50:59,086 INFO : EnergyModel : EnergyModel - Done\n", + "2017-01-23 18:50:59,087 INFO : EnergyModel : EnergyModel - Searching 36 configurations for optimal task placement...\n", + "2017-01-23 18:50:59,090 INFO : EnergyModel : EnergyModel - Done\n", + "2017-01-23 18:50:59,092 INFO : EnergyModel : EnergyModel - Searching 36 configurations for optimal task placement...\n", + "2017-01-23 18:50:59,094 INFO : EnergyModel : EnergyModel - Done\n", + "2017-01-23 18:50:59,095 INFO : EnergyModel : 
EnergyModel - Searching 36 configurations for optimal task placement...\n", + "2017-01-23 18:50:59,096 INFO : EnergyModel : EnergyModel - Done\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "test_task_placement passed\n" + ] + } + ], + "source": [ + "try:\n", + " t.test_task_placement()\n", + "except AssertionError as e:\n", + " print \"test_task_placement failed:\"\n", + " print e\n", + "else:\n", + " print \"test_task_placement passed\"" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 2", + "language": "python", + "name": "python2" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 2 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython2", + "version": "2.7.6" + } + }, + "nbformat": 4, + "nbformat_minor": 1 +} diff --git a/libs/utils/energy_model.py b/libs/utils/energy_model.py new file mode 100644 index 0000000000000000000000000000000000000000..f949f1b0632dcd56b9e38c23422644dd79b92a13 --- /dev/null +++ b/libs/utils/energy_model.py @@ -0,0 +1,630 @@ +# SPDX-License-Identifier: Apache-2.0 +# +# Copyright (C) 2016, ARM Limited and contributors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from collections import namedtuple +from itertools import product +import logging +import operator + +import pandas as pd +import numpy as np + +from devlib.utils.misc import memoized + +"""Classes for modeling and estimating energy usage of CPU systems""" + +class EnergyModelCapacityError(Exception): + """Used by :meth:`EnergyModel.get_optimal_placements`""" + pass + +class ActiveState(namedtuple('ActiveState', ['capacity', 'power'])): + """Represents power and compute capacity at a given frequency + + :param capacity: Relative compute capacity at frequency + :param power: Power usage at frequency + """ + def __new__(cls, capacity=None, power=None): + return super(ActiveState, cls).__new__(cls, capacity, power) + +class _CpuTree(object): + """Internal class. Abstract representation of a CPU topology. + + Each node contains either a single CPU or a set of child nodes. + """ + def __init__(self, cpu, children): + if (cpu is None) == (children is None): + raise ValueError('Provide exactly one of: cpu or children') + + self.parent = None + + if cpu is not None: + self.cpus = (cpu,) + self.children = [] + else: + if len(children) == 0: + raise ValueError('children cannot be empty') + self.cpus = tuple(sorted(set().union(*[n.cpus for n in children]))) + self.children = children + for child in children: + child.parent = self + + self.name = None + + def __repr__(self): + name_bit = '' + if self.name: + name_bit = 'name="{}", '.format(self.name) + + if self.children: + return '{}({}children={})'.format( + self.__class__.__name__, name_bit, self.children) + else: + return '{}({}cpus={})'.format( + self.__class__.__name__, name_bit, self.cpus) + + def _iter(self, include_non_leaves): + for child in self.children: + for child_i in child._iter(include_non_leaves): + yield child_i + if include_non_leaves or not self.children: + yield self + + def iter_nodes(self): + """Iterate over nodes depth-first, post-order""" + return self._iter(True) + + def iter_leaves(self): + 
"""Iterate over leaves""" + return self._iter(False) + +class EnergyModelNode(_CpuTree): + """Describes topology and energy data for an EnergyModel. + + Represents a CPU topology with energy data. The active and idle state data + represents the power usage of just the hardware resources of this topology + level, not its children. e.g. If the node represents a cluster, the power + numbers should not include power used by the CPU - that power should be + included the data of the child nodes. + + Exactly one of ``cpu`` and ``children`` must be given. + + :param active_states: Dict mapping frequencies to :class:`ActiveState` + values. Compute capacity data is optional for + non-leaf nodes. + :param idle_states: Dict mapping idle state names to power usage values + :param cpu: The CPU this node represents. If provided, this is a leaf node. + :type cpus: tuple(int) + :param children: Non-empty list of child :class:`EnergyModelNode` objects + :param name: Optional human-readable name for this node. Leaf (CPU) nodes + have a default name of "cpuN" where N is the cpu number. + + :ivar cpus: CPUs contained in this node. Includes those of child nodes. + """ + def __init__(self, active_states, idle_states, + cpu=None, children=None, name=None): + super(EnergyModelNode, self).__init__(cpu, children) + + def is_monotonic(l, decreasing=False): + op = operator.ge if decreasing else operator.le + return all(op(a, b) for a, b in zip(l, l[1:])) + + if active_states: + # Sanity check for active_states's frequencies + freqs = active_states.keys() + if not is_monotonic(freqs): + raise ValueError( + 'Active states frequencies are expected to be ' + 'monotonically increasing. Freqs: {}'.format(freqs)) + + # Sanity check for active_states's powers + power_vals = [s.power for s in active_states.values()] + if not is_monotonic(power_vals): + raise ValueError( + 'Active states powers are expected to be ' + 'monotonically increasing. 
Values: {}'.format(power_vals)) + + # Sanity check for idle_states powers + if idle_states: + power_vals = idle_states.values() + if not is_monotonic(power_vals, decreasing=True): + raise ValueError( + 'Idle states powers are expected to be ' + 'monotonically decreasing. Values: {}'.format(power_vals)) + + if cpu is not None and not name: + name = 'cpu' + str(cpu) + + self.name = name + self.active_states = active_states + self.idle_states = idle_states + + @property + def max_capacity(self): + """Compute capacity at highest frequency""" + return max(s.capacity for s in self.active_states.values()) + +class EnergyModelRoot(EnergyModelNode): + """ + Convenience class for root of an EnergyModelNode tree. + + Just like EnergyModelNode except that ``active_states`` and ``idle_states`` + aren't required. + """ + def __init__(self, active_states=None, idle_states=None, + cpu=None, children=None, name=None): + return super(EnergyModelRoot, self).__init__( + active_states, idle_states, cpu, children, name) + +class PowerDomain(_CpuTree): + """Describes the power domain hierarchy for an EnergyModel. + + Power domains are a description of the topological dependencies in hardware + for entering idle states. "Composite" states such as cluster-sleep states + require a set of CPUs to all be idle before that state can be entered. In + that case those CPUs can be grouped into a power domain, and that composite + state attached to the power domain. Note that cpuidle is not aware of these + dependencies; they are typically handled by the platform firmware. + + Exactly one of ``cpu`` and ``children`` must be given. That is, leaves of + the PowerDomain tree always contain exactly one CPU - each CPU is + represented as being in a power domain of its own. This represents the + assumption that all CPUs have at least one idle state (such as ARM WFI) that + they can enter independently of other CPUs. + + :param idle_states: List of names of idle states for this power domain. 
Does + not store power data - these names are used as keys into + the ``idle_states`` field of :class:`EnergyModelNode` + objects. + :param cpu: The CPU this node represents. If provided, this is a leaf node. + :param children: Non-empty list of child :class:`PowerDomain` objects + + :ivar cpus: CPUs contained in this node. Includes those of child nodes. + :type cpus: tuple(int) + """ + def __init__(self, idle_states, cpu=None, children=None): + super(PowerDomain, self).__init__(cpu, children) + self.idle_states = idle_states + +class EnergyModel(object): + """Represents hierarchical CPU topology with power and capacity data + + An energy model consists of + + - A CPU topology, representing the physical (cache/interconnect) topology of + the CPUs. Each node stores the energy usage of that node's hardware when + it is in each active or idle state. They also store a compute capacity at + each frequency, but this is only meaningful for leaf nodes (CPUs) and may + be None at higher levels. These capacity values are relative; the maximum + capacity would usually be 1024, the value of SCHED_CAPACITY_SCALE in the + Linux kernel scheduler. Use EnergyModelNodes to describe this. + + - A power domain topology, representing the hierarchy of areas that can be + powered down (idled). + The power domains are a single tree. Leaf nodes must contain exactly one + CPU and the root node must indirectly contain every CPU. Each power domain + has a list (maybe empty) of names of idle states that that domain can + enter. + Use PowerDomains to describe this. + + - A set of frequency domains, representing groups of CPUs whose clock + frequencies must be equal (probably because they share a clock). The + frequency domains must be a partition of the CPUs. 
+ + :ivar cpu_nodes: List of leaf (CPU) :class:`EnergyModelNode` + :ivar cpus: List of logical CPU numbers in the system + + :param root_node: Root of :class:`EnergyModelNode` tree + :param root_power_domain: Root of :class:`PowerDomain` tree + :param freq_domains: Collection of collections of logical CPU numbers + representing frequency (clock) domains. + + .. note:: + The most significant shortcomings of the model are: + + 1. Voltage domains are assumed to be congruent to frequency domains + + 2. Idle state power is assumed to be independent of voltage + + 3. Temperature is ignored entirely + + .. _cpu-utils: + + .. admonition:: ``cpu_utils``: CPU util distributions + + Used throughout this module: A ``cpu_utils`` is a list ``u`` where + ``u[N]`` is the sum of the frequency-invariant, capacity-invariant + utilization of tasks placed on CPU N. That is, the quantity represented + by a CPU runqueue's util_avg in the Linux kernel scheduler's + load-tracking system with EAS features enabled. + + The range of utilization values is 0 - + :attr:`EnergyModel.capacity_scale`. + + This represents a static utilization, assuming that tasks don't change + in size (for example representing a set of fixed periodic RT-App + workloads). For workloads that change over time, a series of + ``cpu_utils`` items would be needed to describe the utilization, with a + distinct estimation for each item in the series. + """ + + capacity_scale = 1024 + """The relative computational capacity of the most powerful CPU at its + highest available frequency. 
+ """ + + def __init__(self, root_node, root_power_domain, freq_domains): + self.cpus = root_node.cpus + if self.cpus != tuple(range(len(self.cpus))): + raise ValueError('CPU IDs [{}] are sparse'.format(self.cpus)) + + # Check that freq_domains is a partition of the CPUs + fd_intersection = set().intersection(*freq_domains) + if fd_intersection: + raise ValueError('CPUs {} exist in multiple freq domains'.format( + fd_intersection)) + fd_difference = set(self.cpus) - set().union(*freq_domains) + if fd_difference: + raise ValueError('CPUs {} not in any frequency domain'.format( + fd_difference)) + self.freq_domains = freq_domains + + # Check that nodes with energy data are all within a frequency domain + for node in root_node.iter_nodes(): + if not node.active_states or node.idle_states: + continue + cpu_freq_doms = [] + for cpu in node.cpus: + [cpu_freq_dom] = [d for d in freq_domains if cpu in d] + cpu_freq_doms.append(cpu_freq_dom) + if not all(d == cpu_freq_doms[0] for d in cpu_freq_doms[1:]): + raise ValueError( + 'Node {} (CPUs {}) ' + 'has energy data and overlaps freq domains'.format( + node.name, node.cpus)) + + def sorted_leaves(root): + # Get a list of the leaf (cpu) nodes of a _CpuTree in order of the + # CPU ID + ret = sorted(list(root.iter_leaves()), key=lambda n: n.cpus[0]) + assert all(len(n.cpus) == 1 for n in ret) + return ret + + self.root = root_node + self.cpu_nodes = sorted_leaves(root_node) + self.cpu_pds = sorted_leaves(root_power_domain) + assert len(self.cpu_pds) == len(self.cpu_nodes) + + self._log = logging.getLogger('EnergyModel') + + max_cap = max(n.max_capacity for n in self.cpu_nodes) + if max_cap != self.capacity_scale: + self._log.warning( + 'Unusual max capacity (%s), overriding capacity_scale', max_cap) + self.capacity_scale = max_cap + + def _cpus_with_capacity(self, cap): + """ + Helper method to find the CPUs whose max capacity equals cap + """ + return [c for c in self.cpus + if self.cpu_nodes[c].max_capacity == cap] + + 
@property + @memoized + def biggest_cpus(self): + """ + The CPUs with the highest compute capacity at their highest frequency + """ + return self._cpus_with_capacity(self.capacity_scale) + + @property + @memoized + def littlest_cpus(self): + """ + The CPUs with the lowest compute capacity at their highest frequency + """ + min_cap = min(n.max_capacity for n in self.cpu_nodes) + return self._cpus_with_capacity(min_cap) + + @property + @memoized + def is_heterogeneous(self): + """ + True iff CPUs do not all have the same efficiency and OPP range + """ + states = self.cpu_nodes[0].active_states + return any(c.active_states != states for c in self.cpu_nodes[1:]) + + def _guess_idle_states(self, cpus_active): + def find_deepest(pd): + if not any(cpus_active[c] for c in pd.cpus): + if pd.parent: + parent_state = find_deepest(pd.parent) + if parent_state: + return parent_state + return pd.idle_states[-1] if len(pd.idle_states) else None + return None + + return [find_deepest(pd) for pd in self.cpu_pds] + + def guess_idle_states(self, cpus_active): + """Pessimistically guess the idle states that each CPU may enter + + If a CPU has any tasks it is estimated that it may only enter its + shallowest idle state in between task activations. If all the CPUs + within a power domain have no tasks, they will all be judged able to + enter that domain's deepest idle state. If any CPU in a domain has work, + no CPUs in that domain are assumed to enter any domain shared state. + + e.g. Consider a system with + + - two power domains PD0 and PD1 + + - 4 CPUs, with CPUs [0, 1] in PD0 and CPUs [2, 3] in PD1 + + - 4 idle states: "WFI", "cpu-sleep", "cluster-sleep-0" and + "cluster-sleep-1", where the "cluster-sleep-*" states domain states, + i.e. a CPU can only enter those states when both CPUs in the domain + are idle. 
+ + Then here are some example inputs and outputs: + + :: + + # All CPUs idle: + [0, 0, 0, 0] -> ["cluster-sleep-1", "cluster-sleep-1", + "cluster-sleep-1", "cluster-sleep-1"] + + # All CPUs have work + [1, 1, 1, 1] -> ["WFI","WFI","WFI", "WFI"] + + # One power domain active, the other idle + [0, 0, 1, 1] -> ["cluster-sleep-1", "cluster-sleep-1", "WFI","WFI"] + + # One CPU active. + # Note that CPU 2 has no work but is assumed to never be able to enter + # any "cluster" state. + [0, 0, 0, 1] -> ["cluster-sleep-1", "cluster-sleep-1", + "cpu-sleep","WFI"] + + :param cpus_active: list where bool(cpus_active[N]) is False iff no + tasks will run on CPU N. + :returns: List ``ret`` where ``ret[N]`` is the name of the estimated + idle state that CPU N can enter during idle periods. + + """ + states = self._guess_idle_states(cpus_active) + return [s or c.idle_states.keys()[0] + for s, c in zip(states, self.cpu_nodes)] + + def _guess_freqs(self, cpu_utils): + overutilized = False + # Find what frequency each CPU would need if it was alone in its + # frequency domain + ideal_freqs = [0 for _ in self.cpus] + for node in self.cpu_nodes: + [cpu] = node.cpus + required_cap = cpu_utils[cpu] + + possible_freqs = [f for f, s in node.active_states.iteritems() + if s.capacity >= required_cap] + + if possible_freqs: + ideal_freqs[cpu] = min(possible_freqs) + else: + # CPU cannot provide required capacity, use max freq + ideal_freqs[cpu] = max(node.active_states.keys()) + overutilized = True + + # Rectify the frequencies among domains + freqs = [0 for _ in ideal_freqs] + for domain in self.freq_domains: + domain_freq = max(ideal_freqs[c] for c in domain) + for cpu in domain: + freqs[cpu] = domain_freq + + return freqs, overutilized + + def guess_freqs(self, cpu_utils): + """Work out CPU frequencies required to execute a workload + + Find the lowest possible frequency for each CPU that provides enough + capacity to satisfy the utilization, taking into account frequency + domains. 
+ + :param cpu_utils: Utilization distribution, see + :ref:`cpu_utils ` + :returns: List ``ret`` where ``ret[N]`` is the frequency that CPU N must + run at + """ + freqs, _ = self._guess_freqs(cpu_utils) + return freqs + + def _estimate_from_active_time(self, cpu_active_time, freqs, idle_states, + combine): + """Helper for estimate_from_cpu_util + + Like estimate_from_cpu_util but uses active time i.e. proportion of time + spent not-idle in the range 0.0 - 1.0. + + If combine=False, return idle and active power as separate components. + """ + power = 0 + ret = {} + + assert all(0.0 <= a <= 1.0 for a in cpu_active_time) + + for node in self.root.iter_nodes(): + # Some nodes might not have energy model data, they could just be + # used to group other nodes (likely the root node, for example). + if not node.active_states or not node.idle_states: + continue + + cpus = tuple(node.cpus) + # For now we assume topology nodes with energy models do not overlap + # with frequency domains + freq = freqs[cpus[0]] + assert all(freqs[c] == freq for c in cpus[1:]) + + # The active time of a node is estimated as the max of the active + # times of its children. + # This works great for the synthetic periodic workloads we use in + # LISA (where all threads wake up at the same time) but is probably + # no good for real workloads. 
+ active_time = max(cpu_active_time[c] for c in cpus) + active_power = node.active_states[freq].power * active_time + + _idle_power = max(node.idle_states[idle_states[c]] for c in cpus) + idle_power = _idle_power * (1 - active_time) + + if combine: + ret[cpus] = active_power + idle_power + else: + ret[cpus] = {} + ret[cpus]["active"] = active_power + ret[cpus]["idle"] = idle_power + + return ret + + def estimate_from_cpu_util(self, cpu_utils, freqs=None, idle_states=None): + """ + Estimate the energy usage of the system under a utilization distribution + + Optionally also take freqs; a list of frequencies at which each CPU is + assumed to run, and idle_states, the idle states that each CPU can enter + between activations. If not provided, they will be estimated assuming an + ideal selection system (i.e. perfect cpufreq & cpuidle governors). + + :param cpu_utils: Utilization distribution, see + :ref:`cpu_utils ` + :param freqs: List of CPU frequencies. Got from :meth:`guess_freqs` by + default. + :param idle_states: List of CPU frequencies. Got from + :meth:`guess_idle_states` by default. + + :returns: Dict with power in bogo-Watts (bW), with contributions from + each system component keyed with a tuple of the CPUs + comprising that component (i.e. :attr:EnergyModelNode.cpus) + + :: + + { + (0,) : 10, + (1,) : 10, + (0, 1) : 5, + } + + This represents CPUs 0 and 1 each using 10bW and their shared + resources using 5bW for a total of 25bW. 
+ """ + if len(cpu_utils) != len(self.cpus): + raise ValueError( + 'cpu_utils length ({}) must equal CPU count ({})'.format( + len(cpu_utils), len(self.cpus))) + + if freqs is None: + freqs = self.guess_freqs(cpu_utils) + if idle_states is None: + idle_states = self.guess_idle_states(cpu_utils) + + cpu_active_time = [] + for cpu, node in enumerate(self.cpu_nodes): + assert (cpu,) == node.cpus + cap = node.active_states[freqs[cpu]].capacity + cpu_active_time.append(min(float(cpu_utils[cpu]) / cap, 1.0)) + + return self._estimate_from_active_time(cpu_active_time, + freqs, idle_states, combine=True) + + def get_optimal_placements(self, capacities): + """Find the optimal distribution of work for a set of tasks + + Find a list of candidates which are estimated to be optimal in terms of + power consumption, but that do not result in any CPU becoming + over-utilized. + + If no such candidates exist, i.e. the system being modeled cannot + satisfy the workload's throughput requirements, an + :class:`EnergyModelCapacityError` is raised. For example, if e was an + EnergyModel modeling two CPUs with capacity 1024, this error would be + raised by: + + :: + + e.get_optimal_placements({"t1": 800, "t2": 800, "t3: "800"}) + + This estimation assumes an ideal system of selecting OPPs and idle + states for CPUs. + + .. note:: + This is a brute force search taking time exponential wrt. the number + of tasks. + + :param capacities: Dict mapping tasks to expected utilization + values. These tasks are assumed not to change; they + have a single static utilization value. A set of + single-phase periodic RT-App tasks is an example of a + suitable workload for this model. + :returns: List of ``cpu_utils`` items representing distributions of work + under optimal task placements, see + :ref:`cpu_utils `. Multiple task placements + that result in the same CPU utilizations are considered + equivalent. 
+ """ + tasks = capacities.keys() + + num_candidates = len(self.cpus) ** len(tasks) + self._log.debug( + '%14s - Searching %d configurations for optimal task placement...', + 'EnergyModel', num_candidates) + + candidates = {} + excluded = [] + for cpus in product(self.cpus, repeat=len(tasks)): + placement = {task: cpu for task, cpu in zip(tasks, cpus)} + + util = [0 for _ in self.cpus] + for task, cpu in placement.items(): + util[cpu] += capacities[task] + util = tuple(util) + + # Filter out candidate placements that have tasks greater than max + # or that we have already determined that we cannot place. + if (any(u > self.capacity_scale for u in util) or util in excluded): + continue + + if util not in candidates: + freqs, overutilized = self._guess_freqs(util) + if overutilized: + # This isn't a valid placement + excluded.append(util) + else: + power = self.estimate_from_cpu_util(util, freqs=freqs) + candidates[util] = sum(power.values()) + + if not candidates: + # The system can't provide full throughput to this workload. 
+ raise EnergyModelCapacityError( + "Can't handle workload - total cap = {}".format( + sum(capacities.values()))) + + # Whittle down to those that give the lowest energy estimate + min_power = min(p for p in candidates.itervalues()) + ret = [u for u, p in candidates.iteritems() if p == min_power] + + self._log.debug('%14s - Done', 'EnergyModel') + return ret diff --git a/libs/utils/env.py b/libs/utils/env.py index 0be9eb0c34ce7cf5d9f0ebcd0a5d0a0cbece7aa9..46ceb6f5f4650ecceda926dcc3c6433f688bf8a9 100644 --- a/libs/utils/env.py +++ b/libs/utils/env.py @@ -26,15 +26,16 @@ import time import unittest import devlib +from devlib.utils.misc import memoized +from devlib import Platform +from trappy.stats.Topology import Topology from wlgen import RTA from energy import EnergyMeter from conf import JsonConf - -from devlib.utils.misc import memoized -from trappy.stats.Topology import Topology - -from devlib import Platform +from platforms.juno_energy import juno_energy +from platforms.hikey_energy import hikey_energy +from platforms.pixel_energy import pixel_energy USERNAME_DEFAULT = 'root' PASSWORD_DEFAULT = '' @@ -416,6 +417,7 @@ class TestEnv(ShareState): # Initialize JUNO board elif self.conf['board'].upper() in ('JUNO', 'JUNO2'): platform = devlib.platform.arm.Juno() + self.nrg_model = juno_energy self.__modules = ['bl', 'hwmon', 'cpufreq'] # Initialize OAK board @@ -423,6 +425,18 @@ class TestEnv(ShareState): platform = Platform(model='MT8173') self.__modules = ['bl', 'cpufreq'] + # Initialized HiKey board + elif self.conf['board'].upper() == 'HIKEY': + self.nrg_model = hikey_energy + self.__modules = [ "cpufreq", "cpuidle" ] + platform = Platform(model='hikey') + + # Initialize Pixel phone + elif self.conf['board'].upper() == 'PIXEL': + self.nrg_model = pixel_energy + self.__modules = ['bl', 'cpufreq'] + platform = Platform(model='pixel') + elif self.conf['board'] != 'UNKNOWN': # Initilize from platform descriptor (if available) board = 
self._load_board(self.conf['board']) diff --git a/libs/utils/platforms/__init__.py b/libs/utils/platforms/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/libs/utils/platforms/hikey_energy.py b/libs/utils/platforms/hikey_energy.py new file mode 100644 index 0000000000000000000000000000000000000000..7f3cea7f8ed517581a979677e68308df2e647044 --- /dev/null +++ b/libs/utils/platforms/hikey_energy.py @@ -0,0 +1,70 @@ +# SPDX-License-Identifier: Apache-2.0 +# +# Copyright (C) 2016, ARM Limited and contributors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from energy_model import (ActiveState, EnergyModelNode, EnergyModelRoot, + PowerDomain, EnergyModel) + +from collections import OrderedDict + +cluster_active_states = OrderedDict([ + ( 208000, ActiveState(capacity=178, power=16)), + ( 432000, ActiveState(capacity=369, power=29)), + ( 729000, ActiveState(capacity=622, power=47)), + ( 960000, ActiveState(capacity=819, power=75)), + (1200000, ActiveState(capacity=1024, power=112)) +]) + +cluster_idle_states = OrderedDict([ + ('WFI', 47), + ('cpu-sleep', 47), + ('cluster-sleep', 0) +]) + +cpu_active_states = OrderedDict([ + ( 208000, ActiveState(capacity=178, power=69)), + ( 432000, ActiveState(capacity=369, power=125)), + ( 729000, ActiveState(capacity=622, power=224)), + ( 960000, ActiveState(capacity=819, power=367)), + (1200000, ActiveState(capacity=1024, power=670)) +]) + +cpu_idle_states = OrderedDict([ + ('WFI', 15), ('cpu-sleep', 0), ('cluster-sleep', 0) +]) + +def cpu_pd(cpu): + return PowerDomain(cpu=cpu, idle_states=['WFI', 'cpu-sleep']) + +def cpu_node(cpu): + return EnergyModelNode(cpu=cpu, + active_states=cpu_active_states, + idle_states=cpu_idle_states) +hikey_energy = EnergyModel( + root_node=EnergyModelRoot(children=[ + EnergyModelNode(name='cluster0', + children=[cpu_node(c) for c in [0, 1, 2, 3]], + active_states=cluster_active_states, + idle_states=cluster_idle_states), + EnergyModelNode(name='cluster1', + children=[cpu_node(c) for c in [4, 5, 6, 7]], + active_states=cluster_active_states, + idle_states=cluster_idle_states)]), + root_power_domain=PowerDomain(idle_states=[], children=[ + PowerDomain(idle_states=["cluster-sleep"], children=[ + cpu_pd(c) for c in [0, 1, 2, 3]]), + PowerDomain(idle_states=["cluster-sleep"], children=[ + cpu_pd(c) for c in [4, 5, 6, 7]])]), + freq_domains=[[0, 1, 2, 3, 4, 5, 6, 7]]) diff --git a/libs/utils/platforms/juno_energy.py b/libs/utils/platforms/juno_energy.py new file mode 100644 index 
0000000000000000000000000000000000000000..49c75b4a125b14df8ac753c3509f9d0d986b3aa3 --- /dev/null +++ b/libs/utils/platforms/juno_energy.py @@ -0,0 +1,115 @@ +# SPDX-License-Identifier: Apache-2.0 +# +# Copyright (C) 2016, ARM Limited and contributors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict + +from energy_model import (ActiveState, EnergyModelNode, EnergyModelRoot, + PowerDomain, EnergyModel) + +a53_cluster_active_states = OrderedDict([ + (450000, ActiveState(power=26)), + (575000, ActiveState(power=30)), + (700000, ActiveState(power=39)), + (775000, ActiveState(power=47)), + (850000, ActiveState(power=57)), +]) + +# TODO warn if any of the idle states aren't represented by power domains +a53_cluster_idle_states = OrderedDict([ + ("WFI", 56), + ("cpu-sleep-0", 56), + ("cluster-sleep-0", 17), +]) + +a53_cpu_active_states = OrderedDict([ + (450000, ActiveState(capacity=235, power=33)), + (575000, ActiveState(capacity=302, power=46)), + (700000, ActiveState(capacity=368, power=61)), + (775000, ActiveState(capacity=406, power=76)), + (850000, ActiveState(capacity=447, power=93)), +]) + +a53_cpu_idle_states = OrderedDict([ + ("WFI", 6), + ("cpu-sleep-0", 0), + ("cluster-sleep-0", 0), +]) + +a53s = [0, 3, 4, 5] + +def a53_cpu_node(cpu): + return EnergyModelNode(cpu=cpu, + active_states=a53_cpu_active_states, + idle_states=a53_cpu_idle_states) + +a57_cluster_active_states = OrderedDict([ + ( 450000, ActiveState(power=24)), + ( 
625000, ActiveState(power=32)), + ( 800000, ActiveState(power=43)), + ( 950000, ActiveState(power=49)), + (1100000, ActiveState(power=64)), +]) + +a57_cluster_idle_states = OrderedDict([ + ("WFI", 65), + ("cpu-sleep-0", 65), + ("cluster-sleep-0", 24), +]) + +a57_cpu_active_states = OrderedDict([ + (450000, ActiveState(capacity=417, power=168)), + (625000, ActiveState(capacity=579, power=251)), + (800000, ActiveState(capacity=744, power=359)), + (950000, ActiveState(capacity=883, power=479)), + (1100000, ActiveState(capacity=1024, power=616)), +]) + +a57_cpu_idle_states = OrderedDict([ + ("WFI", 15), + ("cpu-sleep-0", 0), + ("cluster-sleep-0", 0), +]) + +a57s = [1, 2] + +def a57_cpu_node(cpu): + return EnergyModelNode(cpu=cpu, + active_states=a57_cpu_active_states, + idle_states=a57_cpu_idle_states) + +juno_energy = EnergyModel( + root_node=EnergyModelRoot( + children=[ + EnergyModelNode( + name="cluster_a57", + active_states=a57_cluster_active_states, + idle_states=a57_cluster_idle_states, + children=[a57_cpu_node(c) for c in a57s]), + EnergyModelNode( + name="cluster_a53", + active_states=a53_cluster_active_states, + idle_states=a53_cluster_idle_states, + children=[a53_cpu_node(c) for c in a53s])]), + root_power_domain=PowerDomain(idle_states=[], children=[ + PowerDomain( + idle_states=["cluster-sleep-0"], + children=[PowerDomain(idle_states=["WFI", "cpu-sleep-0"], cpu=c) + for c in a57s]), + PowerDomain( + idle_states=["cluster-sleep-0"], + children=[PowerDomain(idle_states=["WFI", "cpu-sleep-0"], cpu=c) + for c in a53s])]), + freq_domains=[a53s, a57s]) diff --git a/libs/utils/platforms/pixel_energy.py b/libs/utils/platforms/pixel_energy.py new file mode 100644 index 0000000000000000000000000000000000000000..01307eb6b31c7596640ff675beda01f3d76fc214 --- /dev/null +++ b/libs/utils/platforms/pixel_energy.py @@ -0,0 +1,168 @@ +# SPDX-License-Identifier: Apache-2.0 +# +# Copyright (C) 2016, ARM Limited and contributors. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from energy_model import (ActiveState, EnergyModelNode, EnergyModelRoot, + PowerDomain, EnergyModel) + +from collections import OrderedDict + +silver_cpu_active_states = OrderedDict([ + ( 307200, ActiveState(capacity=149, power=90)), + ( 384000, ActiveState(capacity=188, power=111)), + ( 460800, ActiveState(capacity=225, power=133)), + ( 537600, ActiveState(capacity=257, power=160)), + ( 614400, ActiveState(capacity=281, power=182)), + ( 691200, ActiveState(capacity=315, power=210)), + ( 768000, ActiveState(capacity=368, power=251)), + ( 844800, ActiveState(capacity=406, power=306)), + ( 902400, ActiveState(capacity=428, power=332)), + ( 979200, ActiveState(capacity=469, power=379)), + (1056000, ActiveState(capacity=502, power=438)), + (1132800, ActiveState(capacity=538, power=494)), + (1209600, ActiveState(capacity=581, power=550)), + (1286400, ActiveState(capacity=611, power=613)), + (1363200, ActiveState(capacity=648, power=670)), + (1440000, ActiveState(capacity=684, power=752)), + (1516800, ActiveState(capacity=729, power=848)), + (1593600, ActiveState(capacity=763, power=925)), +]) + +silver_cluster_active_states = OrderedDict([ + ( 307200, ActiveState(power=4)), + ( 384000, ActiveState(power=4)), + ( 460800, ActiveState(power=4)), + ( 537600, ActiveState(power=4)), + ( 614400, ActiveState(power=4)), + ( 691200, ActiveState(power=4)), + ( 768000, ActiveState(power=8)), + ( 844800, ActiveState(power=9)), + ( 
902400, ActiveState(power=15)), + ( 979200, ActiveState(power=16)), + (1056000, ActiveState(power=21)), + (1132800, ActiveState(power=22)), + (1209600, ActiveState(power=29)), + (1286400, ActiveState(power=32)), + (1363200, ActiveState(power=42)), + (1440000, ActiveState(power=49)), + # This power value is 41 (invalid!) in the released kernel. Patch it to + # avoid errors. + (1516800, ActiveState(power=52)), + (1593600, ActiveState(power=52)), +]) + +gold_cpu_active_states = OrderedDict([ + ( 307200, ActiveState(capacity=149, power=93)), + ( 384000, ActiveState(capacity=188, power=111)), + ( 460800, ActiveState(capacity=225, power=133)), + ( 537600, ActiveState(capacity=257, power=160)), + ( 614400, ActiveState(capacity=281, power=182)), + ( 691200, ActiveState(capacity=315, power=210)), + ( 748800, ActiveState(capacity=348, power=252)), + ( 825600, ActiveState(capacity=374, power=290)), + ( 902400, ActiveState(capacity=428, power=332)), + ( 979200, ActiveState(capacity=469, power=379)), + (1056000, ActiveState(capacity=502, power=438)), + (1132800, ActiveState(capacity=538, power=494)), + (1209600, ActiveState(capacity=581, power=550)), + (1286400, ActiveState(capacity=611, power=613)), + (1363200, ActiveState(capacity=648, power=670)), + (1440000, ActiveState(capacity=684, power=752)), + (1516800, ActiveState(capacity=729, power=848)), + (1593600, ActiveState(capacity=763, power=925)), + (1670400, ActiveState(capacity=795, power=1018)), + (1747200, ActiveState(capacity=832, power=1073)), + (1824000, ActiveState(capacity=868, power=1209)), + (1900800, ActiveState(capacity=905, power=1298)), + (1977600, ActiveState(capacity=952, power=1428)), + (2054400, ActiveState(capacity=979, power=1521)), + (2150400, ActiveState(capacity=1024, power=1715)), +]) + +gold_cluster_active_states = OrderedDict([ + ( 307200, ActiveState(power=4)), + ( 384000, ActiveState(power=4)), + ( 460800, ActiveState(power=4)), + ( 537600, ActiveState(power=4)), + ( 614400, 
ActiveState(power=4)), + ( 691200, ActiveState(power=4)), + ( 748800, ActiveState(power=7)), + ( 825600, ActiveState(power=10)), + ( 902400, ActiveState(power=15)), + ( 979200, ActiveState(power=16)), + (1056000, ActiveState(power=21)), + (1132800, ActiveState(power=22)), + (1209600, ActiveState(power=29)), + (1286400, ActiveState(power=32)), + (1363200, ActiveState(power=42)), + (1440000, ActiveState(power=49)), + # This power value is 41 (invalid!) in the released kernel. Patch it to + # avoid errors. + (1516800, ActiveState(power=52)), + (1593600, ActiveState(power=52)), + (1670400, ActiveState(power=62)), + (1747200, ActiveState(power=69)), + (1824000, ActiveState(power=75)), + (1900800, ActiveState(power=81)), + (1977600, ActiveState(power=90)), + (2054400, ActiveState(power=93)), + (2150400, ActiveState(power=96)), +]) + +# TODO warn if any of the idle states aren't represented by power domains +cpu_idle_states = OrderedDict([ + ("WFI", 2), + ("cpu-sleep-0", 0), + ("cluster-sleep-0", 0), +]) + +cluster_idle_states = OrderedDict([ + ("WFI", 0), + ("cpu-sleep-0", 0), + ("cluster-sleep-0", 0), +]) + +silvers = [0, 1] +golds = [2, 3] + +def silver_cpu_node(cpu): + return EnergyModelNode(cpu=cpu, + active_states=silver_cpu_active_states, + idle_states=cpu_idle_states) + +def gold_cpu_node(cpu): + return EnergyModelNode(cpu=cpu, + active_states=gold_cpu_active_states, + idle_states=cpu_idle_states) + +def cpu_pd(cpu): + return PowerDomain(cpu=cpu, idle_states=["WFI", "cpu-sleep-0"]) + +pixel_energy = EnergyModel( + root_node=EnergyModelRoot(children=[ + EnergyModelNode(name='cluster_silver', + children=[silver_cpu_node(c) for c in silvers], + active_states=silver_cluster_active_states, + idle_states=cluster_idle_states), + EnergyModelNode(name='cluster_gold', + children=[gold_cpu_node(c) for c in golds], + active_states=gold_cluster_active_states, + idle_states=cluster_idle_states)]), + root_power_domain=PowerDomain(idle_states=[], children=[ + 
PowerDomain(idle_states=['cluster-sleep-0'], children=[ + cpu_pd(c) for c in silvers]), + PowerDomain(idle_states=['cluster-sleep-0'], children=[ + cpu_pd(c) for c in golds])]), + freq_domains=[silvers, golds]) diff --git a/tests/eas/__init__.py b/tests/eas/__init__.py index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..add124c4889b4cc38dc2a4b511b5ca336f36ff70 100644 --- a/tests/eas/__init__.py +++ b/tests/eas/__init__.py @@ -0,0 +1,103 @@ +# SPDX-License-Identifier: Apache-2.0 +# +# Copyright (C) 2016-2017, ARM Limited and contributors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from unittest import SkipTest + +from bart.sched.SchedMultiAssert import SchedMultiAssert +from devlib.target import TargetError +from test import LisaTest + +WORKLOAD_DURATION_S = 5 +WORKLOAD_PERIOD_MS = 10 +SWITCH_WINDOW_HALF = 0.5 +SMALL_DCYCLE = 10 +BIG_DCYCLE = 100 +STEP_HIGH_DCYCLE = 50 +STEP_LOW_DCYCLE = 10 +EXPECTED_RESIDENCY_PCT = 85 +OFFLOAD_EXPECTED_BUSY_TIME_PCT = 97 +SET_IS_BIG_LITTLE = True +SET_INITIAL_TASK_UTIL = True +OFFLOAD_MIGRATION_MIGRATOR_DELAY = 1 + +energy_aware_conf = { + "tag" : "energy_aware", + "flags" : ["ftrace", "freeze_userspace"], + "sched_features" : "ENERGY_AWARE", +} + +class _EasTest(LisaTest): + """ + Base class for EAS tests + """ + + test_conf = { + "ftrace" : { + "events" : [ + "sched_overutilized", + "sched_energy_diff", + "sched_load_avg_task", + "sched_load_avg_cpu", + "sched_migrate_task", + "sched_switch" + ], + }, + "modules": ["cgroups"], + "cpufreq" : { + "governor" : "sched", + }, + } + + # Set to true to run a test only on heterogeneous systems + skip_on_smp = False + + @classmethod + def setUpClass(cls, *args, **kwargs): + super(_EasTest, cls).runExperiments(*args, **kwargs) + + @classmethod + def _getExperimentsConf(cls, test_env): + if cls.skip_on_smp and not test_env.nrg_model.is_heterogeneous: + raise SkipTest('Test not required on symmetric systems') + return super(_EasTest, cls)._getExperimentsConf(test_env) + + @classmethod + def _experimentsInit(cls, *args, **kwargs): + super(_EasTest, cls)._experimentsInit(*args, **kwargs) + + if SET_IS_BIG_LITTLE: + # This flag doesn't exist on mainline-integration kernels, so + # don't worry if the file isn't present (hence verify=False) + cls.target.write_value( + "/proc/sys/kernel/sched_is_big_little", 1, verify=False) + + if SET_INITIAL_TASK_UTIL: + # This flag doesn't exist on all kernels, so don't worry if the file + # isn't present (hence verify=False) + cls.target.write_value( + "/proc/sys/kernel/sched_initial_task_util", 1024, verify=False) + + def 
_do_test_first_cpu(self, experiment, tasks): + """Test that all tasks start on a big CPU""" + + sched_assert = self.get_multi_assert(experiment) + + self.assertTrue( + sched_assert.assertFirstCpu( + self.te.nrg_model.biggest_cpus, + rank=len(tasks)), + msg="Not all the new generated tasks started on a big CPU") diff --git a/tests/eas/acceptance.py b/tests/eas/acceptance.py index 6669012bbaedda4c6d0d6f89ca8b2416738fb249..bb5a62859fb710787c1d94a70afb06b4d9221ee3 100644 --- a/tests/eas/acceptance.py +++ b/tests/eas/acceptance.py @@ -20,83 +20,24 @@ import operator import os import trappy import unittest +from unittest import SkipTest from bart.sched.SchedAssert import SchedAssert from devlib.target import TargetError -from env import TestEnv -from test import LisaTest, experiment_test - -# Global test configuration parameters -WORKLOAD_DURATION_S = 5 -WORKLOAD_PERIOD_MS = 10 -SWITCH_WINDOW_HALF = 0.5 -SMALL_DCYCLE = 10 -BIG_DCYCLE = 100 -STEP_HIGH_DCYCLE = 50 -STEP_LOW_DCYCLE = 10 -EXPECTED_RESIDENCY_PCT = 85 -OFFLOAD_EXPECTED_BUSY_TIME_PCT = 97 -SET_IS_BIG_LITTLE = True -SET_INITIAL_TASK_UTIL = True -OFFLOAD_MIGRATION_MIGRATOR_DELAY = 1 - -energy_aware_conf = { - "tag" : "energy_aware", - "flags" : ["ftrace", "freeze_userspace"], - "sched_features" : "ENERGY_AWARE", -} - -class _EasTest(LisaTest): - """ - Base class for EAS tests - """ - - test_conf = { - "ftrace" : { - "events" : [ - "sched_overutilized", - "sched_energy_diff", - "sched_load_avg_task", - "sched_load_avg_cpu", - "sched_migrate_task", - "sched_switch" - ], - }, - "modules": ["cgroups"], - } - - @classmethod - def setUpClass(cls, *args, **kwargs): - super(_EasTest, cls).runExperiments(*args, **kwargs) - - @classmethod - def _experimentsInit(cls, *args, **kwargs): - super(_EasTest, cls)._experimentsInit(*args, **kwargs) - - if SET_IS_BIG_LITTLE: - # This flag doesn't exist on mainline-integration kernels, so - # don't worry if the file isn't present (hence verify=False) - cls.target.write_value( - 
"/proc/sys/kernel/sched_is_big_little", 1, verify=False) - - if SET_INITIAL_TASK_UTIL: - # This flag doesn't exist on all kernels, so don't worry if the file - # isn't present (hence verify=False) - cls.target.write_value( - "/proc/sys/kernel/sched_initial_task_util", 1024, verify=False) - - def _do_test_first_cpu(self, experiment, tasks): - """Test that all tasks start on a big CPU""" - - sched_assert = self.get_multi_assert(experiment) - - self.assertTrue( - sched_assert.assertFirstCpu( - self.target.bl.bigs, - rank=len(tasks)), - msg="Not all the new generated tasks started on a big CPU") +from test import experiment_test +from . import (_EasTest, energy_aware_conf, + WORKLOAD_DURATION_S, + WORKLOAD_PERIOD_MS, + SWITCH_WINDOW_HALF, + SMALL_DCYCLE, + BIG_DCYCLE, + STEP_LOW_DCYCLE, + STEP_HIGH_DCYCLE, + EXPECTED_RESIDENCY_PCT, + OFFLOAD_EXPECTED_BUSY_TIME_PCT, + OFFLOAD_MIGRATION_MIGRATOR_DELAY) class ForkMigration(_EasTest): """ @@ -153,7 +94,7 @@ class SmallTaskPacking(_EasTest): Goal ==== - Many small tasks are packed in little cpus + Many small tasks are packed on a single cluster with the lowest capacity Detailed Description ==================== @@ -187,6 +128,8 @@ class SmallTaskPacking(_EasTest): "confs" : [energy_aware_conf] } + skip_on_smp = False + @experiment_test def test_first_cpu(self, experiment, tasks): """Small Task Packing: test first CPU""" @@ -194,20 +137,23 @@ class SmallTaskPacking(_EasTest): @experiment_test def test_small_task_residency(self, experiment, tasks): - "Small Task Packing: Test Residency (Little Cluster)" + "Small Task Packing: Test Residency" sched_assert = self.get_multi_assert(experiment) - self.assertTrue( - sched_assert.assertResidency( - "cluster", - self.target.bl.littles, - EXPECTED_RESIDENCY_PCT, - operator.ge, - percent=True, - rank=len(tasks)), - msg="Not all tasks are running on LITTLE cores for at least {}% of their execution time"\ - .format(EXPECTED_RESIDENCY_PCT)) + littlest_cpus = self.te.nrg_model.littlest_cpus 
+ + for cpus in self.te.topology.get_level("cluster"): + if all(c in littlest_cpus for c in cpus): + if sched_assert.assertResidency( + "cluster", cpus, + EXPECTED_RESIDENCY_PCT, operator.ge, + percent=True, rank=len(tasks)): + return + + msg = "Not all tasks ran on low-capacity cluster for {}% of their time"\ + .format(EXPECTED_RESIDENCY_PCT) + raise AssertionError(msg) class OffloadMigrationAndIdlePull(_EasTest): """ @@ -296,12 +242,13 @@ class OffloadMigrationAndIdlePull(_EasTest): @experiment_test def test_first_cpu(self, experiment, tasks): """Offload Migration and Idle Pull: Test First CPU""" - self._do_test_first_cpu(experiment, tasks) + self._do_test_first_cpu(experiment, [t for t in tasks if "early" in t]) @experiment_test def test_big_cpus_fully_loaded(self, experiment, tasks): """Offload Migration and Idle Pull: Big cpus are fully loaded as long as there are tasks left to run in the system""" - num_big_cpus = len(self.target.bl.bigs) + bigs = self.te.nrg_model.biggest_cpus + num_big_cpus = len(bigs) sched_assert = self.get_multi_assert(experiment) @@ -309,9 +256,8 @@ class OffloadMigrationAndIdlePull(_EasTest): # Window of time until the first migrator finishes window = (self.get_start_time(experiment), end_times[-num_big_cpus]) - busy_time = sched_assert.getCPUBusyTime("cluster", - self.target.bl.bigs, - window=window, percent=True) + busy_time = sched_assert.getCPUBusyTime("cluster", bigs, + window=window, percent=True) msg = "Big cpus were not fully loaded while there were enough big tasks to fill them" self.assertGreater(busy_time, OFFLOAD_EXPECTED_BUSY_TIME_PCT, msg=msg) @@ -321,8 +267,7 @@ class OffloadMigrationAndIdlePull(_EasTest): for i in range(num_big_cpus-1): big_cpus_left = num_big_cpus - i - 1 window = (end_times[-num_big_cpus+i], end_times[-num_big_cpus+i+1]) - busy_time = sched_assert.getCPUBusyTime("cluster", - self.target.bl.bigs, + busy_time = sched_assert.getCPUBusyTime("cluster", bigs, window=window, percent=True) expected_busy_time 
= OFFLOAD_EXPECTED_BUSY_TIME_PCT * \ @@ -335,6 +280,7 @@ class OffloadMigrationAndIdlePull(_EasTest): @experiment_test def test_little_cpus_run_tasks(self, experiment, tasks): """Offload Migration and Idle Pull: Little cpus run tasks while bigs are busy""" + littles = self.te.nrg_model.littlest_cpus num_offloaded_tasks = len(tasks) / 2 @@ -350,8 +296,7 @@ class OffloadMigrationAndIdlePull(_EasTest): all_tasks_assert = self.get_multi_assert(experiment) - busy_time = all_tasks_assert.getCPUBusyTime("cluster", - self.target.bl.littles, + busy_time = all_tasks_assert.getCPUBusyTime("cluster", littles, window=window) window_len = window[1] - window[0] @@ -373,7 +318,8 @@ class OffloadMigrationAndIdlePull(_EasTest): sa = SchedAssert(experiment.out_dir, self.te.topology, execname=task) end_times = self.get_end_times(experiment) window = (0, end_times[task]) - big_residency = sa.getResidency("cluster", self.target.bl.bigs, + big_residency = sa.getResidency("cluster", + self.te.nrg_model.biggest_cpus, window=window, percent=True) msg = "Task {} didn't run on a big cpu.".format(task) @@ -393,7 +339,10 @@ class OffloadMigrationAndIdlePull(_EasTest): sa = SchedAssert(experiment.out_dir, self.te.topology, execname=task) msg = "Task {} did not finish on a big cpu".format(task) - self.assertIn(sa.getLastCpu(), self.target.bl.bigs, msg=msg) + + self.assertIn(sa.getLastCpu(), + self.te.nrg_model.biggest_cpus, + msg=msg) class WakeMigration(_EasTest): @@ -452,11 +401,11 @@ class WakeMigration(_EasTest): def _assert_switch(self, experiment, expected_switch_to, phases): if expected_switch_to == "big": - switch_from = self.target.bl.littles - switch_to = self.target.bl.bigs + switch_from = self.te.nrg_model.littlest_cpus + switch_to = self.te.nrg_model.biggest_cpus elif expected_switch_to == "little": - switch_from = self.target.bl.bigs - switch_to = self.target.bl.littles + switch_from = self.te.nrg_model.biggest_cpus + switch_to = self.te.nrg_model.littlest_cpus else: raise 
ValueError("Invalid expected_switch_to") diff --git a/tests/eas/generic.py b/tests/eas/generic.py new file mode 100644 index 0000000000000000000000000000000000000000..0fcbc49489717a1c8285d4edef0ff35ba94ca01d --- /dev/null +++ b/tests/eas/generic.py @@ -0,0 +1,448 @@ +# SPDX-License-Identifier: Apache-2.0 +# +# Copyright (C) 2016, ARM Limited and contributors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from math import isnan + +import numpy as np +import pandas as pd + +from bart.common.Utils import area_under_curve + +from energy_model import EnergyModelCapacityError +from perf_analysis import PerfAnalysis +from test import experiment_test +from trace import Trace +from . import _EasTest, energy_aware_conf, WORKLOAD_PERIOD_MS + +class _EnergyModelTest(_EasTest): + """ + "Abstract" base class for generic EAS tests using the EnergyModel class + + Subclasses should provide a .workloads member to populate the 'wloads' field + of the experiments_conf for the Executor. A set of helper methods are + provided for making assertions about behaviour, most importantly the _test* + methods which make assertions in a generic way. + """ + + negative_slack_allowed_pct = 15 + """Percentage of RT-App task activations with negative slack allowed""" + + energy_est_threshold_pct = 20 + """ + Allowed margin for error in estimated energy cost for task placement, + compared to optimal placment. 
+ """ + + @classmethod + def _getExperimentsConf(cls, *args, **kwargs): + return { + 'wloads' : cls.workloads, + 'confs' : [energy_aware_conf] + } + + def get_task_utils_df(self, experiment): + """ + Get a DataFrame with the *expected* utilization of each task over time + + :param experiment: The :class:Experiment to examine + :returns: A Pandas DataFrame with a column for each task, showing how + the utilization of that task varies over time + """ + util_scale = self.te.nrg_model.capacity_scale + + transitions = {} + def add_transition(time, task, util): + if time not in transitions: + transitions[time] = {task: util} + else: + transitions[time][task] = util + + # First we'll build a dict D {time: {task_name: util}} where D[t][n] is + # the expected utilization of task n from time t. + for task, params in experiment.wload.params['profile'].iteritems(): + time = self.get_start_time(experiment) + params['delay'] + add_transition(time, task, 0) + for _ in range(params.get('loops', 1)): + for phase in params['phases']: + util = (phase.duty_cycle_pct * util_scale / 100.) + add_transition(time, task, util) + time += phase.duration_s + add_transition(time, task, 0) + + index = sorted(transitions.keys()) + df = pd.DataFrame([transitions[k] for k in index], index=index) + return df.fillna(method='ffill') + + def get_task_cpu_df(self, experiment): + """ + Get a DataFrame mapping task names to the CPU they ran on + + Use the sched_switch trace event to find which CPU each task ran + on. Does not reflect idleness - tasks not running are shown as running + on the last CPU they woke on. 
+ + :param experiment: The :class:Experiment to examine + :returns: A Pandas DataFrame with a column for each task, showing the + CPU that the task was "on" at each moment in time + """ + tasks = experiment.wload.tasks.keys() + trace = self.get_trace(experiment) + + df = trace.ftrace.sched_switch.data_frame[['next_comm', '__cpu']] + df = df[df['next_comm'].isin(tasks)] + df = df.pivot(index=df.index, columns='next_comm').fillna(method='ffill') + cpu_df = df['__cpu'] + # Drop consecutive duplicates + cpu_df = cpu_df[(cpu_df.shift(+1) != cpu_df).any(axis=1)] + return cpu_df + + def _sort_power_df_columns(self, df): + """ + Helper method to re-order the columns of a power DataFrame + + This has no significance for code, but when examining DataFrames by hand + they are easier to understand if the columns are in a logical order. + """ + node_cpus = [node.cpus for node in self.te.nrg_model.root.iter_nodes()] + return pd.DataFrame(df, columns=[c for c in node_cpus if c in df]) + + def get_power_df(self, experiment): + """ + Considering only the task placement, estimate power usage over time + + Examine a trace and use :meth:EnergyModel.estimate_from_cpu_util to get + a DataFrame showing the estimated power usage over time. This assumes + perfect cpuidle and cpufreq behaviour. + + :param experiment: The :class:Experiment to examine + :returns: A Pandas DataFrame with a column node in the energy model + (keyed with a tuple of the CPUs contained by that node) Shows + the estimated power over time. + """ + task_cpu_df = self.get_task_cpu_df(experiment) + task_utils_df = self.get_task_utils_df(experiment) + + tasks = experiment.wload.tasks.keys() + + # Create a combined DataFrame with the utilization of a task and the CPU + # it was running on at each moment. 
Looks like: + # utils cpus + # task_wmig0 task_wmig1 task_wmig0 task_wmig1 + # 2.375056 102.4 102.4 NaN NaN + # 2.375105 102.4 102.4 2.0 NaN + + df = pd.concat([task_utils_df, task_cpu_df], + axis=1, keys=['utils', 'cpus']) + df = df.sort_index().fillna(method='ffill') + nrg_model = self.executor.te.nrg_model + + # Now make a DataFrame with the estimated power at each moment. + def est_power(row): + cpu_utils = [0 for cpu in nrg_model.cpus] + for task in tasks: + cpu = row['cpus'][task] + util = row['utils'][task] + if not isnan(cpu): + cpu_utils[int(cpu)] += util + power = nrg_model.estimate_from_cpu_util(cpu_utils) + columns = power.keys() + return pd.Series([power[c] for c in columns], index=columns) + return self._sort_power_df_columns(df.apply(est_power, axis=1)) + + def get_expected_power_df(self, experiment): + """ + Estimate *optimal* power usage over time + + Examine a trace and use :meth:get_optimal_placements and + :meth:EnergyModel.estimate_from_cpu_util to get a DataFrame showing the + estimated power usage over time under ideal EAS behaviour. + + :param experiment: The :class:Experiment to examine + :returns: A Pandas DataFrame with a column each node in the energy model + (keyed with a tuple of the CPUs contained by that node) and a + "power" column with the sum of other columns. Shows the + estimated *optimal* power over time. 
+ """ + task_utils_df = self.get_task_utils_df(experiment) + + nrg_model = self.te.nrg_model + + def exp_power(row): + task_utils = row.to_dict() + expected_utils = nrg_model.get_optimal_placements(task_utils) + power = nrg_model.estimate_from_cpu_util(expected_utils[0]) + columns = power.keys() + return pd.Series([power[c] for c in columns], index=columns) + return self._sort_power_df_columns( + task_utils_df.apply(exp_power, axis=1)) + + def _test_slack(self, experiment, tasks): + """ + Assert that the RTApp workload was given enough performance + + Use :class:PerfAnalysis to find instances where the experiment's RT-App + workload wasn't able to complete its activations (i.e. its reported + "slack" was negative). Assert that this happened less that + ``negative_slack_allowed_pct`` percent of the time. + + :meth:_test_task_placement asserts that estimated energy usage was + low. That will pass for runs where too *little* energy was used, + compromising performance. This method provides a separate test to + counteract that problem. + """ + + pa = PerfAnalysis(experiment.out_dir) + for task in tasks: + slack = pa.df(task)["Slack"] + + bad_activations_pct = len(slack[slack < 0]) * 100. / len(slack) + if bad_activations_pct > self.negative_slack_allowed_pct: + raise AssertionError("task {} missed {}% of activations".format( + task, bad_activations_pct)) + + def _test_task_placement(self, experiment, tasks): + """ + Test that task placement was energy-efficient + + Use :meth:get_expected_power_df and :meth:get_power_df to estimate + optimal and observed power usage for task placements of the experiment's + workload. Assert that the observed power does not exceed the optimal + power by more than 20%. 
+ """ + exp_power = self.get_expected_power_df(experiment) + est_power = self.get_power_df(experiment) + + exp_energy = area_under_curve(exp_power.sum(axis=1), method='rect') + est_energy = area_under_curve(est_power.sum(axis=1), method='rect') + + msg = 'Estimated {} bogo-Joules to run workload, expected {}'.format( + est_energy, exp_energy) + threshold = exp_energy * (1 + (self.energy_est_threshold_pct / 100.)) + self.assertLess(est_energy, threshold, msg=msg) + +class OneSmallTask(_EnergyModelTest): + """ + Test EAS for a single 20% task over 2 seconds + """ + workloads = { + 'one_small' : { + 'type' : 'rt-app', + 'conf' : { + 'class' : 'periodic', + 'params' : { + 'duty_cycle_pct': 20, + 'duration_s': 2, + 'period_ms': 10, + }, + 'tasks' : 1, + 'prefix' : 'many', + }, + }, + } + @experiment_test + def test_slack(self, experiment, tasks): + self._test_slack(experiment, tasks) + @experiment_test + def test_task_placement(self, experiment, tasks): + self._test_task_placement(experiment, tasks) + +class ThreeSmallTasks(_EnergyModelTest): + """ + Test EAS for 3 20% tasks over 2 seconds + """ + workloads = { + 'three_small' : { + 'type' : 'rt-app', + 'conf' : { + 'class' : 'periodic', + 'params' : { + 'duty_cycle_pct': 20, + 'duration_s': 2, + 'period_ms': 10, + }, + 'tasks' : 3, + 'prefix' : 'many', + }, + }, + } + @experiment_test + def test_slack(self, experiment, tasks): + self._test_slack(experiment, tasks) + @experiment_test + def test_task_placement(self, experiment, tasks): + self._test_task_placement(experiment, tasks) + +class TwoBigTasks(_EnergyModelTest): + """ + Test EAS for 2 80% tasks over 2 seconds + """ + workloads = { + 'two_big' : { + 'type' : 'rt-app', + 'conf' : { + 'class' : 'periodic', + 'params' : { + 'duty_cycle_pct': 80, + 'duration_s': 2, + 'period_ms': 10, + }, + 'tasks' : 2, + 'prefix' : 'many', + }, + }, + } + @experiment_test + def test_slack(self, experiment, tasks): + self._test_slack(experiment, tasks) + @experiment_test + def 
test_task_placement(self, experiment, tasks): + self._test_task_placement(experiment, tasks) + +class TwoBigThreeSmall(_EnergyModelTest): + """ + Test EAS for 2 70% tasks and 3 10% tasks over 2 seconds + """ + workloads = { + 'two_big_three_small' : { + 'type' : 'rt-app', + 'conf' : { + 'class' : 'profile', + 'params' : { + 'large' : { + 'kind' : 'Periodic', + 'params' : { + 'duty_cycle_pct': 70, + 'duration_s': 2, + 'period_ms': WORKLOAD_PERIOD_MS, + }, + 'tasks' : 2, + }, + 'small' : { + 'kind' : 'Periodic', + 'params' : { + 'duty_cycle_pct': 10, + 'duration_s': 2, + 'period_ms': WORKLOAD_PERIOD_MS, + }, + 'tasks' : 3, + }, + }, + }, + }, + } + @experiment_test + def test_slack(self, experiment, tasks): + self._test_slack(experiment, tasks) + @experiment_test + def test_task_placement(self, experiment, tasks): + self._test_task_placement(experiment, tasks) + +class RampUp(_EnergyModelTest): + """ + Test EAS for a task ramping from 5% up to 70% over 2 seconds + """ + workloads = { + "ramp_up" : { + "type": "rt-app", + "conf" : { + "class" : "profile", + "params" : { + "r5_10-60" : { + "kind" : "Ramp", + "params" : { + "period_ms" : 16, + "start_pct" : 5, + "end_pct" : 70, + "delta_pct" : 5, + "time_s" : 2, + }, + }, + }, + }, + }, + } + + @experiment_test + def test_slack(self, experiment, tasks): + self._test_slack(experiment, tasks) + @experiment_test + def test_task_placement(self, experiment, tasks): + self._test_task_placement(experiment, tasks) + +class RampDown(_EnergyModelTest): + """ + Test EAS for a task ramping from 70% down to 5% over 2 seconds + """ + workloads = { + "ramp_down" : { + "type": "rt-app", + "conf" : { + "class" : "profile", + "params" : { + "r5_10-60" : { + "kind" : "Ramp", + "params" : { + "period_ms" : 16, + "start_pct" : 70, + "end_pct" : 5, + "delta_pct" : 5, + "time_s" : 2, + }, + }, + }, + }, + }, + } + + @experiment_test + def test_slack(self, experiment, tasks): + self._test_slack(experiment, tasks) + @experiment_test + def 
test_task_placement(self, experiment, tasks): + self._test_task_placement(experiment, tasks) + +class EnergyModelWakeMigration(_EnergyModelTest): + """ + Test EAS for tasks alternating beetween 10% and 50% + """ + workloads = { + 'em_wake_migration' : { + 'type' : 'rt-app', + 'conf' : { + 'class' : 'profile', + 'params' : { + 'wmig' : { + 'kind' : 'Step', + 'params' : { + 'start_pct': 10, + 'end_pct': 50, + 'time_s': 2, + 'loops': 2 + }, + # Create one task for each big cpu + 'tasks' : 'big', + }, + }, + }, + }, + } + @experiment_test + def test_slack(self, experiment, tasks): + self._test_slack(experiment, tasks) + @experiment_test + def test_task_placement(self, experiment, tasks): + self._test_task_placement(experiment, tasks) diff --git a/tests/lisa/test_energy_model.py b/tests/lisa/test_energy_model.py new file mode 100644 index 0000000000000000000000000000000000000000..c3c9fe773a09385cceb34e642bd5fdf4c59b618f --- /dev/null +++ b/tests/lisa/test_energy_model.py @@ -0,0 +1,289 @@ +# SPDX-License-Identifier: Apache-2.0 +# +# Copyright (C) 2016, ARM Limited and contributors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from collections import OrderedDict +import unittest +from unittest import TestCase + +from energy_model import (EnergyModel, ActiveState, EnergyModelCapacityError, + EnergyModelNode, EnergyModelRoot, PowerDomain) + +# Import these just to test that they can be constructed +from libs.utils.platforms.juno_energy import juno_energy +from libs.utils.platforms.pixel_energy import pixel_energy +from libs.utils.platforms.hikey_energy import hikey_energy + +""" A very basic test suite for the EnergyModel class.""" + +# WARNING! +# Note that the tests below have hard-coded expectations about the result. If +# you change the numbers in this EM, you'll need to recalculate those hard-coded +# values (or just refactor these tests) + +little_cluster_active_states = OrderedDict([ + (1000, ActiveState(power=10)), + (2000, ActiveState(power=20)), +]) + +little_cluster_idle_states = OrderedDict([ + ('WFI', 5), + ('cpu-sleep-0', 5), + ('cluster-sleep-0', 1), +]) + +little_cpu_active_states = OrderedDict([ + (1000, ActiveState(capacity=100, power=100)), + (1500, ActiveState(capacity=150, power=150)), + (2000, ActiveState(capacity=200, power=200)), +]) + +little_cpu_idle_states = OrderedDict([ + ('WFI', 5), + ('cpu-sleep-0', 0), + ('cluster-sleep-0', 0), +]) + +littles=[0, 1] +def little_cpu_node(cpu): + return EnergyModelNode(cpu=cpu, + active_states=little_cpu_active_states, + idle_states=little_cpu_idle_states) + +big_cluster_active_states = OrderedDict([ + (3000, ActiveState(power=30)), + (4000, ActiveState(power=40)), +]) + +big_cluster_idle_states = OrderedDict([ + ('WFI', 8), + ('cpu-sleep-0', 8), + ('cluster-sleep-0', 2), +]) + +big_cpu_active_states = OrderedDict([ + (3000, ActiveState(capacity=300, power=300)), + (4000, ActiveState(capacity=400, power=400)), +]) + +big_cpu_idle_states = OrderedDict([ + ('WFI', 9), + ('cpu-sleep-0', 0), + ('cluster-sleep-0', 0), +]) + +bigs=[2, 3] + +def big_cpu_node(cpu): + return EnergyModelNode(cpu=cpu, + 
active_states=big_cpu_active_states, + idle_states=big_cpu_idle_states) + +em = EnergyModel( + root_node=EnergyModelRoot(children=[ + EnergyModelNode(name='cluster_little', + active_states=little_cluster_active_states, + idle_states=little_cluster_idle_states, + children=[little_cpu_node(0), + little_cpu_node(1)]), + EnergyModelNode(name='cluster_big', + active_states=big_cluster_active_states, + idle_states=big_cluster_idle_states, + children=[big_cpu_node(2), + big_cpu_node(3)]) + ]), + root_power_domain=PowerDomain(idle_states=[], children=[ + PowerDomain( + idle_states=['cluster-sleep-0'], + children=[PowerDomain(idle_states=['WFI', 'cpu-sleep-0'], cpu=c) + for c in littles]), + PowerDomain( + idle_states=['cluster-sleep-0'], + children=[PowerDomain(idle_states=['WFI', 'cpu-sleep-0'], cpu=c) + for c in bigs]), + ]), + freq_domains=[littles, bigs] +) + +class TestInvalid(TestCase): + """Test the sanity checks in EnerygModel setup""" + def test_overlapping_freq_doms(self): + """Can't build an EM with energy nodes overlapping freq domains""" + + # To make this easy we'll just use a single active state everywhere, and + # no idle states + active_states={10000: ActiveState(capacity=1024, power=100)} + + def cpu_node(cpu): + return EnergyModelNode(cpu=cpu, + active_states=active_states, + idle_states=[]) + + root_node = EnergyModelRoot(children=[ + EnergyModelNode( + name='cluster1', + active_states=active_states, + idle_states=[], + children=[cpu_node(0), cpu_node(1)])]) + + def cpu_pd(cpu): + return PowerDomain(idle_states=[], cpu=cpu) + + with self.assertRaises(ValueError): + EnergyModel(root_node=root_node, + root_power_domain=PowerDomain( + idle_states=[], children=[cpu_pd(0), cpu_pd(1)]), + freq_domains=[[0], [1]]), + + +class TestOptimalPlacement(TestCase): + def assertPlacementListEqual(self, l1, l2): + """ + Assert that a pair of lists of lists contain the same lists in any order + """ + s1 = set([tuple(l) for l in l1]) + s2 = set([tuple(l) for l in l2]) + 
self.assertSetEqual(s1, s2) + + def test_single_small(self): + placements = em.get_optimal_placements({'task0': 1}) + self.assertPlacementListEqual(placements, [[1, 0, 0, 0], + [0, 1, 0, 0]]) + + def test_single_big(self): + placements = em.get_optimal_placements({'task0': 350}) + self.assertPlacementListEqual(placements, [[0, 0, 350, 0], + [0, 0, 0, 350]]) + + def test_packing(self): + tasks = {'task' + str(i) : 10 for i in range(5)} + placements = em.get_optimal_placements(tasks) + total_util = sum(tasks.values()) + self.assertPlacementListEqual(placements, [[total_util, 0, 0, 0], + [0, total_util, 0, 0]]) + + def test_overutilized_single(self): + self.assertRaises(EnergyModelCapacityError, + em.get_optimal_placements, {'task0' : 401}) + + def test_overutilized_many(self): + total_cap = 400 * 2 + 200 * 2 + task_size = 200 + tasks = {'task' + str(i): task_size + for i in range((total_cap / task_size) + 1)} + self.assertRaises(EnergyModelCapacityError, + em.get_optimal_placements, tasks) + +class TestBiggestCpus(TestCase): + def test_biggest_cpus(self): + self.assertEqual(em.biggest_cpus, [2, 3]) + +class TestLittlestCpus(TestCase): + def test_littlest_cpus(self): + self.assertEqual(em.littlest_cpus, [0, 1]) + +class TestMaxCap(TestCase): + def test_max_cap(self): + max_caps = [n.max_capacity for n in em.cpu_nodes] + self.assertEqual(max_caps, [200, 200, 400, 400]) + +class TestEnergyEst(TestCase): + def test_all_overutilized(self): + big_cpu = 400 * 2 + little_cpu = 200 * 2 + big_cluster = 40 + little_cluster = 20 + + total = big_cpu + little_cpu + big_cluster + little_cluster + + power = em.estimate_from_cpu_util([10000] * 4) + exp = { + (0): { 'active': little_cpu, 'idle': 0}, + (1): { 'active': little_cpu, 'idle': 0}, + (2): { 'active': big_cpu, 'idle': 0}, + (3): { 'active': big_cpu, 'idle': 0}, + (0, 1): { 'active': little_cluster, 'idle': 0}, + (2, 3): { 'active': big_cluster, 'idle': 0} + } + for k, v in power.iteritems(): + self.assertAlmostEqual(v, 
power[k]) + + def test_all_idle(self): + self.assertEqual(sum(em.estimate_from_cpu_util([0, 0, 0, 0]).values()), + 0 * 4 # CPU power = 0 + + 2 # big cluster power + + 1) # LITTLE cluster power + + def test_one_little_half_lowest(self): + cpu0_util = 100 * 0.5 + self.assertEqual( + sum(em.estimate_from_cpu_util([cpu0_util, 0, 0, 0]).values()), + (0.5 * 100) # CPU0 active power + + (0.5 * 5) # CPU0 idle power + + (0.5 * 5) # LITTLE cluster idle power + + (0.5 * 10) # LITTLE cluster active power + + 2) # big cluster power + +class TestIdleStates(TestCase): + def test_zero_util_deepest(self): + self.assertEqual(em.guess_idle_states([0] * 4), ['cluster-sleep-0'] * 4) + + def test_single_cpu_used(self): + states = em.guess_idle_states([0, 0, 0, 1]) + self.assertEqual(states, ['cluster-sleep-0', 'cluster-sleep-0', + 'cpu-sleep-0', 'WFI']) + + states = em.guess_idle_states([0, 1, 0, 0]) + self.assertEqual(states, ['cpu-sleep-0', 'WFI', + 'cluster-sleep-0', 'cluster-sleep-0',]) + + def test_all_cpus_used(self): + states = em.guess_idle_states([1, 1, 1, 1]) + self.assertEqual(states, ['WFI'] * 4) + + def test_one_cpu_per_cluster(self): + states = em.guess_idle_states([0, 1, 0, 1]) + self.assertEqual(states, ['cpu-sleep-0', 'WFI'] * 2) + +class TestFreqs(TestCase): + + def test_zero_util_slowest(self): + self.assertEqual(em.guess_freqs([0] * 4), + [1000, 1000, 3000, 3000]) + + def test_high_util_fastest(self): + self.assertEqual(em.guess_freqs([100000] * 4), + [2000, 2000, 4000, 4000]) + + def test_freq_domains(self): + self.assertEqual(em.guess_freqs([0, 0, 0, 10000]), + [1000, 1000, 4000, 4000]) + + self.assertEqual(em.guess_freqs([0, 10000, 0, 10000]), + [2000, 2000, 4000, 4000]) + + self.assertEqual(em.guess_freqs([0, 10000, 0, 0]), + [2000, 2000, 3000, 3000]) + + def test_middle_freq(self): + self.assertEqual(em.guess_freqs([0, 110, 0, 0]), + [1500, 1500, 3000, 3000]) + +class TestNames(TestCase): + """Test that the default names for CPU nodes get set""" + def 
test_names(self): + self.assertListEqual([n.name for n in em.cpu_nodes], + ['cpu0', 'cpu1', 'cpu2', 'cpu3'])