From 5b0860bebc8dad537b3a03996e2991b6c9bb8dcf Mon Sep 17 00:00:00 2001 From: Brendan Jackman Date: Thu, 29 Sep 2016 10:17:54 +0100 Subject: [PATCH 01/20] libs/utils/executor: Fix typo in log message --- libs/utils/executor.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libs/utils/executor.py b/libs/utils/executor.py index 6d06e785e..b65e87b39 100644 --- a/libs/utils/executor.py +++ b/libs/utils/executor.py @@ -108,7 +108,7 @@ class Executor(): logging.info('%14s - Configured to run:', 'Executor') - logging.info('%14s - %3d targt configurations:', + logging.info('%14s - %3d target configurations:', 'Executor', len(self._tests_conf['confs'])) target_confs = [conf['tag'] for conf in self._tests_conf['confs']] target_confs = ', '.join(target_confs) -- GitLab From 4d3e67b2bf6af1487016c43518f19a0ebe906572 Mon Sep 17 00:00:00 2001 From: Brendan Jackman Date: Thu, 22 Sep 2016 17:53:39 +0100 Subject: [PATCH 02/20] libs/utils/executor: Remove irrelevant log messages --- libs/utils/executor.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/libs/utils/executor.py b/libs/utils/executor.py index b65e87b39..2c88ddd0a 100644 --- a/libs/utils/executor.py +++ b/libs/utils/executor.py @@ -486,12 +486,6 @@ class Executor(): logging.debug(r'%14s - out_dir [%s]', 'Executor', self.te.out_dir) os.system('mkdir -p ' + self.te.out_dir) - logging.debug(r'%14s - cleanup target output folder', 'Executor') - - target_dir = self.target.working_directory - logging.debug('%14s - setup target directory [%s]', - 'Executor', target_dir) - def _wload_run(self, exp_idx, tc, wl_idx, wload, run_idx): tc_idx = tc['tag'] -- GitLab From 705db83c4e9ab166cc59b2d05bfb457cc6bb49ba Mon Sep 17 00:00:00 2001 From: Brendan Jackman Date: Wed, 21 Sep 2016 17:24:58 +0100 Subject: [PATCH 03/20] libs/utils/executor: Default to 1 iteration if not specified --- libs/utils/executor.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/libs/utils/executor.py b/libs/utils/executor.py index 2c88ddd0a..ad81e0f61 100644 --- a/libs/utils/executor.py +++ b/libs/utils/executor.py @@ -99,8 +99,9 @@ class Executor(): self.te = TestEnv(target_conf, tests_conf) self.target = self.te.target + self._iterations = self._tests_conf.get('iterations', 1) # Compute total number of experiments - self._exp_count = self._tests_conf['iterations'] \ + self._exp_count = self._iterations \ * len(self._tests_conf['wloads']) \ * len(self._tests_conf['confs']) @@ -116,7 +117,7 @@ class Executor(): logging.info('%14s - %3d workloads (%d iterations each)', 'Executor', len(self._tests_conf['wloads']), - self._tests_conf['iterations']) + self._iterations) wload_confs = ', '.join(self._tests_conf['wloads']) logging.info('%14s - %s', 'Executor', wload_confs) @@ -138,7 +139,7 @@ class Executor(): for wl_idx in self._tests_conf['wloads']: # TEST: configuration wload = self._wload_init(tc, wl_idx) - for itr_idx in range(1, self._tests_conf['iterations']+1): + for itr_idx in range(1, self._iterations + 1): # WORKLOAD: execution self._wload_run(exp_idx, tc, wl_idx, wload, itr_idx) exp_idx += 1 @@ -492,7 +493,7 @@ class Executor(): self._print_title('Executor', 'Experiment {}/{}, [{}:{}] {}/{}'\ .format(exp_idx, self._exp_count, tc_idx, wl_idx, - run_idx, self._tests_conf['iterations'])) + run_idx, self._iterations)) # Setup local results folder self._wload_run_init(run_idx) -- GitLab From 0328c6163c4c5f90ef2bfec1513ec1388ef6e232 Mon Sep 17 00:00:00 2001 From: Brendan Jackman Date: Thu, 22 Sep 2016 18:12:09 +0100 Subject: [PATCH 04/20] 
libs/utils/executor: Store metadata about executed experiments This commit gives Executors a list of objects containing metadata about the experiments it runs. The primary motivation for this is so that tests subclassing LisaTest can use this information in their test_* methods. For example, tests can use the `out_dir` member to construct Trappy Ftrace objects. --- libs/utils/executor.py | 50 +++++++++++++++++++++++++----------------- 1 file changed, 30 insertions(+), 20 deletions(-) diff --git a/libs/utils/executor.py b/libs/utils/executor.py index ad81e0f61..4fcabc092 100644 --- a/libs/utils/executor.py +++ b/libs/utils/executor.py @@ -17,6 +17,7 @@ from bart.common.Analyzer import Analyzer import collections +from collections import namedtuple import datetime import gzip import json @@ -41,6 +42,9 @@ from conf import JsonConf import wlgen +Experiment = namedtuple('Experiment', ['wload_name', 'wload', + 'conf', 'iteration', 'out_dir']) + class Executor(): def __init__(self, target_conf=None, tests_conf=None): @@ -130,19 +134,28 @@ class Executor(): def run(self): self._print_section('Executor', 'Experiments execution') + self.experiments = [] + # Run all the configured experiments - exp_idx = 1 for tc in self._tests_conf['confs']: # TARGET: configuration if not self._target_configure(tc): continue for wl_idx in self._tests_conf['wloads']: # TEST: configuration - wload = self._wload_init(tc, wl_idx) + wload, test_dir = self._wload_init(tc, wl_idx) for itr_idx in range(1, self._iterations + 1): - # WORKLOAD: execution - self._wload_run(exp_idx, tc, wl_idx, wload, itr_idx) - exp_idx += 1 + exp = Experiment( + wload_name=wl_idx, + wload=wload, + conf=tc, + iteration=itr_idx, + out_dir=os.path.join(test_dir, str(itr_idx))) + self.experiments.append(exp) + + # WORKLOAD: execution + for exp_idx, experiment in enumerate(self.experiments): + self._wload_run(exp_idx, experiment) self._print_section('Executor', 'Experiments execution completed') logging.info('%14s - Results available in:', 'Executor') @@ -479,24 +492,21 @@ class Executor(): with open(os.path.join(self.te.test_dir, 'kernel.version'), 'w') as fh: fh.write(output) - return wload - - def _wload_run_init(self, run_idx): - self.te.out_dir = '{}/{}'\ - .format(self.te.test_dir, run_idx) - logging.debug(r'%14s - out_dir [%s]', 'Executor', self.te.out_dir) - os.system('mkdir -p ' + self.te.out_dir) + return wload, self.te.test_dir - def _wload_run(self, exp_idx, tc, wl_idx, wload, run_idx): + def _wload_run(self, exp_idx, experiment): + tc = experiment.conf + wload = experiment.wload tc_idx = tc['tag'] self._print_title('Executor', 'Experiment {}/{}, [{}:{}] {}/{}'\ .format(exp_idx, self._exp_count, - tc_idx, wl_idx, - run_idx, self._iterations)) + tc_idx, experiment.wload_name, + experiment.iteration, self._iterations)) # Setup local results folder - self._wload_run_init(run_idx) + logging.debug(r'%14s - out_dir [%s]', 'Executor', experiment.out_dir) + os.system('mkdir -p ' + experiment.out_dir) # FTRACE: start (if a configuration has been provided) if self.te.ftrace and self._target_conf_flag(tc, 'ftrace'): @@ -508,23 +518,23 @@ class Executor(): self.te.emeter.reset() # WORKLOAD: Run the configured workload - wload.run(out_dir=self.te.out_dir, cgroup=self._cgroup) + wload.run(out_dir=experiment.out_dir, cgroup=self._cgroup) # ENERGY: collect measurements if self.te.emeter: - self.te.emeter.report(self.te.out_dir) + self.te.emeter.report(experiment.out_dir) # FTRACE: stop and collect measurements if self.te.ftrace and 
self._target_conf_flag(tc, 'ftrace'): self.te.ftrace.stop() - trace_file = self.te.out_dir + '/trace.dat' + trace_file = experiment.out_dir + '/trace.dat' self.te.ftrace.get_trace(trace_file) logging.info(r'%14s - Collected FTrace binary trace:', 'Executor') logging.info(r'%14s - %s', 'Executor', trace_file.replace(self.te.res_dir, '')) - stats_file = self.te.out_dir + '/trace_stat.json' + stats_file = experiment.out_dir + '/trace_stat.json' self.te.ftrace.get_stats(stats_file) logging.info(r'%14s - Collected FTrace function profiling:', 'Executor') logging.info(r'%14s - %s', 'Executor', -- GitLab From d6ffaf98744ddbcac1ab58d7278ce8c6c34be88a Mon Sep 17 00:00:00 2001 From: Brendan Jackman Date: Fri, 30 Sep 2016 14:45:52 +0100 Subject: [PATCH 05/20] libs/utils/executor: Make test_dir a local variable Now that the experiment output directory is stored as part of the Experiment object, the global self.te.test_dir is not needed and this variable can be made local to the function. --- libs/utils/executor.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/libs/utils/executor.py b/libs/utils/executor.py index 4fcabc092..669d2ed46 100644 --- a/libs/utils/executor.py +++ b/libs/utils/executor.py @@ -478,21 +478,21 @@ class Executor(): wload = self._wload_conf(wl_idx, wlspec) # Keep track of platform configuration - self.te.test_dir = '{}/{}:{}:{}'\ + test_dir = '{}/{}:{}:{}'\ .format(self.te.res_dir, wload.wtype, tc_idx, wl_idx) - os.system('mkdir -p ' + self.te.test_dir) - self.te.platform_dump(self.te.test_dir) + os.system('mkdir -p ' + test_dir) + self.te.platform_dump(test_dir) # Keep track of kernel configuration and version config = self.target.config - with gzip.open(os.path.join(self.te.test_dir, 'kernel.config'), 'wb') as fh: + with gzip.open(os.path.join(test_dir, 'kernel.config'), 'wb') as fh: fh.write(config.text) output = self.target.execute('{} uname -a'\ .format(self.target.busybox)) - with open(os.path.join(self.te.test_dir, 'kernel.version'), 'w') as fh: + with open(os.path.join(test_dir, 'kernel.version'), 'w') as fh: fh.write(output) - return wload, self.te.test_dir + return wload, test_dir def _wload_run(self, exp_idx, experiment): tc = experiment.conf -- GitLab From 43a28826460dd192805df1d57128d34f19c3ef80 Mon Sep 17 00:00:00 2001 From: Brendan Jackman Date: Thu, 22 Sep 2016 19:08:51 +0100 Subject: [PATCH 06/20] libs/utils/executor: Allow "profile" rt-app wloads to specify a "tasks" param On "periodic" rt-app workloads, the "tasks" param specifies how many instances of the given task configuration should be instantiated. For example, if the tests_conf contains: "wloads" : { "my_periodic" : { "type" : "rt-app", "conf" : { "class" : "periodic", "params" : { "duty_cycle_pct": 100, "duration_s" : 5, "period_ms": 10 }, "tasks" : 10 // <-- Create 10 tasks }, } then 10 tasks with the given "params" will be created. This commit adds the same functionality for tasks within workloads with a "class" of "profile". For example: "fmig" : { "type" : "rt-app", "conf" : { "class" : "profile", "params" : { "small" : { "kind" : "Periodic", "params" : { "duty_cycle_pct": 10, "duration_s": 5, "period_ms": 10, }, "tasks" : 2 // <-- Create 2 10% tasks }, "big" : { "kind" : "Periodic", "params" : { "duty_cycle_pct": 100, "duration_s" : 5, "period_ms": 10 }, "tasks" : 4 // <-- and 4 100% tasks }, }, }, }, The above will create a workload that runs 4 100% tasks and 2 10% periodic tasks in parallel. If the "tasks" field is not present, the value defaults to 1. 
--- libs/utils/executor.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/libs/utils/executor.py b/libs/utils/executor.py index 669d2ed46..cbe3f0960 100644 --- a/libs/utils/executor.py +++ b/libs/utils/executor.py @@ -381,9 +381,7 @@ class Executor(): if conf['class'] == 'profile': params = {} # Load each task specification - for task_name in conf['params']: - task = conf['params'][task_name] - task_name = conf['prefix'] + task_name + for task_name, task in conf['params'].items(): if task['kind'] not in wlgen.__dict__: logging.error(r'%14s - RTA task of kind [%s] not supported', 'RTApp', task['kind']) @@ -392,7 +390,13 @@ class Executor(): 'in RT-App workload specification'\ .format(task)) task_ctor = getattr(wlgen, task['kind']) - params[task_name] = task_ctor(**task['params']).get() + num_tasks = task.get('tasks', 1) + task_idxs = self._wload_task_idxs(wl_idx, num_tasks) + for idx in task_idxs: + idx_name = str(idx) if len(task_idxs) > 0 else "" + task_name_idx = conf['prefix'] + task_name + idx_name + params[task_name_idx] = task_ctor(**task['params']).get() + rtapp = wlgen.RTA(self.target, wl_idx, calibration = self.te.calibration()) rtapp.conf(kind='profile', params=params, loadref=loadref, -- GitLab From 0ec5fcf10e834fd1c5f6505430c81426f5b5188d Mon Sep 17 00:00:00 2001 From: Brendan Jackman Date: Tue, 4 Oct 2016 11:44:54 +0100 Subject: [PATCH 07/20] libs/utils/executor: Use os.makedirs instead of os.system('mkdir ...) --- libs/utils/executor.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libs/utils/executor.py b/libs/utils/executor.py index cbe3f0960..4c6eceb0f 100644 --- a/libs/utils/executor.py +++ b/libs/utils/executor.py @@ -484,7 +484,7 @@ class Executor(): # Keep track of platform configuration test_dir = '{}/{}:{}:{}'\ .format(self.te.res_dir, wload.wtype, tc_idx, wl_idx) - os.system('mkdir -p ' + test_dir) + os.makedirs(test_dir) self.te.platform_dump(test_dir) # Keep track of kernel configuration and version -- GitLab From 36b84bc7a7636fb48ab81af023f9bb1c6b6cc622 Mon Sep 17 00:00:00 2001 From: Brendan Jackman Date: Fri, 30 Sep 2016 11:54:06 +0100 Subject: [PATCH 08/20] libs/utils/test: Add experiment_test decorator It's expected that tests often have to iterate over all the experiments that were run in order to make assertions about the results of each. This decorator abstracts that iteration, also providing the experiment workload's task names for convenience. --- libs/utils/test.py | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/libs/utils/test.py b/libs/utils/test.py index a0c101f53..8f34e5b80 100644 --- a/libs/utils/test.py +++ b/libs/utils/test.py @@ -19,6 +19,8 @@ import logging import os import unittest +import wrapt + from conf import JsonConf from executor import Executor @@ -100,4 +102,26 @@ class LisaTest(unittest.TestCase): Code executed after running the experiments """ +@wrapt.decorator +def experiment_test(wrapped_test, instance, args, kwargs): + """ + Convert a LisaTest test method to be automatically called for each experiment + + The method will be passed the experiment object and a list of the names of + tasks that were run as the experiment's workload. 
+ """ + for experiment in instance.executor.experiments: + tasks = experiment.wload.tasks.keys() + try: + wrapped_test(experiment, tasks, *args, **kwargs) + except AssertionError as e: + trace_relpath = os.path.join(experiment.out_dir, "trace.dat") + add_msg = "\n\tCheck trace file: " + os.path.abspath(trace_relpath) + orig_msg = e.args[0] if len(e.args) else "" + e.args = (orig_msg + add_msg,) + e.args[1:] + raise + +# Prevent nosetests from running experiment_test directly as a test case +experiment_test.__test__ = False + # vim :set tabstop=4 shiftwidth=4 expandtab -- GitLab From ad51cdcee3499fc20415ecba8db06dc76f4f7b6a Mon Sep 17 00:00:00 2001 From: Brendan Jackman Date: Thu, 13 Oct 2016 17:16:49 +0100 Subject: [PATCH 09/20] libs/utils/test: Add new utility methods to LisaTest --- libs/utils/test.py | 39 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) diff --git a/libs/utils/test.py b/libs/utils/test.py index 8f34e5b80..e497cc8fb 100644 --- a/libs/utils/test.py +++ b/libs/utils/test.py @@ -19,6 +19,9 @@ import logging import os import unittest +from bart.sched.SchedAssert import SchedAssert +from bart.sched.SchedMultiAssert import SchedMultiAssert +from devlib.utils.misc import memoized import wrapt from conf import JsonConf @@ -102,6 +105,42 @@ class LisaTest(unittest.TestCase): Code executed after running the experiments """ + @memoized + def get_multi_assert(self, experiment, task_filter=""): + """ + Return a SchedMultiAssert over the tasks whose names contain task_filter + + By default, this includes _all_ the tasks that were executed for the + experiment. + """ + tasks = experiment.wload.tasks.keys() + return SchedMultiAssert(experiment.out_dir, + self.te.topology, + [t for t in tasks if task_filter in t]) + + def get_start_time(self, experiment): + """ + Get the time at which the experiment workload began executing + """ + start_times_dict = self.get_multi_assert(experiment).getStartTime() + return min([t["starttime"] for t in start_times_dict.itervalues()]) + + def get_end_times(self, experiment): + """ + Get the time at which each task in the workload finished + + Returned as a dict; {"task_name": finish_time, ...} + """ + + end_times = {} + for task in experiment.wload.tasks.keys(): + sched_assert = SchedAssert(experiment.out_dir, self.te.topology, + execname=task) + end_times[task] = sched_assert.getEndTime() + + return end_times + + @wrapt.decorator def experiment_test(wrapped_test, instance, args, kwargs): """ -- GitLab From 029f4713c5969b8fe57ecc5822ad518cc2a11034 Mon Sep 17 00:00:00 2001 From: Brendan Jackman Date: Tue, 27 Sep 2016 18:00:14 +0100 Subject: [PATCH 10/20] tests/eas/acceptance: Remove unused code --- tests/eas/acceptance.py | 20 -------------------- 1 file changed, 20 deletions(-) diff --git a/tests/eas/acceptance.py b/tests/eas/acceptance.py index 3fc65a9ed..9505044aa 100644 --- a/tests/eas/acceptance.py +++ b/tests/eas/acceptance.py @@ -54,26 +54,6 @@ def local_setup(env): # worry if the file isn't present. 
pass -def between_threshold_pct(a, b): - THRESHOLD_PERCENT = 3 - lower = b - THRESHOLD_PERCENT - upper = b + THRESHOLD_PERCENT - - if a >= lower and a <= upper: - return True - return False - - -def between_threshold_abs(a, b): - THRESHOLD = 0.25 - lower = b - THRESHOLD - upper = b + THRESHOLD - - if a >= lower and a <= upper: - return True - return False - - SMALL_WORKLOAD = { "duty_cycle_pct": SMALL_DCYCLE, -- GitLab From 391fc32c7b217caab5906a8a4a74b7be3cf2252f Mon Sep 17 00:00:00 2001 From: Brendan Jackman Date: Tue, 27 Sep 2016 18:01:23 +0100 Subject: [PATCH 11/20] tests/eas/acceptance: Remove unused import --- tests/eas/acceptance.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/eas/acceptance.py b/tests/eas/acceptance.py index 9505044aa..4d0062b72 100644 --- a/tests/eas/acceptance.py +++ b/tests/eas/acceptance.py @@ -20,7 +20,6 @@ from wlgen import RTA, Periodic, Step from devlib.target import TargetError import trappy -import shutil import os import unittest import logging -- GitLab From 397025b55b34a884bad47ec3be67bccf1452762f Mon Sep 17 00:00:00 2001 From: Brendan Jackman Date: Tue, 27 Sep 2016 18:03:53 +0100 Subject: [PATCH 12/20] tests/eas/acceptance: Cleanup imports Re-order imports according to PEP8 [1]. Also remove duplicate `import json`. [1] https://www.python.org/dev/peps/pep-0008/#imports --- tests/eas/acceptance.py | 24 +++++++++++------------- 1 file changed, 11 insertions(+), 13 deletions(-) diff --git a/tests/eas/acceptance.py b/tests/eas/acceptance.py index 4d0062b72..91ad7affe 100644 --- a/tests/eas/acceptance.py +++ b/tests/eas/acceptance.py @@ -15,16 +15,19 @@ # limitations under the License. # -from env import TestEnv -from wlgen import RTA, Periodic, Step -from devlib.target import TargetError - -import trappy -import os -import unittest -import logging import json import logging +import operator +import os +import trappy +import unittest + +from bart.sched.SchedAssert import SchedAssert +from bart.sched.SchedMultiAssert import SchedMultiAssert +from devlib.target import TargetError + +from wlgen import RTA, Periodic, Step +from env import TestEnv logging.basicConfig(level=logging.INFO) # Read the config file and update the globals @@ -75,11 +78,6 @@ STEP_WORKLOAD = { "loops": 2 } -from bart.sched.SchedAssert import SchedAssert -from bart.sched.SchedMultiAssert import SchedMultiAssert -import operator -import json - def log_result(data, log_fh): result_str = json.dumps(data, indent=3) -- GitLab From d4516bfc539ee1cf2f3bdb948385b13d05de17e4 Mon Sep 17 00:00:00 2001 From: Brendan Jackman Date: Tue, 27 Sep 2016 14:22:29 +0100 Subject: [PATCH 13/20] tests/eas/acceptance: Use `min` in OffloadMigrationAndIdlePull --- tests/eas/acceptance.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/tests/eas/acceptance.py b/tests/eas/acceptance.py index 91ad7affe..aa13ccc65 100644 --- a/tests/eas/acceptance.py +++ b/tests/eas/acceptance.py @@ -461,11 +461,7 @@ class OffloadMigrationAndIdlePull(unittest.TestCase): tasks = self.params.keys() num_offloaded_tasks = len(tasks) / 2 - first_task_finish_time = None - for task in tasks: - end_time = self.end_times[task] - if not first_task_finish_time or (end_time < first_task_finish_time): - first_task_finish_time = end_time + first_task_finish_time = min(self.end_times.values()) window = (self.get_migrator_activation_time(), first_task_finish_time) busy_time = self.a_assert.getCPUBusyTime("cluster", -- GitLab From d3e3a64a5c3e9a89651632ed823b72115ed9fea0 Mon Sep 17 00:00:00 2001 From: Brendan Jackman 
Date: Tue, 27 Sep 2016 18:20:22 +0100 Subject: [PATCH 14/20] tests/eas/acceptance: Add EasTest API This adds a class EasTest that inherits from LisaTest to provide a common set of functions for EAS acceptance tests. --- tests/eas/acceptance.py | 37 +++++++++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/tests/eas/acceptance.py b/tests/eas/acceptance.py index aa13ccc65..a928ed880 100644 --- a/tests/eas/acceptance.py +++ b/tests/eas/acceptance.py @@ -24,10 +24,12 @@ import unittest from bart.sched.SchedAssert import SchedAssert from bart.sched.SchedMultiAssert import SchedMultiAssert + from devlib.target import TargetError from wlgen import RTA, Periodic, Step from env import TestEnv +from test import LisaTest, experiment_test logging.basicConfig(level=logging.INFO) # Read the config file and update the globals @@ -84,6 +86,41 @@ def log_result(data, log_fh): logging.info(result_str) log_fh.write(result_str) +class EasTest(LisaTest): + """ + Base class for EAS tests + """ + + @classmethod + def setUpClass(cls, *args, **kwargs): + conf_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), + cls.conf_basename) + + super(EasTest, cls)._init(conf_file, *args, **kwargs) + + @classmethod + def _experimentsInit(cls, *args, **kwargs): + super(EasTest, cls)._experimentsInit(*args, **kwargs) + + if SET_IS_BIG_LITTLE: + try: + cls.target.write_value( + "/proc/sys/kernel/sched_is_big_little", 1) + except TargetError: + # That flag doesn't exist on mainline-integration kernels, so + # don't worry if the file isn't present. + pass + + def _do_test_first_cpu(self, experiment, tasks): + """Test that all tasks start on a big CPU""" + + sched_assert = self.get_multi_assert(experiment) + + self.assertTrue( + sched_assert.assertFirstCpu( + self.target.bl.bigs, + rank=len(tasks)), + msg="Not all the new generated tasks started on a big CPU") class ForkMigration(unittest.TestCase): """ -- GitLab From 346709896853e40f7d8d14dc7dbfdad30f7b59b1 Mon Sep 17 00:00:00 2001 From: Brendan Jackman Date: Fri, 23 Sep 2016 11:20:05 +0100 Subject: [PATCH 15/20] tests/eas/acceptance: Port ForkMigration to EasTest --- tests/eas/acceptance.py | 68 ++-------------------- tests/eas/acceptance_fork_migration.config | 55 +++++++++++++++++ 2 files changed, 61 insertions(+), 62 deletions(-) create mode 100644 tests/eas/acceptance_fork_migration.config diff --git a/tests/eas/acceptance.py b/tests/eas/acceptance.py index a928ed880..9da67838f 100644 --- a/tests/eas/acceptance.py +++ b/tests/eas/acceptance.py @@ -122,7 +122,7 @@ class EasTest(LisaTest): rank=len(tasks)), msg="Not all the new generated tasks started on a big CPU") -class ForkMigration(unittest.TestCase): +class ForkMigration(EasTest): """ Goal ==== @@ -141,68 +141,12 @@ class ForkMigration(unittest.TestCase): The threads start on a big core. 
""" - @classmethod - def setUpClass(cls): - cls.params = {} - cls.task_prefix = "fmig" - cls.env = TestEnv(test_conf=TEST_CONF) - cls.trace_file = os.path.join(cls.env.res_dir, "fork_migration.dat") - cls.log_file = os.path.join(cls.env.res_dir, "fork_migration.json") - cls.populate_params() - cls.tasks = cls.params.keys() - cls.num_tasks = len(cls.tasks) - local_setup(cls.env) - cls.run_workload() - cls.log_fh = open(os.path.join(cls.env.res_dir, cls.log_file), "w") - - @classmethod - def tearDownClass(cls): - cls.log_fh.close() - - @classmethod - def populate_params(cls): - big_prefix = cls.task_prefix + "_big" - for idx in range(len(cls.env.target.bl.bigs)): - task = big_prefix + str(idx) - cls.params[task] = Periodic(**BIG_WORKLOAD).get() - - little_prefix = cls.task_prefix + "_little" - for idx in range(len(cls.env.target.bl.littles)): - task = little_prefix + str(idx) - cls.params[task] = Periodic(**SMALL_WORKLOAD).get() - - @classmethod - def run_workload(cls): - wload = RTA( - cls.env.target, - "fork_migration", - calibration=cls.env.calibration()) - wload.conf(kind="profile", params=cls.params) - cls.env.ftrace.start() - wload.run( - out_dir=cls.env.res_dir, - background=False) - cls.env.ftrace.stop() - trace = cls.env.ftrace.get_trace(cls.trace_file) - - def test_first_cpu(self): - "Fork Migration: Test First CPU" - - logging.info("Fork Migration: Test First CPU") - f_assert = SchedMultiAssert( - self.trace_file, - self.env.topology, - execnames=self.tasks) - - log_result( - f_assert.getFirstCpu(), self.log_fh) - - self.assertTrue( - f_assert.assertFirstCpu( - self.env.target.bl.bigs, - rank=self.num_tasks), - msg="Not all the new generated tasks started on a big CPU") + conf_basename = "acceptance_fork_migration.config" + @experiment_test + def test_first_cpu(self, experiment, tasks): + """Fork Migration: Test First CPU""" + self._do_test_first_cpu(experiment, tasks) class SmallTaskPacking(unittest.TestCase): """ diff --git a/tests/eas/acceptance_fork_migration.config b/tests/eas/acceptance_fork_migration.config new file mode 100644 index 000000000..02d75ec70 --- /dev/null +++ b/tests/eas/acceptance_fork_migration.config @@ -0,0 +1,55 @@ +{ + "modules" : [ "bl" ], + "exclude_modules" : [ "hwmon" ], + "tools" : [ "rt-app" ], + "ftrace" : { + "events" : [ + "sched_overutilized", + "sched_energy_diff", + "sched_load_avg_task", + "sched_load_avg_cpu", + "sched_migrate_task", + "sched_switch" + ], + }, + "wloads" : { + // Create N 100% tasks and M 10% tasks which run in parallel, where N is + // the number of big CPUs and M is the number of LITTLE CPUs. 
+ "fmig" : { + "type" : "rt-app", + "conf" : { + "class" : "profile", + "params" : { + "small" : { + "kind" : "Periodic", + "params" : { + "duty_cycle_pct": 10, + "duration_s": 5, + "period_ms": 10, + }, + "prefix" : "small" + }, + "big" : { + "kind" : "Periodic", + "params" : { + "duty_cycle_pct": 100, + "duration_s" : 5, + "period_ms": 10 + }, + "prefix" : "large", + }, + }, + }, + }, + }, + "confs" : [ + { + "tag" : "", + "flags" : "ftrace", + "sched_features" : "ENERGY_AWARE", + "cpufreq" : { + "governor" : "performance" + } + } + ] +} -- GitLab From a5d88332f8f6a7995303d276e26362ed8780defc Mon Sep 17 00:00:00 2001 From: Brendan Jackman Date: Fri, 23 Sep 2016 15:19:08 +0100 Subject: [PATCH 16/20] tests/eas/acceptance: Port SmallTaskPacking to LisaTest --- tests/eas/acceptance.py | 77 +++---------------- .../eas/acceptance_small_task_packing.config | 42 ++++++++++ 2 files changed, 54 insertions(+), 65 deletions(-) create mode 100644 tests/eas/acceptance_small_task_packing.config diff --git a/tests/eas/acceptance.py b/tests/eas/acceptance.py index 9da67838f..a47652e8a 100644 --- a/tests/eas/acceptance.py +++ b/tests/eas/acceptance.py @@ -148,7 +148,7 @@ class ForkMigration(EasTest): """Fork Migration: Test First CPU""" self._do_test_first_cpu(experiment, tasks) -class SmallTaskPacking(unittest.TestCase): +class SmallTaskPacking(EasTest): """ Goal ==== @@ -168,83 +168,30 @@ class SmallTaskPacking(unittest.TestCase): All tasks run on little cpus. """ - @classmethod - def setUpClass(cls): - cls.params = {} - cls.task_prefix = "stp" - cls.env = TestEnv(test_conf=TEST_CONF) - cls.trace_file = os.path.join( - cls.env.res_dir, - "small_task_packing.dat") - cls.log_file = os.path.join(cls.env.res_dir, "small_task_packing.json") - cls.num_tasks = len(cls.env.target.bl.bigs + cls.env.target.bl.littles) - cls.populate_params() - cls.tasks = cls.params.keys() - local_setup(cls.env) - cls.run_workload() - cls.s_assert = SchedMultiAssert( - cls.trace_file, - cls.env.topology, - execnames=cls.tasks) - cls.log_fh = open(os.path.join(cls.env.res_dir, cls.log_file), "w") - - @classmethod - def tearDownClass(cls): - cls.log_fh.close() - - @classmethod - def populate_params(cls): - for i in range(cls.num_tasks): - task = cls.task_prefix + str(i) - cls.params[task] = Periodic(**SMALL_WORKLOAD).get() - - @classmethod - def run_workload(cls): - wload = RTA( - cls.env.target, - "small_task_packing", - calibration=cls.env.calibration()) - wload.conf(kind="profile", params=cls.params) - cls.env.ftrace.start() - wload.run( - out_dir=cls.env.res_dir, - background=False) - cls.env.ftrace.stop() - trace = cls.env.ftrace.get_trace(cls.trace_file) + conf_basename = "acceptance_small_task_packing.config" - def test_small_task_pack_first_cpu(self): - "Small Task Packing: First CPU: BIG" - - logging.info("Small Task Packing: First CPU: BIG\n") - log_result(self.s_assert.getFirstCpu(), self.log_fh) - self.assertTrue( - self.s_assert.assertFirstCpu( - self.env.target.bl.bigs, - rank=self.num_tasks), - msg="Not all the new generated tasks started on a big CPU") + @experiment_test + def test_first_cpu(self, experiment, tasks): + """Small Task Packing: test first CPU""" + self._do_test_first_cpu(experiment, tasks) - def test_small_task_residency(self): + @experiment_test + def test_small_task_residency(self, experiment, tasks): "Small Task Packing: Test Residency (Little Cluster)" - logging.info("Small Task Packing: Test Residency (Little Cluster)") - log_result( - self.s_assert.getResidency( - "cluster", - 
self.env.target.bl.littles, - percent=True), self.log_fh) + sched_assert = self.get_multi_assert(experiment) self.assertTrue( - self.s_assert.assertResidency( + sched_assert.assertResidency( "cluster", - self.env.target.bl.littles, + self.target.bl.littles, EXPECTED_RESIDENCY_PCT, operator.ge, percent=True, - rank=self.num_tasks), + rank=len(tasks)), msg="Not all tasks are running on LITTLE cores for at least {}% of their execution time"\ .format(EXPECTED_RESIDENCY_PCT)) - class OffloadMigrationAndIdlePull(unittest.TestCase): """ Goal diff --git a/tests/eas/acceptance_small_task_packing.config b/tests/eas/acceptance_small_task_packing.config new file mode 100644 index 000000000..251799433 --- /dev/null +++ b/tests/eas/acceptance_small_task_packing.config @@ -0,0 +1,42 @@ +{ + "modules" : [ "bl" ], + "exclude_modules" : [ "hwmon" ], + "tools" : [ "rt-app" ], + "ftrace" : { + "events" : [ + "sched_overutilized", + "sched_energy_diff", + "sched_load_avg_task", + "sched_load_avg_cpu", + "sched_migrate_task", + "sched_switch" + ], + }, + "wloads" : { + // Create one small task for each CPU + "small_tasks" : { + "type" : "rt-app", + "conf" : { + "class" : "periodic", + "params" : { + "duty_cycle_pct": 10, + "duration_s": 5, + "period_ms": 10, + }, + // Create one task for each CPU + "tasks" : "cpus", + "prefix" : "stp" + }, + }, + }, + "confs" : [ + { + "tag" : "", + "flags" : "ftrace", + "sched_features" : "ENERGY_AWARE", + "cpufreq" : { + "governor" : "performance" + } + } + ] +} -- GitLab From a3b3eba6553af4e787a5a28a09fbd5429f430b50 Mon Sep 17 00:00:00 2001 From: Brendan Jackman Date: Fri, 23 Sep 2016 17:22:30 +0100 Subject: [PATCH 17/20] tests/eas/acceptance: Port OffloadMigrationAndIdlePull to LisaTest --- tests/eas/acceptance.py | 190 +++++------------- tests/eas/acceptance_offload_idle_pull.config | 56 ++++++ 2 files changed, 105 insertions(+), 141 deletions(-) create mode 100644 tests/eas/acceptance_offload_idle_pull.config diff --git a/tests/eas/acceptance.py b/tests/eas/acceptance.py index a47652e8a..314973799 100644 --- a/tests/eas/acceptance.py +++ b/tests/eas/acceptance.py @@ -192,7 +192,7 @@ class SmallTaskPacking(EasTest): msg="Not all tasks are running on LITTLE cores for at least {}% of their execution time"\ .format(EXPECTED_RESIDENCY_PCT)) -class OffloadMigrationAndIdlePull(unittest.TestCase): +class OffloadMigrationAndIdlePull(EasTest): """ Goal ==== @@ -241,130 +241,28 @@ class OffloadMigrationAndIdlePull(unittest.TestCase): """ - @classmethod - def setUpClass(cls): - cls.params = {} - cls.env = TestEnv(test_conf=TEST_CONF) - cls.trace_file = os.path.join(cls.env.res_dir, "offload_idle_pull.dat") - cls.log_file = os.path.join(cls.env.res_dir, "offload_idle_pull.json") - cls.early_starters = [] - cls.migrators = [] - cls.num_tasks = len(cls.env.target.bl.bigs) - cls.populate_tasks() - local_setup(cls.env) - cls.run_workload() - - cls.trace = trappy.FTrace(cls.trace_file) - cls.m_assert = SchedMultiAssert(cls.trace, cls.env.topology, - execnames=cls.migrators) - cls.e_assert = SchedMultiAssert(cls.trace, cls.env.topology, - execnames=cls.early_starters) - - all_tasks = cls.early_starters + cls.migrators - cls.a_assert = SchedMultiAssert(cls.trace, cls.env.topology, - execnames=all_tasks) - cls.offset = cls.get_offset() - - cls.end_times = cls.calculate_end_times() - cls.log_fh = open(os.path.join(cls.env.res_dir, cls.log_file), "w") - - @classmethod - def tearDownClass(cls): - cls.log_fh.close() - - @classmethod - def populate_tasks(cls): - migrator_workload = 
BIG_WORKLOAD.copy() - migrator_workload["duration_s"] = 9 - migrator_workload["delay_s"] = OFFLOAD_MIGRATION_MIGRATOR_DELAY - - for idx in range(cls.num_tasks): - task = "early_starters" + str(idx) - cls.params[task] = Periodic(**BIG_WORKLOAD).get() - cls.early_starters.append(task) - - # Tasks that will be idle pulled - task = "migrator" + str(idx) - cls.params[task] = Periodic(**migrator_workload).get() - cls.migrators.append(task) - - @classmethod - def run_workload(cls): - - wload = RTA( - cls.env.target, - "offload_idle_pull", - calibration=cls.env.calibration()) - wload.conf(kind="profile", params=cls.params) - cls.env.ftrace.start() - wload.run( - out_dir=cls.env.res_dir, - background=False) - cls.env.ftrace.stop() - trace = cls.env.ftrace.get_trace(cls.trace_file) - - @classmethod - def get_offset(cls): - task_start_times = cls.a_assert.getStartTime().values() - return min([t['starttime'] for t in task_start_times]) - - @classmethod - def calculate_end_times(cls): - - end_times = {} - for task in cls.params.keys(): - sched_assert = SchedAssert(cls.trace, cls.env.topology, - execname=task) - end_times[task] = sched_assert.getEndTime() + conf_basename = "acceptance_offload_idle_pull.config" - return end_times - - def get_migrator_activation_time(self): - start_times_dict = self.m_assert.getStartTime() - start_time = min(t['starttime'] for t in start_times_dict.itervalues()) - - return start_time + OFFLOAD_MIGRATION_MIGRATOR_DELAY - - def test_first_cpu_early_starters(self): - """Offload Migration and Idle Pull: Test First CPU (Early Starters)""" - - logging.info( - "Offload Migration and Idle Pull: Test First CPU (Early Starters)") - log_result( - self.e_assert.getFirstCpu(), self.log_fh) - - self.assertTrue( - self.e_assert.assertFirstCpu( - self.env.target.bl.bigs, - rank=self.num_tasks), - msg="Not all the new 'early starter' tasks started on a big CPU") - - def test_first_cpu_migrators(self): - "Offload Migration and Idle Pull: Test First CPU (Migrators)" - - logging.info( - "Offload Migration and Idle Pull: Test First CPU (Migrators)") - - log_result( - self.m_assert.getFirstCpu(), self.log_fh) - - self.assertTrue( - self.m_assert.assertFirstCpu( - self.env.target.bl.bigs, - rank=self.num_tasks), - msg="Not all the new 'migrator' tasks started on a big CPU") + @experiment_test + def test_first_cpu(self, experiment, tasks): + """Offload Migration and Idle Pull: Test First CPU""" + self._do_test_first_cpu(experiment, tasks) - def test_big_cpus_fully_loaded(self): + @experiment_test + def test_big_cpus_fully_loaded(self, experiment, tasks): """Offload Migration and Idle Pull: Big cpus are fully loaded as long as there are tasks left to run in the system""" - num_big_cpus = len(self.env.target.bl.bigs) + num_big_cpus = len(self.target.bl.bigs) - end_times = sorted(self.end_times.values()) + sched_assert = self.get_multi_assert(experiment) + + end_times = sorted(self.get_end_times(experiment).values()) # Window of time until the first migrator finishes - window = (self.offset, end_times[-num_big_cpus]) - busy_time = self.a_assert.getCPUBusyTime("cluster", - self.env.target.bl.bigs, - window=window, percent=True) + window = (self.get_start_time(experiment), end_times[-num_big_cpus]) + busy_time = sched_assert.getCPUBusyTime("cluster", + self.target.bl.bigs, + window=window, percent=True) + msg = "Big cpus were not fully loaded while there were enough big tasks to fill them" self.assertGreater(busy_time, OFFLOAD_EXPECTED_BUSY_TIME_PCT, msg=msg) @@ -373,9 +271,9 @@ class 
OffloadMigrationAndIdlePull(unittest.TestCase): for i in range(num_big_cpus-1): big_cpus_left = num_big_cpus - i - 1 window = (end_times[-num_big_cpus+i], end_times[-num_big_cpus+i+1]) - busy_time = self.a_assert.getCPUBusyTime("cluster", - self.env.target.bl.bigs, - window=window, percent=True) + busy_time = sched_assert.getCPUBusyTime("cluster", + self.target.bl.bigs, + window=window, percent=True) expected_busy_time = OFFLOAD_EXPECTED_BUSY_TIME_PCT * \ big_cpus_left / num_big_cpus @@ -384,17 +282,27 @@ class OffloadMigrationAndIdlePull(unittest.TestCase): self.assertGreater(busy_time, expected_busy_time, msg=msg) - def test_little_cpus_run_tasks(self): + @experiment_test + def test_little_cpus_run_tasks(self, experiment, tasks): """Offload Migration and Idle Pull: Little cpus run tasks while bigs are busy""" - tasks = self.params.keys() + num_offloaded_tasks = len(tasks) / 2 - first_task_finish_time = min(self.end_times.values()) + end_times = self.get_end_times(experiment).values() + first_task_finish_time = min(end_times) + + migrators_assert = self.get_multi_assert(experiment, "migrator") + start_time = min(t["starttime"] + for t in migrators_assert.getStartTime().itervalues()) + migrator_activation_time = start_time + OFFLOAD_MIGRATION_MIGRATOR_DELAY + + window = (migrator_activation_time, first_task_finish_time) + + all_tasks_assert = self.get_multi_assert(experiment) - window = (self.get_migrator_activation_time(), first_task_finish_time) - busy_time = self.a_assert.getCPUBusyTime("cluster", - self.env.target.bl.littles, - window=window) + busy_time = all_tasks_assert.getCPUBusyTime("cluster", + self.target.bl.littles, + window=window) window_len = window[1] - window[0] expected_busy_time = window_len * num_offloaded_tasks * \ @@ -403,25 +311,26 @@ class OffloadMigrationAndIdlePull(unittest.TestCase): self.assertGreater(busy_time, expected_busy_time, msg=msg) - def test_all_tasks_run_on_a_big_cpu(self): + @experiment_test + def test_all_tasks_run_on_a_big_cpu(self, experiment, tasks): """Offload Migration and Idle Pull: All tasks run on a big cpu at some point Note: this test may fail in big.LITTLE platforms in which the little cpus are almost as performant as the big ones. """ - - for task in self.params.keys(): - sa = SchedAssert(self.trace, self.env.topology, execname=task) - window = (0, self.end_times[task]) - big_residency = sa.getResidency("cluster", self.env.target.bl.bigs, + for task in tasks: + sa = SchedAssert(experiment.out_dir, self.te.topology, execname=task) + end_times = self.get_end_times(experiment) + window = (0, end_times[task]) + big_residency = sa.getResidency("cluster", self.target.bl.bigs, window=window, percent=True) - log_result(big_residency, self.log_fh) msg = "Task {} didn't run on a big cpu.".format(task) self.assertGreater(big_residency, 0, msg=msg) - def test_all_tasks_finish_on_a_big_cpu(self): + @experiment_test + def test_all_tasks_finish_on_a_big_cpu(self, experiment, tasks): """Offload Migration and Idle Pull: All tasks finish on a big cpu Note: this test may fail in big.LITTLE systems where the @@ -430,12 +339,11 @@ class OffloadMigrationAndIdlePull(unittest.TestCase): task. 
""" - - for task in self.params.keys(): - sa = SchedAssert(self.trace, self.env.topology, execname=task) + for task in tasks: + sa = SchedAssert(experiment.out_dir, self.te.topology, execname=task) msg = "Task {} did not finish on a big cpu".format(task) - self.assertIn(sa.getLastCpu(), self.env.target.bl.bigs, msg=msg) + self.assertIn(sa.getLastCpu(), self.target.bl.bigs, msg=msg) class WakeMigration(unittest.TestCase): diff --git a/tests/eas/acceptance_offload_idle_pull.config b/tests/eas/acceptance_offload_idle_pull.config new file mode 100644 index 000000000..f53d76c2b --- /dev/null +++ b/tests/eas/acceptance_offload_idle_pull.config @@ -0,0 +1,56 @@ +{ + "modules" : [ "bl" ], + "exclude_modules" : [ "hwmon" ], + "tools" : [ "rt-app" ], + "ftrace" : { + "events" : [ + "sched_overutilized", + "sched_energy_diff", + "sched_load_avg_task", + "sched_load_avg_cpu", + "sched_migrate_task", + "sched_switch" + ], + }, + "wloads" : { + "early_and_migrators" : { + "type" : "rt-app", + "conf" : { + "class" : "profile", + "params" : { + "early" : { + "kind" : "Periodic", + "params" : { + "duty_cycle_pct": 100, + "duration_s": 5, + "period_ms": 10, + }, + // Create one task for each big CPU + "tasks" : "big", + }, + "migrator" : { + "kind" : "Periodic", + "params" : { + "duty_cycle_pct": 100, + "duration_s": 5, + "period_ms": 10, + "delay_s": 1 + }, + // Create one task for each big CPU + "tasks" : "big", + }, + }, + }, + }, + }, + "confs" : [ + { + "tag" : "", + "flags" : "ftrace", + "sched_features" : "ENERGY_AWARE", + "cpufreq" : { + "governor" : "performance" + } + } + ] +} -- GitLab From 6ba6ed0b798e17df7447f046cd67d217a7289e34 Mon Sep 17 00:00:00 2001 From: Brendan Jackman Date: Tue, 27 Sep 2016 17:50:02 +0100 Subject: [PATCH 18/20] tests/eas/acceptance: Port WakeMigration test to use LisaTest --- tests/eas/acceptance.py | 166 +++++---------------- tests/eas/acceptance_wake_migration.config | 48 ++++++ 2 files changed, 88 insertions(+), 126 deletions(-) create mode 100644 tests/eas/acceptance_wake_migration.config diff --git a/tests/eas/acceptance.py b/tests/eas/acceptance.py index 314973799..c4727cccb 100644 --- a/tests/eas/acceptance.py +++ b/tests/eas/acceptance.py @@ -346,7 +346,7 @@ class OffloadMigrationAndIdlePull(EasTest): self.assertIn(sa.getLastCpu(), self.target.bl.bigs, msg=msg) -class WakeMigration(unittest.TestCase): +class WakeMigration(EasTest): """ Goal ==== @@ -370,193 +370,107 @@ class WakeMigration(unittest.TestCase): the big cpus when they are big. 
""" - @classmethod - def setUpClass(cls): - cls.params = {} - cls.env = TestEnv(test_conf=TEST_CONF) - cls.task_prefix = "wmig" - cls.trace_file = os.path.join(cls.env.res_dir, "wake_migration.dat") - cls.log_file = os.path.join(cls.env.res_dir, "wake_migration.json") - cls.populate_params() - cls.tasks = cls.params.keys() - cls.num_tasks = len(cls.tasks) - local_setup(cls.env) - cls.run_workload() - cls.s_assert = SchedMultiAssert( - cls.trace_file, - cls.env.topology, - execnames=cls.tasks) - cls.offset = cls.get_offset(cls.tasks[0]) - cls.log_fh = open(os.path.join(cls.env.res_dir, cls.log_file), "w") - - @classmethod - def tearDownClass(cls): - cls.log_fh.close() - - @classmethod - def populate_params(cls): - num_big_cpus = len(cls.env.target.bl.bigs) - - for i in range(num_big_cpus): - task_name = "{}_{}".format(cls.task_prefix, i) - cls.params[task_name] = Step(**STEP_WORKLOAD).get() - - cls.phase_duration = STEP_WORKLOAD["time_s"] - - @classmethod - def run_workload(cls): - wload = RTA( - cls.env.target, - "wake_migration", - calibration=cls.env.calibration()) - wload.conf(kind="profile", params=cls.params) - cls.env.ftrace.start() - wload.run( - out_dir=cls.env.res_dir, - background=False) - cls.env.ftrace.stop() - trace = cls.env.ftrace.get_trace(cls.trace_file) - - @classmethod - def get_offset(cls, task_name): - return SchedAssert( - cls.trace_file, - cls.env.topology, - execname=task_name).getStartTime() + conf_basename = "acceptance_wake_migration.config" + phase_duration = WORKLOAD_DURATION_S - def test_first_cpu(self): + @experiment_test + def test_first_cpu(self, experiment, tasks): """Wake Migration: Test First CPU""" + self._do_test_first_cpu(experiment, tasks) - logging.info("Wake Migration: Test First CPU") - - log_result(self.s_assert.getFirstCpu(), self.log_fh) - - self.assertTrue( - self.s_assert.assertFirstCpu( - self.env.target.bl.bigs, - rank=self.num_tasks), - msg="Not all the new generated tasks started on a big CPU") - - - def test_little_big_switch1(self): + @experiment_test + def test_little_big_switch1(self, experiment, tasks): """Wake Migration: LITTLE -> BIG: 1""" - expected_time = self.offset + self.phase_duration + expected_time = self.get_start_time(experiment) + self.phase_duration switch_window = ( expected_time - SWITCH_WINDOW_HALF, expected_time + SWITCH_WINDOW_HALF) - logging.info( - "Wake Migration: LITTLE -> BIG Window: {}".format(switch_window)) - - log_result( - self.s_assert.assertSwitch( - "cluster", - self.env.target.bl.littles, - self.env.target.bl.bigs, - window=switch_window), self.log_fh) + sched_assert = self.get_multi_assert(experiment) self.assertTrue( - self.s_assert.assertSwitch( + sched_assert.assertSwitch( "cluster", - self.env.target.bl.littles, - self.env.target.bl.bigs, - rank=self.num_tasks, + self.target.bl.littles, + self.target.bl.bigs, + rank=len(tasks), window=switch_window), msg="Not all tasks are wake-migrated to big cores in the expected window: {}"\ .format(switch_window)) - def test_little_big_switch2(self): + @experiment_test + def test_little_big_switch2(self, experiment, tasks): """Wake Migration: LITTLE -> BIG: 2""" # little - big - little - big # ^ # We want to test that this little to big migration happens. So we skip # the first three phases. 
- expected_time = self.offset + 3 * self.phase_duration + expected_time = (self.get_start_time(experiment) + + 3*self.phase_duration) switch_window = ( expected_time - SWITCH_WINDOW_HALF, expected_time + SWITCH_WINDOW_HALF) - logging.info( - "Wake Migration: LITTLE -> BIG Window: {}".format(switch_window)) - - log_result( - self.s_assert.assertSwitch( - "cluster", - self.env.target.bl.littles, - self.env.target.bl.bigs, - window=switch_window), self.log_fh) + s_assert = self.get_multi_assert(experiment) self.assertTrue( - self.s_assert.assertSwitch( + s_assert.assertSwitch( "cluster", - self.env.target.bl.littles, - self.env.target.bl.bigs, - rank=self.num_tasks, + self.target.bl.littles, + self.target.bl.bigs, + rank=len(tasks), window=switch_window), msg="Not all tasks are wake-migrated to big cores in the expected window: {}"\ .format(switch_window)) - def test_big_little_switch1(self): + @experiment_test + def test_big_little_switch1(self, experiment, tasks): """Wake Migration: BIG -> LITLLE: 1""" - expected_time = self.offset + expected_time = self.get_start_time(experiment) switch_window = ( max(expected_time - SWITCH_WINDOW_HALF, 0), expected_time + SWITCH_WINDOW_HALF) - logging.info( - "Wake Migration: BIG -> LITTLE Window: {}".format(switch_window)) - - log_result( - self.s_assert.assertSwitch( - "cluster", - self.env.target.bl.bigs, - self.env.target.bl.littles, - window=switch_window), self.log_fh) + s_assert = self.get_multi_assert(experiment) self.assertTrue( - self.s_assert.assertSwitch( + s_assert.assertSwitch( "cluster", - self.env.target.bl.bigs, - self.env.target.bl.littles, - rank=self.num_tasks, + self.target.bl.bigs, + self.target.bl.littles, + rank=len(tasks), window=switch_window), msg="Not all tasks are wake-migrated to LITTLE cores in the expected window: {}"\ .format(switch_window)) - def test_big_little_switch2(self): + @experiment_test + def test_big_little_switch2(self, experiment, tasks): """Wake Migration: BIG -> LITLLE: 2""" # little - big - little - big # ^ # We want to test that this big to little migration happens. So we skip # the first two phases. 
- expected_time = self.offset + 2 * self.phase_duration + expected_time = (self.get_start_time(experiment) + + 2*self.phase_duration) switch_window = ( expected_time - SWITCH_WINDOW_HALF, expected_time + SWITCH_WINDOW_HALF) - logging.info( - "Wake Migration: BIG -> LITTLE Window: {}".format(switch_window)) - - log_result( - self.s_assert.assertSwitch( - "cluster", - self.env.target.bl.bigs, - self.env.target.bl.littles, - window=switch_window), self.log_fh) + s_assert = self.get_multi_assert(experiment) self.assertTrue( - self.s_assert.assertSwitch( + s_assert.assertSwitch( "cluster", - self.env.target.bl.bigs, - self.env.target.bl.littles, - rank=self.num_tasks, + self.target.bl.bigs, + self.target.bl.littles, + rank=len(tasks), window=switch_window), msg="Not all tasks are wake-migrated to LITTLE cores in the expected window: {}"\ .format(switch_window)) diff --git a/tests/eas/acceptance_wake_migration.config b/tests/eas/acceptance_wake_migration.config new file mode 100644 index 000000000..963f101fb --- /dev/null +++ b/tests/eas/acceptance_wake_migration.config @@ -0,0 +1,48 @@ +{ + "modules" : [ "bl" ], + "exclude_modules" : [ "hwmon" ], + "tools" : [ "rt-app" ], + "ftrace" : { + "events" : [ + "sched_overutilized", + "sched_energy_diff", + "sched_load_avg_task", + "sched_load_avg_cpu", + "sched_migrate_task", + "sched_switch", + "cpu_frequency", + ], + }, + "wloads" : { + // Create one small task for each CPU + "wake_migration" : { + "type" : "rt-app", + "conf" : { + "class" : "profile", + "params" : { + "wmig" : { + "kind" : "Step", + "params" : { + "start_pct": 10, // STEP_LOW_DCYCLE + "end_pct": 50, // STEP_HIGH_DCYCLE + "time_s": 5, // WORKLOAD_DURATION_S + "loops": 2 + }, + // Create one task for each CPU + "tasks" : "big", + }, + }, + }, + }, + }, + "confs" : [ + { + "tag" : "", + "flags" : "ftrace", + "sched_features" : "ENERGY_AWARE", + "cpufreq" : { + "governor" : "performance" + } + } + ] +} -- GitLab From 3aca246b0c283c224f55d5b2955f314257e7bfff Mon Sep 17 00:00:00 2001 From: Brendan Jackman Date: Wed, 28 Sep 2016 18:44:10 +0100 Subject: [PATCH 19/20] tests/eas/acceptance: Factor out _assert_switch in WakeMigration Code is heavily duplicated between the test methods in WakeMigration. Create an _assert_switch method that avoids this duplication. 
--- tests/eas/acceptance.py | 99 +++++++++++++---------------------------- 1 file changed, 30 insertions(+), 69 deletions(-) diff --git a/tests/eas/acceptance.py b/tests/eas/acceptance.py index c4727cccb..b01fbff54 100644 --- a/tests/eas/acceptance.py +++ b/tests/eas/acceptance.py @@ -371,34 +371,45 @@ class WakeMigration(EasTest): """ conf_basename = "acceptance_wake_migration.config" - phase_duration = WORKLOAD_DURATION_S @experiment_test def test_first_cpu(self, experiment, tasks): """Wake Migration: Test First CPU""" self._do_test_first_cpu(experiment, tasks) - @experiment_test - def test_little_big_switch1(self, experiment, tasks): - """Wake Migration: LITTLE -> BIG: 1""" - expected_time = self.get_start_time(experiment) + self.phase_duration - switch_window = ( - expected_time - - SWITCH_WINDOW_HALF, - expected_time + - SWITCH_WINDOW_HALF) + def _assert_switch(self, experiment, expected_switch_to, phases): + if expected_switch_to == "big": + switch_from = self.target.bl.littles + switch_to = self.target.bl.bigs + elif expected_switch_to == "little": + switch_from = self.target.bl.bigs + switch_to = self.target.bl.littles + else: + raise ValueError("Invalid expected_switch_to") sched_assert = self.get_multi_assert(experiment) + expected_time = (self.get_start_time(experiment) + + phases*WORKLOAD_DURATION_S) + switch_window = (max(expected_time - SWITCH_WINDOW_HALF, 0), + expected_time + SWITCH_WINDOW_HALF) + + fmt = "Not all tasks wake-migrated to {} cores in the expected window: {}" + msg = fmt.format(expected_switch_to, switch_window) + self.assertTrue( sched_assert.assertSwitch( "cluster", - self.target.bl.littles, - self.target.bl.bigs, - rank=len(tasks), - window=switch_window), - msg="Not all tasks are wake-migrated to big cores in the expected window: {}"\ - .format(switch_window)) + switch_from, + switch_to, + window=switch_window, + rank=len(experiment.wload.tasks)), + msg=msg) + + @experiment_test + def test_little_big_switch1(self, experiment, tasks): + """Wake Migration: LITTLE -> BIG: 1""" + self._assert_switch(experiment, "big", 1) @experiment_test def test_little_big_switch2(self, experiment, tasks): @@ -408,44 +419,12 @@ class WakeMigration(EasTest): # ^ # We want to test that this little to big migration happens. So we skip # the first three phases. 
- expected_time = (self.get_start_time(experiment) - + 3*self.phase_duration) - switch_window = ( - expected_time - - SWITCH_WINDOW_HALF, - expected_time + - SWITCH_WINDOW_HALF) - - s_assert = self.get_multi_assert(experiment) - - self.assertTrue( - s_assert.assertSwitch( - "cluster", - self.target.bl.littles, - self.target.bl.bigs, - rank=len(tasks), - window=switch_window), - msg="Not all tasks are wake-migrated to big cores in the expected window: {}"\ - .format(switch_window)) + self._assert_switch(experiment, "big", 3) @experiment_test def test_big_little_switch1(self, experiment, tasks): """Wake Migration: BIG -> LITLLE: 1""" - expected_time = self.get_start_time(experiment) - switch_window = ( - max(expected_time - SWITCH_WINDOW_HALF, 0), expected_time + SWITCH_WINDOW_HALF) - - s_assert = self.get_multi_assert(experiment) - - self.assertTrue( - s_assert.assertSwitch( - "cluster", - self.target.bl.bigs, - self.target.bl.littles, - rank=len(tasks), - window=switch_window), - msg="Not all tasks are wake-migrated to LITTLE cores in the expected window: {}"\ - .format(switch_window)) + self._assert_switch(experiment, "little", 0) @experiment_test def test_big_little_switch2(self, experiment, tasks): @@ -455,22 +434,4 @@ class WakeMigration(EasTest): # ^ # We want to test that this big to little migration happens. So we skip # the first two phases. - expected_time = (self.get_start_time(experiment) - + 2*self.phase_duration) - switch_window = ( - expected_time - - SWITCH_WINDOW_HALF, - expected_time + - SWITCH_WINDOW_HALF) - - s_assert = self.get_multi_assert(experiment) - - self.assertTrue( - s_assert.assertSwitch( - "cluster", - self.target.bl.bigs, - self.target.bl.littles, - rank=len(tasks), - window=switch_window), - msg="Not all tasks are wake-migrated to LITTLE cores in the expected window: {}"\ - .format(switch_window)) + self._assert_switch(experiment, "little", 2) -- GitLab From cd7ca606aeca8767f2310265b1955776bd9dcd8b Mon Sep 17 00:00:00 2001 From: Brendan Jackman Date: Tue, 27 Sep 2016 17:06:22 +0100 Subject: [PATCH 20/20] tests/eas/acceptance: Remove unused code Now that all the EAS acceptance tests have been ported to use the EasTest class this code is no longer needed --- tests/eas/acceptance.py | 48 ----------------------------------------- 1 file changed, 48 deletions(-) diff --git a/tests/eas/acceptance.py b/tests/eas/acceptance.py index b01fbff54..fe3c0fd94 100644 --- a/tests/eas/acceptance.py +++ b/tests/eas/acceptance.py @@ -16,22 +16,18 @@ # import json -import logging import operator import os import trappy import unittest from bart.sched.SchedAssert import SchedAssert -from bart.sched.SchedMultiAssert import SchedMultiAssert from devlib.target import TargetError -from wlgen import RTA, Periodic, Step from env import TestEnv from test import LisaTest, experiment_test -logging.basicConfig(level=logging.INFO) # Read the config file and update the globals CONF_FILE = os.path.join( os.path.dirname( @@ -42,50 +38,6 @@ with open(CONF_FILE, "r") as fh: conf_vars = json.load(fh) globals().update(conf_vars) - -def local_setup(env): - env.target.cpufreq.set_all_governors("performance") - - if ENABLE_EAS: - env.target.execute( - "echo ENERGY_AWARE > /sys/kernel/debug/sched_features") - - if SET_IS_BIG_LITTLE: - try: - env.target.write_value("/proc/sys/kernel/sched_is_big_little", 1) - except TargetError: - # That flag doesn't exist on mainline-integration kernels, so don't - # worry if the file isn't present. 
- pass - -SMALL_WORKLOAD = { - - "duty_cycle_pct": SMALL_DCYCLE, - "duration_s": WORKLOAD_DURATION_S, - "period_ms": WORKLOAD_PERIOD_MS, -} - -BIG_WORKLOAD = { - - "duty_cycle_pct": BIG_DCYCLE, - "duration_s": WORKLOAD_DURATION_S, - "period_ms": WORKLOAD_PERIOD_MS, -} - -STEP_WORKLOAD = { - - "start_pct": STEP_LOW_DCYCLE, - "end_pct": STEP_HIGH_DCYCLE, - "time_s": WORKLOAD_DURATION_S, - "loops": 2 -} - - -def log_result(data, log_fh): - result_str = json.dumps(data, indent=3) - logging.info(result_str) - log_fh.write(result_str) - class EasTest(LisaTest): """ Base class for EAS tests -- GitLab