diff --git a/libs/utils/executor.py b/libs/utils/executor.py
index 6d06e785ebfbccdf7ba952395cc7c67ccc710a76..4c6eceb0f41ae404512f0f13d28a7a30a4d93f97 100644
--- a/libs/utils/executor.py
+++ b/libs/utils/executor.py
@@ -17,6 +17,7 @@ from bart.common.Analyzer import Analyzer

 import collections
+from collections import namedtuple
 import datetime
 import gzip
 import json
@@ -41,6 +42,9 @@ from conf import JsonConf

 import wlgen

+Experiment = namedtuple('Experiment', ['wload_name', 'wload',
+                                       'conf', 'iteration', 'out_dir'])
+
 class Executor():

     def __init__(self, target_conf=None, tests_conf=None):
@@ -99,8 +103,9 @@ class Executor():
         self.te = TestEnv(target_conf, tests_conf)
         self.target = self.te.target

+        self._iterations = self._tests_conf.get('iterations', 1)
         # Compute total number of experiments
-        self._exp_count = self._tests_conf['iterations'] \
+        self._exp_count = self._iterations \
                 * len(self._tests_conf['wloads']) \
                 * len(self._tests_conf['confs'])

@@ -108,7 +113,7 @@ class Executor():

         logging.info('%14s - Configured to run:', 'Executor')

-        logging.info('%14s - %3d targt configurations:',
+        logging.info('%14s - %3d target configurations:',
                      'Executor', len(self._tests_conf['confs']))
         target_confs = [conf['tag'] for conf in self._tests_conf['confs']]
         target_confs = ', '.join(target_confs)
@@ -116,7 +121,7 @@ class Executor():

         logging.info('%14s - %3d workloads (%d iterations each)',
                      'Executor', len(self._tests_conf['wloads']),
-                     self._tests_conf['iterations'])
+                     self._iterations)
         wload_confs = ', '.join(self._tests_conf['wloads'])
         logging.info('%14s - %s', 'Executor', wload_confs)

@@ -129,19 +134,28 @@ class Executor():
     def run(self):
         self._print_section('Executor', 'Experiments execution')

+        self.experiments = []
+
         # Run all the configured experiments
-        exp_idx = 1
         for tc in self._tests_conf['confs']:
             # TARGET: configuration
             if not self._target_configure(tc):
                 continue
             for wl_idx in self._tests_conf['wloads']:
                 # TEST: configuration
-                wload = self._wload_init(tc, wl_idx)
-                for itr_idx in range(1, self._tests_conf['iterations']+1):
-                    # WORKLOAD: execution
-                    self._wload_run(exp_idx, tc, wl_idx, wload, itr_idx)
-                    exp_idx += 1
+                wload, test_dir = self._wload_init(tc, wl_idx)
+                for itr_idx in range(1, self._iterations + 1):
+                    exp = Experiment(
+                        wload_name=wl_idx,
+                        wload=wload,
+                        conf=tc,
+                        iteration=itr_idx,
+                        out_dir=os.path.join(test_dir, str(itr_idx)))
+                    self.experiments.append(exp)
+
+        # WORKLOAD: execution
+        for exp_idx, experiment in enumerate(self.experiments, 1):
+            self._wload_run(exp_idx, experiment)

         self._print_section('Executor', 'Experiments execution completed')
         logging.info('%14s - Results available in:', 'Executor')
@@ -367,9 +381,7 @@ class Executor():
         if conf['class'] == 'profile':
             params = {}
             # Load each task specification
-            for task_name in conf['params']:
-                task = conf['params'][task_name]
-                task_name = conf['prefix'] + task_name
+            for task_name, task in conf['params'].items():
                 if task['kind'] not in wlgen.__dict__:
                     logging.error(r'%14s - RTA task of kind [%s] not supported',
                                   'RTApp', task['kind'])
@@ -378,7 +390,13 @@ class Executor():
                                      'in RT-App workload specification'\
                                      .format(task))
                 task_ctor = getattr(wlgen, task['kind'])
-                params[task_name] = task_ctor(**task['params']).get()
+                num_tasks = task.get('tasks', 1)
+                task_idxs = self._wload_task_idxs(wl_idx, num_tasks)
+                for idx in task_idxs:
+                    idx_name = str(idx) if len(task_idxs) > 1 else ""
+                    task_name_idx = conf['prefix'] + task_name + idx_name
+                    params[task_name_idx] = task_ctor(**task['params']).get()
+
             rtapp = wlgen.RTA(self.target,
                               wl_idx, calibration = self.te.calibration())
             rtapp.conf(kind='profile', params=params, loadref=loadref,
@@ -464,44 +482,35 @@ class Executor():
         wload = self._wload_conf(wl_idx, wlspec)

         # Keep track of platform configuration
-        self.te.test_dir = '{}/{}:{}:{}'\
+        test_dir = '{}/{}:{}:{}'\
                 .format(self.te.res_dir, wload.wtype, tc_idx, wl_idx)
-        os.system('mkdir -p ' + self.te.test_dir)
-        self.te.platform_dump(self.te.test_dir)
+        os.makedirs(test_dir)
+        self.te.platform_dump(test_dir)

         # Keep track of kernel configuration and version
         config = self.target.config
-        with gzip.open(os.path.join(self.te.test_dir, 'kernel.config'), 'wb') as fh:
+        with gzip.open(os.path.join(test_dir, 'kernel.config'), 'wb') as fh:
             fh.write(config.text)
         output = self.target.execute('{} uname -a'\
                 .format(self.target.busybox))
-        with open(os.path.join(self.te.test_dir, 'kernel.version'), 'w') as fh:
+        with open(os.path.join(test_dir, 'kernel.version'), 'w') as fh:
             fh.write(output)

-        return wload
-
-    def _wload_run_init(self, run_idx):
-        self.te.out_dir = '{}/{}'\
-                .format(self.te.test_dir, run_idx)
-        logging.debug(r'%14s - out_dir [%s]', 'Executor', self.te.out_dir)
-        os.system('mkdir -p ' + self.te.out_dir)
-
-        logging.debug(r'%14s - cleanup target output folder', 'Executor')
-
-        target_dir = self.target.working_directory
-        logging.debug('%14s - setup target directory [%s]',
-                      'Executor', target_dir)
+        return wload, test_dir

-    def _wload_run(self, exp_idx, tc, wl_idx, wload, run_idx):
+    def _wload_run(self, exp_idx, experiment):
+        tc = experiment.conf
+        wload = experiment.wload
         tc_idx = tc['tag']

         self._print_title('Executor', 'Experiment {}/{}, [{}:{}] {}/{}'\
                 .format(exp_idx, self._exp_count,
-                        tc_idx, wl_idx,
-                        run_idx, self._tests_conf['iterations']))
+                        tc_idx, experiment.wload_name,
+                        experiment.iteration, self._iterations))

         # Setup local results folder
-        self._wload_run_init(run_idx)
+        logging.debug(r'%14s - out_dir [%s]', 'Executor', experiment.out_dir)
+        os.system('mkdir -p ' + experiment.out_dir)

         # FTRACE: start (if a configuration has been provided)
         if self.te.ftrace and self._target_conf_flag(tc, 'ftrace'):
@@ -513,23 +522,23 @@ class Executor():
             self.te.emeter.reset()

         # WORKLOAD: Run the configured workload
-        wload.run(out_dir=self.te.out_dir, cgroup=self._cgroup)
+        wload.run(out_dir=experiment.out_dir, cgroup=self._cgroup)

         # ENERGY: collect measurements
         if self.te.emeter:
-            self.te.emeter.report(self.te.out_dir)
+            self.te.emeter.report(experiment.out_dir)

         # FTRACE: stop and collect measurements
         if self.te.ftrace and self._target_conf_flag(tc, 'ftrace'):
             self.te.ftrace.stop()

-            trace_file = self.te.out_dir + '/trace.dat'
+            trace_file = experiment.out_dir + '/trace.dat'
             self.te.ftrace.get_trace(trace_file)
             logging.info(r'%14s - Collected FTrace binary trace:', 'Executor')
             logging.info(r'%14s - %s', 'Executor',
                          trace_file.replace(self.te.res_dir, ''))

-            stats_file = self.te.out_dir + '/trace_stat.json'
+            stats_file = experiment.out_dir + '/trace_stat.json'
             self.te.ftrace.get_stats(stats_file)
             logging.info(r'%14s - Collected FTrace function profiling:', 'Executor')
             logging.info(r'%14s - %s', 'Executor',
diff --git a/libs/utils/test.py b/libs/utils/test.py
index a0c101f5360e4ddafade53bae55431abd5c64f75..e497cc8fb83054f7e577c792cb021d731dbad5e0 100644
--- a/libs/utils/test.py
+++ b/libs/utils/test.py
@@ -19,6 +19,11 @@ import logging
 import os
 import unittest

+from bart.sched.SchedAssert import SchedAssert
+from bart.sched.SchedMultiAssert import SchedMultiAssert
+from devlib.utils.misc import memoized
+import wrapt
+
 from conf import JsonConf

 from executor import Executor
@@ -100,4 +105,62 @@ class LisaTest(unittest.TestCase):
         Code executed after running the experiments
         """

+    @memoized
+    def get_multi_assert(self, experiment, task_filter=""):
+        """
+        Return a SchedMultiAssert over the tasks whose names contain task_filter
+
+        By default, this includes _all_ the tasks that were executed for the
+        experiment.
+        """
+        tasks = experiment.wload.tasks.keys()
+        return SchedMultiAssert(experiment.out_dir,
+                                self.te.topology,
+                                [t for t in tasks if task_filter in t])
+
+    def get_start_time(self, experiment):
+        """
+        Get the time at which the experiment workload began executing
+        """
+        start_times_dict = self.get_multi_assert(experiment).getStartTime()
+        return min([t["starttime"] for t in start_times_dict.itervalues()])
+
+    def get_end_times(self, experiment):
+        """
+        Get the time at which each task in the workload finished
+
+        Returned as a dict; {"task_name": finish_time, ...}
+        """
+
+        end_times = {}
+        for task in experiment.wload.tasks.keys():
+            sched_assert = SchedAssert(experiment.out_dir, self.te.topology,
+                                       execname=task)
+            end_times[task] = sched_assert.getEndTime()
+
+        return end_times
+
+
+@wrapt.decorator
+def experiment_test(wrapped_test, instance, args, kwargs):
+    """
+    Convert a LisaTest test method to be automatically called for each experiment
+
+    The method will be passed the experiment object and a list of the names of
+    tasks that were run as the experiment's workload.
+    """
+    for experiment in instance.executor.experiments:
+        tasks = experiment.wload.tasks.keys()
+        try:
+            wrapped_test(experiment, tasks, *args, **kwargs)
+        except AssertionError as e:
+            trace_relpath = os.path.join(experiment.out_dir, "trace.dat")
+            add_msg = "\n\tCheck trace file: " + os.path.abspath(trace_relpath)
+            orig_msg = e.args[0] if len(e.args) else ""
+            e.args = (orig_msg + add_msg,) + e.args[1:]
+            raise
+
+# Prevent nosetests from running experiment_test directly as a test case
+experiment_test.__test__ = False
+
 # vim :set tabstop=4 shiftwidth=4 expandtab
diff --git a/tests/eas/acceptance.py b/tests/eas/acceptance.py
index 3fc65a9ed3b2159f318ad29799a3003aae5082aa..fe3c0fd94ef3c7092a54c7e7036c03ee29448da6 100644
--- a/tests/eas/acceptance.py
+++ b/tests/eas/acceptance.py
@@ -15,19 +15,19 @@
 # limitations under the License.
 #

-from env import TestEnv
-from wlgen import RTA, Periodic, Step
-from devlib.target import TargetError
-
-import trappy
-import shutil
+import json
+import operator
 import os
+import trappy
 import unittest
-import logging
-import json

-import logging
-logging.basicConfig(level=logging.INFO)
+from bart.sched.SchedAssert import SchedAssert
+
+from devlib.target import TargetError
+
+from env import TestEnv
+from test import LisaTest, experiment_test
+
 # Read the config file and update the globals
 CONF_FILE = os.path.join(
     os.path.dirname(
@@ -38,77 +38,43 @@
 with open(CONF_FILE, "r") as fh:
     conf_vars = json.load(fh)
     globals().update(conf_vars)

+class EasTest(LisaTest):
+    """
+    Base class for EAS tests
+    """

-def local_setup(env):
-    env.target.cpufreq.set_all_governors("performance")
-
-    if ENABLE_EAS:
-        env.target.execute(
-            "echo ENERGY_AWARE > /sys/kernel/debug/sched_features")
-
-    if SET_IS_BIG_LITTLE:
-        try:
-            env.target.write_value("/proc/sys/kernel/sched_is_big_little", 1)
-        except TargetError:
-            # That flag doesn't exist on mainline-integration kernels, so don't
-            # worry if the file isn't present.
-            pass
-
-def between_threshold_pct(a, b):
-    THRESHOLD_PERCENT = 3
-    lower = b - THRESHOLD_PERCENT
-    upper = b + THRESHOLD_PERCENT
-
-    if a >= lower and a <= upper:
-        return True
-    return False
-
-
-def between_threshold_abs(a, b):
-    THRESHOLD = 0.25
-    lower = b - THRESHOLD
-    upper = b + THRESHOLD
-
-    if a >= lower and a <= upper:
-        return True
-    return False
-
-
-SMALL_WORKLOAD = {
-
-    "duty_cycle_pct": SMALL_DCYCLE,
-    "duration_s": WORKLOAD_DURATION_S,
-    "period_ms": WORKLOAD_PERIOD_MS,
-}
-
-BIG_WORKLOAD = {
-
-    "duty_cycle_pct": BIG_DCYCLE,
-    "duration_s": WORKLOAD_DURATION_S,
-    "period_ms": WORKLOAD_PERIOD_MS,
-}
+    @classmethod
+    def setUpClass(cls, *args, **kwargs):
+        conf_file = os.path.join(os.path.dirname(os.path.abspath(__file__)),
+                                 cls.conf_basename)

-STEP_WORKLOAD = {
+        super(EasTest, cls)._init(conf_file, *args, **kwargs)

-    "start_pct": STEP_LOW_DCYCLE,
-    "end_pct": STEP_HIGH_DCYCLE,
-    "time_s": WORKLOAD_DURATION_S,
-    "loops": 2
-}
+    @classmethod
+    def _experimentsInit(cls, *args, **kwargs):
+        super(EasTest, cls)._experimentsInit(*args, **kwargs)

-from bart.sched.SchedAssert import SchedAssert
-from bart.sched.SchedMultiAssert import SchedMultiAssert
-import operator
-import json
+        if SET_IS_BIG_LITTLE:
+            try:
+                cls.target.write_value(
+                    "/proc/sys/kernel/sched_is_big_little", 1)
+            except TargetError:
+                # That flag doesn't exist on mainline-integration kernels, so
+                # don't worry if the file isn't present.
+                pass

+    def _do_test_first_cpu(self, experiment, tasks):
+        """Test that all tasks start on a big CPU"""

-def log_result(data, log_fh):
-    result_str = json.dumps(data, indent=3)
-    logging.info(result_str)
-    log_fh.write(result_str)
+        sched_assert = self.get_multi_assert(experiment)

+        self.assertTrue(
+            sched_assert.assertFirstCpu(
+                self.target.bl.bigs,
+                rank=len(tasks)),
+            msg="Not all the new generated tasks started on a big CPU")

-class ForkMigration(unittest.TestCase):
+class ForkMigration(EasTest):
     """
     Goal
     ====
@@ -127,70 +93,14 @@ class ForkMigration(unittest.TestCase):
     The threads start on a big core.
""" - @classmethod - def setUpClass(cls): - cls.params = {} - cls.task_prefix = "fmig" - cls.env = TestEnv(test_conf=TEST_CONF) - cls.trace_file = os.path.join(cls.env.res_dir, "fork_migration.dat") - cls.log_file = os.path.join(cls.env.res_dir, "fork_migration.json") - cls.populate_params() - cls.tasks = cls.params.keys() - cls.num_tasks = len(cls.tasks) - local_setup(cls.env) - cls.run_workload() - cls.log_fh = open(os.path.join(cls.env.res_dir, cls.log_file), "w") - - @classmethod - def tearDownClass(cls): - cls.log_fh.close() - - @classmethod - def populate_params(cls): - big_prefix = cls.task_prefix + "_big" - for idx in range(len(cls.env.target.bl.bigs)): - task = big_prefix + str(idx) - cls.params[task] = Periodic(**BIG_WORKLOAD).get() - - little_prefix = cls.task_prefix + "_little" - for idx in range(len(cls.env.target.bl.littles)): - task = little_prefix + str(idx) - cls.params[task] = Periodic(**SMALL_WORKLOAD).get() - - @classmethod - def run_workload(cls): - wload = RTA( - cls.env.target, - "fork_migration", - calibration=cls.env.calibration()) - wload.conf(kind="profile", params=cls.params) - cls.env.ftrace.start() - wload.run( - out_dir=cls.env.res_dir, - background=False) - cls.env.ftrace.stop() - trace = cls.env.ftrace.get_trace(cls.trace_file) - - def test_first_cpu(self): - "Fork Migration: Test First CPU" - - logging.info("Fork Migration: Test First CPU") - f_assert = SchedMultiAssert( - self.trace_file, - self.env.topology, - execnames=self.tasks) - - log_result( - f_assert.getFirstCpu(), self.log_fh) + conf_basename = "acceptance_fork_migration.config" - self.assertTrue( - f_assert.assertFirstCpu( - self.env.target.bl.bigs, - rank=self.num_tasks), - msg="Not all the new generated tasks started on a big CPU") + @experiment_test + def test_first_cpu(self, experiment, tasks): + """Fork Migration: Test First CPU""" + self._do_test_first_cpu(experiment, tasks) - -class SmallTaskPacking(unittest.TestCase): +class SmallTaskPacking(EasTest): """ Goal ==== @@ -210,84 +120,31 @@ class SmallTaskPacking(unittest.TestCase): All tasks run on little cpus. 
""" - @classmethod - def setUpClass(cls): - cls.params = {} - cls.task_prefix = "stp" - cls.env = TestEnv(test_conf=TEST_CONF) - cls.trace_file = os.path.join( - cls.env.res_dir, - "small_task_packing.dat") - cls.log_file = os.path.join(cls.env.res_dir, "small_task_packing.json") - cls.num_tasks = len(cls.env.target.bl.bigs + cls.env.target.bl.littles) - cls.populate_params() - cls.tasks = cls.params.keys() - local_setup(cls.env) - cls.run_workload() - cls.s_assert = SchedMultiAssert( - cls.trace_file, - cls.env.topology, - execnames=cls.tasks) - cls.log_fh = open(os.path.join(cls.env.res_dir, cls.log_file), "w") + conf_basename = "acceptance_small_task_packing.config" - @classmethod - def tearDownClass(cls): - cls.log_fh.close() - - @classmethod - def populate_params(cls): - for i in range(cls.num_tasks): - task = cls.task_prefix + str(i) - cls.params[task] = Periodic(**SMALL_WORKLOAD).get() - - @classmethod - def run_workload(cls): - wload = RTA( - cls.env.target, - "small_task_packing", - calibration=cls.env.calibration()) - wload.conf(kind="profile", params=cls.params) - cls.env.ftrace.start() - wload.run( - out_dir=cls.env.res_dir, - background=False) - cls.env.ftrace.stop() - trace = cls.env.ftrace.get_trace(cls.trace_file) - - def test_small_task_pack_first_cpu(self): - "Small Task Packing: First CPU: BIG" - - logging.info("Small Task Packing: First CPU: BIG\n") - log_result(self.s_assert.getFirstCpu(), self.log_fh) - self.assertTrue( - self.s_assert.assertFirstCpu( - self.env.target.bl.bigs, - rank=self.num_tasks), - msg="Not all the new generated tasks started on a big CPU") + @experiment_test + def test_first_cpu(self, experiment, tasks): + """Small Task Packing: test first CPU""" + self._do_test_first_cpu(experiment, tasks) - def test_small_task_residency(self): + @experiment_test + def test_small_task_residency(self, experiment, tasks): "Small Task Packing: Test Residency (Little Cluster)" - logging.info("Small Task Packing: Test Residency (Little Cluster)") - log_result( - self.s_assert.getResidency( - "cluster", - self.env.target.bl.littles, - percent=True), self.log_fh) + sched_assert = self.get_multi_assert(experiment) self.assertTrue( - self.s_assert.assertResidency( + sched_assert.assertResidency( "cluster", - self.env.target.bl.littles, + self.target.bl.littles, EXPECTED_RESIDENCY_PCT, operator.ge, percent=True, - rank=self.num_tasks), + rank=len(tasks)), msg="Not all tasks are running on LITTLE cores for at least {}% of their execution time"\ .format(EXPECTED_RESIDENCY_PCT)) - -class OffloadMigrationAndIdlePull(unittest.TestCase): +class OffloadMigrationAndIdlePull(EasTest): """ Goal ==== @@ -336,130 +193,28 @@ class OffloadMigrationAndIdlePull(unittest.TestCase): """ - @classmethod - def setUpClass(cls): - cls.params = {} - cls.env = TestEnv(test_conf=TEST_CONF) - cls.trace_file = os.path.join(cls.env.res_dir, "offload_idle_pull.dat") - cls.log_file = os.path.join(cls.env.res_dir, "offload_idle_pull.json") - cls.early_starters = [] - cls.migrators = [] - cls.num_tasks = len(cls.env.target.bl.bigs) - cls.populate_tasks() - local_setup(cls.env) - cls.run_workload() - - cls.trace = trappy.FTrace(cls.trace_file) - cls.m_assert = SchedMultiAssert(cls.trace, cls.env.topology, - execnames=cls.migrators) - cls.e_assert = SchedMultiAssert(cls.trace, cls.env.topology, - execnames=cls.early_starters) - - all_tasks = cls.early_starters + cls.migrators - cls.a_assert = SchedMultiAssert(cls.trace, cls.env.topology, - execnames=all_tasks) - cls.offset = cls.get_offset() - - 
cls.end_times = cls.calculate_end_times() - cls.log_fh = open(os.path.join(cls.env.res_dir, cls.log_file), "w") - - @classmethod - def tearDownClass(cls): - cls.log_fh.close() - - @classmethod - def populate_tasks(cls): - migrator_workload = BIG_WORKLOAD.copy() - migrator_workload["duration_s"] = 9 - migrator_workload["delay_s"] = OFFLOAD_MIGRATION_MIGRATOR_DELAY - - for idx in range(cls.num_tasks): - task = "early_starters" + str(idx) - cls.params[task] = Periodic(**BIG_WORKLOAD).get() - cls.early_starters.append(task) + conf_basename = "acceptance_offload_idle_pull.config" - # Tasks that will be idle pulled - task = "migrator" + str(idx) - cls.params[task] = Periodic(**migrator_workload).get() - cls.migrators.append(task) + @experiment_test + def test_first_cpu(self, experiment, tasks): + """Offload Migration and Idle Pull: Test First CPU""" + self._do_test_first_cpu(experiment, tasks) - @classmethod - def run_workload(cls): - - wload = RTA( - cls.env.target, - "offload_idle_pull", - calibration=cls.env.calibration()) - wload.conf(kind="profile", params=cls.params) - cls.env.ftrace.start() - wload.run( - out_dir=cls.env.res_dir, - background=False) - cls.env.ftrace.stop() - trace = cls.env.ftrace.get_trace(cls.trace_file) - - @classmethod - def get_offset(cls): - task_start_times = cls.a_assert.getStartTime().values() - return min([t['starttime'] for t in task_start_times]) - - @classmethod - def calculate_end_times(cls): - - end_times = {} - for task in cls.params.keys(): - sched_assert = SchedAssert(cls.trace, cls.env.topology, - execname=task) - end_times[task] = sched_assert.getEndTime() - - return end_times - - def get_migrator_activation_time(self): - start_times_dict = self.m_assert.getStartTime() - start_time = min(t['starttime'] for t in start_times_dict.itervalues()) - - return start_time + OFFLOAD_MIGRATION_MIGRATOR_DELAY - - def test_first_cpu_early_starters(self): - """Offload Migration and Idle Pull: Test First CPU (Early Starters)""" - - logging.info( - "Offload Migration and Idle Pull: Test First CPU (Early Starters)") - log_result( - self.e_assert.getFirstCpu(), self.log_fh) - - self.assertTrue( - self.e_assert.assertFirstCpu( - self.env.target.bl.bigs, - rank=self.num_tasks), - msg="Not all the new 'early starter' tasks started on a big CPU") - - def test_first_cpu_migrators(self): - "Offload Migration and Idle Pull: Test First CPU (Migrators)" - - logging.info( - "Offload Migration and Idle Pull: Test First CPU (Migrators)") - - log_result( - self.m_assert.getFirstCpu(), self.log_fh) - - self.assertTrue( - self.m_assert.assertFirstCpu( - self.env.target.bl.bigs, - rank=self.num_tasks), - msg="Not all the new 'migrator' tasks started on a big CPU") - - def test_big_cpus_fully_loaded(self): + @experiment_test + def test_big_cpus_fully_loaded(self, experiment, tasks): """Offload Migration and Idle Pull: Big cpus are fully loaded as long as there are tasks left to run in the system""" - num_big_cpus = len(self.env.target.bl.bigs) + num_big_cpus = len(self.target.bl.bigs) + + sched_assert = self.get_multi_assert(experiment) - end_times = sorted(self.end_times.values()) + end_times = sorted(self.get_end_times(experiment).values()) # Window of time until the first migrator finishes - window = (self.offset, end_times[-num_big_cpus]) - busy_time = self.a_assert.getCPUBusyTime("cluster", - self.env.target.bl.bigs, - window=window, percent=True) + window = (self.get_start_time(experiment), end_times[-num_big_cpus]) + busy_time = sched_assert.getCPUBusyTime("cluster", + 
self.target.bl.bigs, + window=window, percent=True) + msg = "Big cpus were not fully loaded while there were enough big tasks to fill them" self.assertGreater(busy_time, OFFLOAD_EXPECTED_BUSY_TIME_PCT, msg=msg) @@ -468,9 +223,9 @@ class OffloadMigrationAndIdlePull(unittest.TestCase): for i in range(num_big_cpus-1): big_cpus_left = num_big_cpus - i - 1 window = (end_times[-num_big_cpus+i], end_times[-num_big_cpus+i+1]) - busy_time = self.a_assert.getCPUBusyTime("cluster", - self.env.target.bl.bigs, - window=window, percent=True) + busy_time = sched_assert.getCPUBusyTime("cluster", + self.target.bl.bigs, + window=window, percent=True) expected_busy_time = OFFLOAD_EXPECTED_BUSY_TIME_PCT * \ big_cpus_left / num_big_cpus @@ -479,21 +234,27 @@ class OffloadMigrationAndIdlePull(unittest.TestCase): self.assertGreater(busy_time, expected_busy_time, msg=msg) - def test_little_cpus_run_tasks(self): + @experiment_test + def test_little_cpus_run_tasks(self, experiment, tasks): """Offload Migration and Idle Pull: Little cpus run tasks while bigs are busy""" - tasks = self.params.keys() + num_offloaded_tasks = len(tasks) / 2 - first_task_finish_time = None - for task in tasks: - end_time = self.end_times[task] - if not first_task_finish_time or (end_time < first_task_finish_time): - first_task_finish_time = end_time + end_times = self.get_end_times(experiment).values() + first_task_finish_time = min(end_times) - window = (self.get_migrator_activation_time(), first_task_finish_time) - busy_time = self.a_assert.getCPUBusyTime("cluster", - self.env.target.bl.littles, - window=window) + migrators_assert = self.get_multi_assert(experiment, "migrator") + start_time = min(t["starttime"] + for t in migrators_assert.getStartTime().itervalues()) + migrator_activation_time = start_time + OFFLOAD_MIGRATION_MIGRATOR_DELAY + + window = (migrator_activation_time, first_task_finish_time) + + all_tasks_assert = self.get_multi_assert(experiment) + + busy_time = all_tasks_assert.getCPUBusyTime("cluster", + self.target.bl.littles, + window=window) window_len = window[1] - window[0] expected_busy_time = window_len * num_offloaded_tasks * \ @@ -502,25 +263,26 @@ class OffloadMigrationAndIdlePull(unittest.TestCase): self.assertGreater(busy_time, expected_busy_time, msg=msg) - def test_all_tasks_run_on_a_big_cpu(self): + @experiment_test + def test_all_tasks_run_on_a_big_cpu(self, experiment, tasks): """Offload Migration and Idle Pull: All tasks run on a big cpu at some point Note: this test may fail in big.LITTLE platforms in which the little cpus are almost as performant as the big ones. """ - - for task in self.params.keys(): - sa = SchedAssert(self.trace, self.env.topology, execname=task) - window = (0, self.end_times[task]) - big_residency = sa.getResidency("cluster", self.env.target.bl.bigs, + for task in tasks: + sa = SchedAssert(experiment.out_dir, self.te.topology, execname=task) + end_times = self.get_end_times(experiment) + window = (0, end_times[task]) + big_residency = sa.getResidency("cluster", self.target.bl.bigs, window=window, percent=True) - log_result(big_residency, self.log_fh) msg = "Task {} didn't run on a big cpu.".format(task) self.assertGreater(big_residency, 0, msg=msg) - def test_all_tasks_finish_on_a_big_cpu(self): + @experiment_test + def test_all_tasks_finish_on_a_big_cpu(self, experiment, tasks): """Offload Migration and Idle Pull: All tasks finish on a big cpu Note: this test may fail in big.LITTLE systems where the @@ -529,15 +291,14 @@ class OffloadMigrationAndIdlePull(unittest.TestCase): task. 
""" - - for task in self.params.keys(): - sa = SchedAssert(self.trace, self.env.topology, execname=task) + for task in tasks: + sa = SchedAssert(experiment.out_dir, self.te.topology, execname=task) msg = "Task {} did not finish on a big cpu".format(task) - self.assertIn(sa.getLastCpu(), self.env.target.bl.bigs, msg=msg) + self.assertIn(sa.getLastCpu(), self.target.bl.bigs, msg=msg) -class WakeMigration(unittest.TestCase): +class WakeMigration(EasTest): """ Goal ==== @@ -561,193 +322,68 @@ class WakeMigration(unittest.TestCase): the big cpus when they are big. """ - @classmethod - def setUpClass(cls): - cls.params = {} - cls.env = TestEnv(test_conf=TEST_CONF) - cls.task_prefix = "wmig" - cls.trace_file = os.path.join(cls.env.res_dir, "wake_migration.dat") - cls.log_file = os.path.join(cls.env.res_dir, "wake_migration.json") - cls.populate_params() - cls.tasks = cls.params.keys() - cls.num_tasks = len(cls.tasks) - local_setup(cls.env) - cls.run_workload() - cls.s_assert = SchedMultiAssert( - cls.trace_file, - cls.env.topology, - execnames=cls.tasks) - cls.offset = cls.get_offset(cls.tasks[0]) - cls.log_fh = open(os.path.join(cls.env.res_dir, cls.log_file), "w") - - @classmethod - def tearDownClass(cls): - cls.log_fh.close() - - @classmethod - def populate_params(cls): - num_big_cpus = len(cls.env.target.bl.bigs) - - for i in range(num_big_cpus): - task_name = "{}_{}".format(cls.task_prefix, i) - cls.params[task_name] = Step(**STEP_WORKLOAD).get() + conf_basename = "acceptance_wake_migration.config" - cls.phase_duration = STEP_WORKLOAD["time_s"] - - @classmethod - def run_workload(cls): - wload = RTA( - cls.env.target, - "wake_migration", - calibration=cls.env.calibration()) - wload.conf(kind="profile", params=cls.params) - cls.env.ftrace.start() - wload.run( - out_dir=cls.env.res_dir, - background=False) - cls.env.ftrace.stop() - trace = cls.env.ftrace.get_trace(cls.trace_file) - - @classmethod - def get_offset(cls, task_name): - return SchedAssert( - cls.trace_file, - cls.env.topology, - execname=task_name).getStartTime() - - def test_first_cpu(self): + @experiment_test + def test_first_cpu(self, experiment, tasks): """Wake Migration: Test First CPU""" + self._do_test_first_cpu(experiment, tasks) - logging.info("Wake Migration: Test First CPU") + def _assert_switch(self, experiment, expected_switch_to, phases): + if expected_switch_to == "big": + switch_from = self.target.bl.littles + switch_to = self.target.bl.bigs + elif expected_switch_to == "little": + switch_from = self.target.bl.bigs + switch_to = self.target.bl.littles + else: + raise ValueError("Invalid expected_switch_to") - log_result(self.s_assert.getFirstCpu(), self.log_fh) - - self.assertTrue( - self.s_assert.assertFirstCpu( - self.env.target.bl.bigs, - rank=self.num_tasks), - msg="Not all the new generated tasks started on a big CPU") + sched_assert = self.get_multi_assert(experiment) + expected_time = (self.get_start_time(experiment) + + phases*WORKLOAD_DURATION_S) + switch_window = (max(expected_time - SWITCH_WINDOW_HALF, 0), + expected_time + SWITCH_WINDOW_HALF) - def test_little_big_switch1(self): - """Wake Migration: LITTLE -> BIG: 1""" - expected_time = self.offset + self.phase_duration - switch_window = ( - expected_time - - SWITCH_WINDOW_HALF, - expected_time + - SWITCH_WINDOW_HALF) - - logging.info( - "Wake Migration: LITTLE -> BIG Window: {}".format(switch_window)) - - log_result( - self.s_assert.assertSwitch( - "cluster", - self.env.target.bl.littles, - self.env.target.bl.bigs, - window=switch_window), self.log_fh) 
+ fmt = "Not all tasks wake-migrated to {} cores in the expected window: {}" + msg = fmt.format(expected_switch_to, switch_window) self.assertTrue( - self.s_assert.assertSwitch( + sched_assert.assertSwitch( "cluster", - self.env.target.bl.littles, - self.env.target.bl.bigs, - rank=self.num_tasks, - window=switch_window), - msg="Not all tasks are wake-migrated to big cores in the expected window: {}"\ - .format(switch_window)) - - def test_little_big_switch2(self): + switch_from, + switch_to, + window=switch_window, + rank=len(experiment.wload.tasks)), + msg=msg) + + @experiment_test + def test_little_big_switch1(self, experiment, tasks): + """Wake Migration: LITTLE -> BIG: 1""" + self._assert_switch(experiment, "big", 1) + + @experiment_test + def test_little_big_switch2(self, experiment, tasks): """Wake Migration: LITTLE -> BIG: 2""" # little - big - little - big # ^ # We want to test that this little to big migration happens. So we skip # the first three phases. - expected_time = self.offset + 3 * self.phase_duration - switch_window = ( - expected_time - - SWITCH_WINDOW_HALF, - expected_time + - SWITCH_WINDOW_HALF) - - logging.info( - "Wake Migration: LITTLE -> BIG Window: {}".format(switch_window)) - - log_result( - self.s_assert.assertSwitch( - "cluster", - self.env.target.bl.littles, - self.env.target.bl.bigs, - window=switch_window), self.log_fh) + self._assert_switch(experiment, "big", 3) - self.assertTrue( - self.s_assert.assertSwitch( - "cluster", - self.env.target.bl.littles, - self.env.target.bl.bigs, - rank=self.num_tasks, - window=switch_window), - msg="Not all tasks are wake-migrated to big cores in the expected window: {}"\ - .format(switch_window)) - - def test_big_little_switch1(self): + @experiment_test + def test_big_little_switch1(self, experiment, tasks): """Wake Migration: BIG -> LITLLE: 1""" - expected_time = self.offset - switch_window = ( - max(expected_time - SWITCH_WINDOW_HALF, 0), expected_time + SWITCH_WINDOW_HALF) + self._assert_switch(experiment, "little", 0) - logging.info( - "Wake Migration: BIG -> LITTLE Window: {}".format(switch_window)) - - log_result( - self.s_assert.assertSwitch( - "cluster", - self.env.target.bl.bigs, - self.env.target.bl.littles, - window=switch_window), self.log_fh) - - self.assertTrue( - self.s_assert.assertSwitch( - "cluster", - self.env.target.bl.bigs, - self.env.target.bl.littles, - rank=self.num_tasks, - window=switch_window), - msg="Not all tasks are wake-migrated to LITTLE cores in the expected window: {}"\ - .format(switch_window)) - - def test_big_little_switch2(self): + @experiment_test + def test_big_little_switch2(self, experiment, tasks): """Wake Migration: BIG -> LITLLE: 2""" # little - big - little - big # ^ # We want to test that this big to little migration happens. So we skip # the first two phases. 
-        expected_time = self.offset + 2 * self.phase_duration
-        switch_window = (
-            expected_time -
-            SWITCH_WINDOW_HALF,
-            expected_time +
-            SWITCH_WINDOW_HALF)
-
-        logging.info(
-            "Wake Migration: BIG -> LITTLE Window: {}".format(switch_window))
-
-        log_result(
-            self.s_assert.assertSwitch(
-                "cluster",
-                self.env.target.bl.bigs,
-                self.env.target.bl.littles,
-                window=switch_window), self.log_fh)
-
-        self.assertTrue(
-            self.s_assert.assertSwitch(
-                "cluster",
-                self.env.target.bl.bigs,
-                self.env.target.bl.littles,
-                rank=self.num_tasks,
-                window=switch_window),
-            msg="Not all tasks are wake-migrated to LITTLE cores in the expected window: {}"\
-                .format(switch_window))
+        self._assert_switch(experiment, "little", 2)
diff --git a/tests/eas/acceptance_fork_migration.config b/tests/eas/acceptance_fork_migration.config
new file mode 100644
index 0000000000000000000000000000000000000000..02d75ec708108d328738395b6724b96f751d1b54
--- /dev/null
+++ b/tests/eas/acceptance_fork_migration.config
@@ -0,0 +1,59 @@
+{
+    "modules" : [ "bl" ],
+    "exclude_modules" : [ "hwmon" ],
+    "tools" : [ "rt-app" ],
+    "ftrace" : {
+        "events" : [
+            "sched_overutilized",
+            "sched_energy_diff",
+            "sched_load_avg_task",
+            "sched_load_avg_cpu",
+            "sched_migrate_task",
+            "sched_switch"
+        ],
+    },
+    "wloads" : {
+        // Create N 100% tasks and M 10% tasks which run in parallel, where N is
+        // the number of big CPUs and M is the number of LITTLE CPUs.
+        "fmig" : {
+            "type" : "rt-app",
+            "conf" : {
+                "class" : "profile",
+                "params" : {
+                    "small" : {
+                        "kind" : "Periodic",
+                        "params" : {
+                            "duty_cycle_pct": 10,
+                            "duration_s": 5,
+                            "period_ms": 10,
+                        },
+                        // Create one task for each LITTLE CPU
+                        "tasks" : "little",
+                        "prefix" : "small"
+                    },
+                    "big" : {
+                        "kind" : "Periodic",
+                        "params" : {
+                            "duty_cycle_pct": 100,
+                            "duration_s" : 5,
+                            "period_ms": 10
+                        },
+                        // Create one task for each big CPU
+                        "tasks" : "big",
+                        "prefix" : "large",
+                    },
+                },
+            },
+        },
+    },
+    "confs" : [
+        {
+            "tag" : "",
+            "flags" : "ftrace",
+            "sched_features" : "ENERGY_AWARE",
+            "cpufreq" : {
+                "governor" : "performance"
+            }
+        }
+    ]
+}
diff --git a/tests/eas/acceptance_offload_idle_pull.config b/tests/eas/acceptance_offload_idle_pull.config
new file mode 100644
index 0000000000000000000000000000000000000000..f53d76c2b0ce3f3451762b6c972c3d0eb4c63a0e
--- /dev/null
+++ b/tests/eas/acceptance_offload_idle_pull.config
@@ -0,0 +1,56 @@
+{
+    "modules" : [ "bl" ],
+    "exclude_modules" : [ "hwmon" ],
+    "tools" : [ "rt-app" ],
+    "ftrace" : {
+        "events" : [
+            "sched_overutilized",
+            "sched_energy_diff",
+            "sched_load_avg_task",
+            "sched_load_avg_cpu",
+            "sched_migrate_task",
+            "sched_switch"
+        ],
+    },
+    "wloads" : {
+        "early_and_migrators" : {
+            "type" : "rt-app",
+            "conf" : {
+                "class" : "profile",
+                "params" : {
+                    "early" : {
+                        "kind" : "Periodic",
+                        "params" : {
+                            "duty_cycle_pct": 100,
+                            "duration_s": 5,
+                            "period_ms": 10,
+                        },
+                        // Create one task for each big CPU
+                        "tasks" : "big",
+                    },
+                    "migrator" : {
+                        "kind" : "Periodic",
+                        "params" : {
+                            "duty_cycle_pct": 100,
+                            "duration_s": 5,
+                            "period_ms": 10,
+                            "delay_s": 1
+                        },
+                        // Create one task for each big CPU
+                        "tasks" : "big",
+                    },
+                },
+            },
+        },
+    },
+    "confs" : [
+        {
+            "tag" : "",
+            "flags" : "ftrace",
+            "sched_features" : "ENERGY_AWARE",
+            "cpufreq" : {
+                "governor" : "performance"
+            }
+        }
+    ]
+}
diff --git a/tests/eas/acceptance_small_task_packing.config b/tests/eas/acceptance_small_task_packing.config
new file mode 100644
index 0000000000000000000000000000000000000000..251799433ad6a457d7780e75a303b0e3630a2159
--- /dev/null
+++ b/tests/eas/acceptance_small_task_packing.config
@@ -0,0 +1,42 @@
+{
+    "modules" : [ "bl" ],
+    "exclude_modules" : [ "hwmon" ],
+    "tools" : [ "rt-app" ],
+    "ftrace" : {
+        "events" : [
+            "sched_overutilized",
+            "sched_energy_diff",
+            "sched_load_avg_task",
+            "sched_load_avg_cpu",
+            "sched_migrate_task",
+            "sched_switch"
+        ],
+    },
+    "wloads" : {
+        // Create one small task for each CPU
+        "small_tasks" : {
+            "type" : "rt-app",
+            "conf" : {
+                "class" : "periodic",
+                "params" : {
+                    "duty_cycle_pct": 10,
+                    "duration_s": 5,
+                    "period_ms": 10,
+                },
+                // Create one task for each CPU
+                "tasks" : "cpus",
+                "prefix" : "stp"
+            },
+        },
+    },
+    "confs" : [
+        {
+            "tag" : "",
+            "flags" : "ftrace",
+            "sched_features" : "ENERGY_AWARE",
+            "cpufreq" : {
+                "governor" : "performance"
+            }
+        }
+    ]
+}
diff --git a/tests/eas/acceptance_wake_migration.config b/tests/eas/acceptance_wake_migration.config
new file mode 100644
index 0000000000000000000000000000000000000000..963f101fb88fff985109ede272100c4ea1106f00
--- /dev/null
+++ b/tests/eas/acceptance_wake_migration.config
@@ -0,0 +1,48 @@
+{
+    "modules" : [ "bl" ],
+    "exclude_modules" : [ "hwmon" ],
+    "tools" : [ "rt-app" ],
+    "ftrace" : {
+        "events" : [
+            "sched_overutilized",
+            "sched_energy_diff",
+            "sched_load_avg_task",
+            "sched_load_avg_cpu",
+            "sched_migrate_task",
+            "sched_switch",
+            "cpu_frequency",
+        ],
+    },
+    "wloads" : {
+        // Create one stepped task for each big CPU
+        "wake_migration" : {
+            "type" : "rt-app",
+            "conf" : {
+                "class" : "profile",
+                "params" : {
+                    "wmig" : {
+                        "kind" : "Step",
+                        "params" : {
+                            "start_pct": 10, // STEP_LOW_DCYCLE
+                            "end_pct": 50,   // STEP_HIGH_DCYCLE
+                            "time_s": 5,     // WORKLOAD_DURATION_S
+                            "loops": 2
+                        },
+                        // Create one task for each big CPU
+                        "tasks" : "big",
+                    },
+                },
+            },
+        },
+    },
+    "confs" : [
+        {
+            "tag" : "",
+            "flags" : "ftrace",
+            "sched_features" : "ENERGY_AWARE",
+            "cpufreq" : {
+                "governor" : "performance"
+            }
+        }
+    ]
+}