diff --git a/libs/utils/executor.py b/libs/utils/executor.py
index 7c7060b22fed9703c23816b4fd1155d0055d7a78..883c07470ef62e439a0dd446a6f0e217a121418a 100644
--- a/libs/utils/executor.py
+++ b/libs/utils/executor.py
@@ -166,6 +166,13 @@ class Executor():
         Number of iterations for each workload/conf combination. Default
         is 1.
     :type experiments_conf: dict
+
+    :ivar experiments: After calling :meth:`run`, the list of
+                       :class:`Experiment` objects that were run
+
+    :ivar iterations: The number of iterations run for each wload/conf pair
+                      (i.e. ``experiments_conf['iterations']``).
+
     """
 
     critical_tasks = {
@@ -215,9 +222,9 @@ class Executor():
         self.te = test_env
         self.target = self.te.target
 
-        self._iterations = self._experiments_conf.get('iterations', 1)
+        self.iterations = self._experiments_conf.get('iterations', 1)
         # Compute total number of experiments
-        self._exp_count = self._iterations \
+        self._exp_count = self.iterations \
                 * len(self._experiments_conf['wloads']) \
                 * len(self._experiments_conf['confs'])
 
@@ -233,7 +240,7 @@ class Executor():
 
         self._log.info('   %3d workloads (%d iterations each)',
                        len(self._experiments_conf['wloads']),
-                       self._iterations)
+                       self.iterations)
         wload_confs = ', '.join(self._experiments_conf['wloads'])
         self._log.info('      %s', wload_confs)
 
@@ -261,7 +268,7 @@ class Executor():
         for wl_idx in self._experiments_conf['wloads']:
             # TEST: configuration
             wload, test_dir = self._wload_init(tc, wl_idx)
-            for itr_idx in range(1, self._iterations + 1):
+            for itr_idx in range(1, self.iterations + 1):
                 exp = Experiment(
                     wload_name=wl_idx,
                     wload=wload,
@@ -663,7 +670,7 @@ class Executor():
         self._print_title('Experiment {}/{}, [{}:{}] {}/{}'\
                          .format(exp_idx, self._exp_count,
                                  tc_idx, experiment.wload_name,
-                                 experiment.iteration, self._iterations))
+                                 experiment.iteration, self.iterations))
 
         # Setup local results folder
         self._log.debug('out_dir set to [%s]', experiment.out_dir)
diff --git a/libs/utils/test.py b/libs/utils/test.py
index b6062168404c1baa5162ad240901fce6d6f56ad9..d3b96ec45a546406c926ac6ea22922192a58db88 100644
--- a/libs/utils/test.py
+++ b/libs/utils/test.py
@@ -56,6 +56,9 @@ class LisaTest(unittest.TestCase):
     experiments_conf = None
     """Override this with a dictionary or JSON path to configure the Executor"""
 
+    permitted_fail_pct = 0
+    """The percentage of iterations of each test that is permitted to fail"""
+
     @classmethod
     def _getTestConf(cls):
         if cls.test_conf is None:
@@ -81,9 +84,9 @@ class LisaTest(unittest.TestCase):
         """
         Set up logging and trigger running experiments
         """
-        cls.logger = logging.getLogger('LisaTest')
+        cls._log = logging.getLogger('LisaTest')
 
-        cls.logger.info('Setup tests execution engine...')
+        cls._log.info('Setup tests execution engine...')
 
         test_env = TestEnv(test_conf=cls._getTestConf())
         experiments_conf = cls._getExperimentsConf(test_env)
@@ -100,7 +103,7 @@ class LisaTest(unittest.TestCase):
         # Execute pre-experiments code defined by the test
         cls._experimentsInit()
 
-        cls.logger.info('Experiments execution...')
+        cls._log.info('Experiments execution...')
         cls.executor.run()
 
         cls.experiments = cls.executor.experiments
@@ -213,16 +216,33 @@ def experiment_test(wrapped_test, instance, args, kwargs):
     The method will be passed the experiment object and a list of the names of
     tasks that were run as the experiment's workload.
     """
+    failures = {}
     for experiment in instance.executor.experiments:
         tasks = experiment.wload.tasks.keys()
         try:
             wrapped_test(experiment, tasks, *args, **kwargs)
         except AssertionError as e:
             trace_relpath = os.path.join(experiment.out_dir, "trace.dat")
-            add_msg = "\n\tCheck trace file: " + os.path.abspath(trace_relpath)
-            orig_msg = e.args[0] if len(e.args) else ""
-            e.args = (orig_msg + add_msg,) + e.args[1:]
-            raise
+            add_msg = "Check trace file: " + os.path.abspath(trace_relpath)
+            msg = str(e) + "\n\t" + add_msg
+
+            test_key = (experiment.wload_name, experiment.conf['tag'])
+            failures[test_key] = failures.get(test_key, []) + [msg]
+
+    for fails in failures.itervalues():
+        iterations = instance.executor.iterations
+        fail_pct = 100. * len(fails) / iterations
+
+        msg = "{} failures from {} iteration(s):\n{}".format(
+            len(fails), iterations, '\n'.join(fails))
+        if fail_pct > instance.permitted_fail_pct:
+            raise AssertionError(msg)
+        else:
+            instance._log.warning(msg)
+            instance._log.warning(
+                'ALLOWING due to permitted_fail_pct={}'.format(
+                    instance.permitted_fail_pct))
+
 
 # Prevent nosetests from running experiment_test directly as a test case
 experiment_test.__test__ = False