diff --git a/lisa/analysis/base.py b/lisa/analysis/base.py index d79dd61693bf039bbd4d63a4f00f47fcb5d5d19e..e8b491e02ac3173a652df607a7a7630f2025f6df 100644 --- a/lisa/analysis/base.py +++ b/lisa/analysis/base.py @@ -30,7 +30,7 @@ from trappy.utils import listify ResidencyTime = namedtuple('ResidencyTime', ['total', 'active']) ResidencyData = namedtuple('ResidencyData', ['label', 'residency']) -class AnalysisBase(object): +class AnalysisBase: """ Base class for Analysis modules. diff --git a/lisa/env.py b/lisa/env.py index 611c4b836c03733cfb6e724443bc711b59fff0ae..cb2f7605677073f856e8ab9c798ac7e505c86a2a 100644 --- a/lisa/env.py +++ b/lisa/env.py @@ -21,7 +21,6 @@ import os import os.path import contextlib import logging -from pathlib import Path import shlex from collections.abc import Mapping import copy @@ -36,7 +35,7 @@ from devlib.platform.gem5 import Gem5SimulationPlatform from lisa.wlgen.rta import RTA from lisa.energy_meter import EnergyMeter -from lisa.utils import Loggable, MultiSrcConf, HideExekallID, resolve_dotted_name, get_all_subclasses, import_all_submodules, LISA_HOME, StrList +from lisa.utils import Loggable, MultiSrcConf, HideExekallID, resolve_dotted_name, get_all_subclasses, import_all_submodules, LISA_HOME, StrList, setup_logging from lisa.platforms.platinfo import PlatformInfo @@ -49,11 +48,46 @@ RESULT_DIR = 'results' LATEST_LINK = 'results_latest' DEFAULT_DEVLIB_MODULES = ['sched', 'cpufreq', 'cpuidle'] -class ArtifactPath(str): +class ArtifactPath(str, Loggable, HideExekallID): """Path to a folder that can be used to store artifacts of a function. This must be a clean folder, already created on disk. """ - pass + def __new__(cls, root, relative, *args, **kwargs): + root = os.path.realpath(str(root)) + relative = str(relative) + # we only support paths relative to the root parameter + assert not os.path.isabs(relative) + absolute = os.path.join(root, relative) + + # Use a resolved absolute path so it is more convenient for users to + # manipulate + path = os.path.realpath(absolute) + + path_str = super().__new__(cls, path, *args, **kwargs) + # Record the actual root, so we can relocate the path later with an + # updated root + path_str.root = root + path_str.relative = relative + return path_str + + def __fspath__(self): + return str(self) + + def __reduce__(self): + # Serialize the path relative to the root, so it can be relocated + # easily + relative = self.relative_to(self.root) + return (type(self), (self.root, relative)) + + def relative_to(self, path): + return os.path.relpath(str(self), start=str(path)) + + def with_root(self, root): + # Get the path relative to the old root + relative = self.relative_to(self.root) + + # Swap in the new root and return a new instance + return type(self)(root, relative) class TargetConf(MultiSrcConf, HideExekallID): YAML_MAP_TOP_LEVEL_KEY = 'target-conf' @@ -164,6 +198,8 @@ class TestEnv(Loggable, HideExekallID): self._res_dir = res_dir if self._res_dir: os.makedirs(self._res_dir, exist_ok=True) + if os.listdir(self._res_dir): + raise ValueError('res_dir must be empty: {}'.format(self._res_dir)) self.target_conf = target_conf logger.debug('Target configuration %s', self.target_conf) @@ -220,7 +256,7 @@ class TestEnv(Loggable, HideExekallID): return cls(target_conf=target_conf, plat_info=plat_info) @classmethod - def from_cli(cls, argv=None): + def from_cli(cls, argv=None) -> 'TestEnv': """ Create a TestEnv from command line arguments. 
@@ -232,61 +268,79 @@ class TestEnv(Loggable, HideExekallID): to be confusing (help message woes, argument clashes...), so for now this should only be used in scripts that only expect TestEnv args. """ - # Subparsers cannot let us specify --kind=android, at best we could - # have --android which is lousy. Instead, use a first parser to figure - # out the target kind, then create a new parser for that specific kind. - kind_parser = argparse.ArgumentParser( - # Disable the automatic help to not catch e.g. ./script.py -k linux -h - add_help=False, + parser = argparse.ArgumentParser( formatter_class=argparse.RawDescriptionHelpFormatter, description=textwrap.dedent( """ - Extra arguments differ depending on the value passed to 'kind'. - Try e.g. "--kind android -h" to see the arguments for android targets. - """)) - - kind_parser.add_argument( - "--kind", "-k", choices=["android", "linux", "host"], - help="The kind of target to create") - - # Add a self-managed help argument, see why below - kind_parser.add_argument("--help", "-h", action="store_true") - - args = kind_parser.parse_known_args(argv)[0] - - # Print the generic help only if we can't print the proper --kind help - if not args.kind or (args.help and not args.kind): - kind_parser.print_help() - sys.exit(2) - - kind = args.kind - - parser = argparse.ArgumentParser() - parser.add_argument("--kind", "-k", - choices=[kind], - required=True, - help="The kind of target to create") - - if kind == "android": - parser.add_argument("--device", "-d", type=str, required=True, - help="The ADB ID of the target") - elif kind == "linux": - parser.add_argument("--hostname", "-n", type=str, required=True, dest="host", - help="The hostname/IP of the target") - parser.add_argument("--username", "-u", type=str, required=True, - help="Login username") - parser.add_argument("--password", "-p", type=str, required=True, - help="Login password") - - parser.add_argument("--platform-info", "-pi", type=str, - help="Path to a PlatformInfo yaml file") + Connect to a target using the provided configuration in order + to run a test. + + EXAMPLES + + --target-conf can point to a YAML target configuration file + with all the necessary connection information: + $ {script} --target-conf my_target.yml + + Alternatively, --kind must be set along with the relevant credentials: + $ {script} --kind linux --host 192.0.2.1 --username root --password root + + In both cases, --platform-info can point to a PlatformInfo YAML + file. + + """.format( + script=os.path.basename(sys.argv[0]) + ))) + + + kind_group = parser.add_mutually_exclusive_group(required=True) + kind_group.add_argument("--kind", "-k", + choices=["android", "linux", "host"], + help="The kind of target to connect to.") + + kind_group.add_argument("--target-conf", "-t", + help="Path to a TargetConf yaml file. Supersedes other target connection related options.") + + device_group = parser.add_mutually_exclusive_group() + device_group.add_argument("--device", "-d", + help="The ADB ID of the target. Supersedes --host. Only applies to Android kind.") + device_group.add_argument("--host", "-n", + help="The hostname/IP of the target.") + + parser.add_argument("--username", "-u", + help="Login username. Only applies to Linux kind.") + parser.add_argument("--password", "-p", + help="Login password. 
Only applies to Linux kind.") + + parser.add_argument("--platform-info", "-pi", + help="Path to a PlatformInfo yaml file.") + + parser.add_argument("--log-level", + choices=('warning', 'info', 'debug'), + help="Verbosity level of the logs.") + + parser.add_argument("--res-dir", "-o", + help="Result directory of the created TestEnv. If no directory is specified, a default location under $LISA_HOME will be used.") + + # Options that are not a key in TargetConf must be listed here + not_target_conf_opt = ( + 'platform_info', 'log_level', 'res_dir', 'target_conf', + ) args = parser.parse_args(argv) + if args.log_level: + setup_logging(level=args.log_level.upper()) + + if args.kind and not (args.host or args.device): + parser.error('--host or --device must be specified') + platform_info = PlatformInfo.from_yaml_map(args.platform_info) if args.platform_info else None - target_conf = TargetConf( - {k : v for k, v in vars(args).items() if k != "platform_info"}) + if args.target_conf: + target_conf = TargetConf.from_yaml_map(args.target_conf) + else: + target_conf = TargetConf( + {k : v for k, v in vars(args).items() if k not in not_target_conf_opt}) - return TestEnv(target_conf, platform_info) + return cls(target_conf, platform_info, res_dir=args.res_dir) def _init_target(self, target_conf, res_dir): """ @@ -437,6 +491,13 @@ class TestEnv(Loggable, HideExekallID): """ logger = self.get_logger() + if isinstance(self._res_dir, ArtifactPath): + root = self._res_dir.root + relative = self._res_dir.relative + else: + root = self._res_dir + relative = '' + while True: time_str = datetime.now().strftime('%Y%m%d_%H%M%S.%f') if not name: @@ -444,10 +505,12 @@ elif append_time: name = "{}-{}".format(name, time_str) - res_dir = os.path.join(self._res_dir, name) + # If we were given an ArtifactPath with an existing root, we + # preserve that root so it can be relocated as the caller wants it + res_dir = ArtifactPath(root, os.path.join(relative, name)) # Compute base installation path - logger.info('Creating result directory: %s', res_dir) + logger.info('Creating result directory: %s', str(res_dir)) # It will fail if the folder already exists. In that case, # append_time should be used to ensure we get a unique name. 
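Reviewer note (not part of the patch): a minimal usage sketch of the relocation behaviour the new ArtifactPath is designed to provide, matching the class this diff adds to lisa/utils.py; the paths and the 'UserspaceSanity/traces' sub-folder are hypothetical.

    import pickle
    from lisa.utils import ArtifactPath

    # The (root, relative) split is recorded alongside the realpath-resolved
    # absolute path (symlinks along the way may change the exact string).
    p = ArtifactPath('/data/results', 'UserspaceSanity/traces')
    # str(p) == '/data/results/UserspaceSanity/traces'

    # __reduce__() serializes (root, relative) rather than the absolute path,
    # so a deserialized value can be rebased...
    p2 = pickle.loads(pickle.dumps(p))

    # ...and with_root() swaps in a new root when the results folder moved:
    moved = p.with_root('/mnt/archive/results')
    # str(moved) == '/mnt/archive/results/UserspaceSanity/traces'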
@@ -464,14 +527,14 @@ class TestEnv(Loggable, HideExekallID): raise if symlink: - res_lnk = Path(LISA_HOME, LATEST_LINK) + res_lnk = os.path.join(LISA_HOME, LATEST_LINK) with contextlib.suppress(FileNotFoundError): - res_lnk.unlink() + os.remove(res_lnk) # There may be a race condition with another tool trying to create # the link with contextlib.suppress(FileExistsError): - res_lnk.symlink_to(res_dir) + os.symlink(res_dir, res_lnk) return res_dir diff --git a/lisa/exekall_customize.py b/lisa/exekall_customize.py index 236255819c419433f7df242d8bdaf125b664e162..d087b219a39f54007004bd0ce971115ac9f12aa7 100644 --- a/lisa/exekall_customize.py +++ b/lisa/exekall_customize.py @@ -27,55 +27,19 @@ from pathlib import Path import xml.etree.ElementTree as ET import traceback -from lisa.env import TestEnv, TargetConf, ArtifactPath +from lisa.env import TestEnv, TargetConf from lisa.platforms.platinfo import PlatformInfo -from lisa.utils import HideExekallID, Loggable +from lisa.utils import HideExekallID, Loggable, ArtifactPath from lisa.tests.kernel.test_bundle import TestBundle, Result, ResultBundle, CannotCreateError from exekall import utils, engine -from exekall.engine import reusable, ExprData, Consumer, PrebuiltOperator, NoValue, get_name +from exekall.engine import reusable, ExprData, Consumer, PrebuiltOperator, NoValue, get_name, get_mro from exekall.customization import AdaptorBase @reusable(False) -class ArtifactStorage(ArtifactPath, Loggable, HideExekallID): - def __new__(cls, root, relative, *args, **kwargs): - root = Path(root).resolve() - relative = Path(relative) - # we only support paths relative to the root parameter - assert not relative.is_absolute() - absolute = root/relative - - # Use a resolved absolute path so it is more convenient for users to - # manipulate - path = absolute.resolve() - - path_str = super().__new__(cls, str(path), *args, **kwargs) - # Record the actual root, so we can relocate the path later with an - # updated root - path_str.root = root - return path_str - - def __fspath__(self): - return str(self) - - def __reduce__(self): - # Serialize the path relatively to the root, so it can be relocated - # easily - relative = self.relative_to(self.root) - return (type(self), (self.root, relative)) - - def relative_to(self, path): - return os.path.relpath(str(self), start=str(path)) - - def with_root(self, root): - # Get the path relative to the old root - relative = self.relative_to(self.root) - - # Swap-in the new root and return a new instance - return type(self)(root, relative) - +class ExekallArtifactPath(ArtifactPath): @classmethod - def from_expr_data(cls, data:ExprData, consumer:Consumer) -> 'ArtifactStorage': + def from_expr_data(cls, data:ExprData, consumer:Consumer) -> 'ExekallArtifactPath': """ Factory used when running under `exekall` """ @@ -138,17 +102,21 @@ class LISAAdaptor(AdaptorBase): parser.add_argument('--platform-info', help="Platform info file") + @staticmethod + def get_default_type_goal_pattern_set(): + return {'*.ResultBundle'} + def get_db_loader(self): return self.load_db @classmethod def load_db(cls, db_path, *args, **kwargs): - # This will relocate ArtifactStorage instances to the new absolute path - # of the results folder, in case it has been moved to another place + # This will relocate ArtifactPath instances to the new absolute path of + # the results folder, in case it has been moved to another place artifact_dir = Path(db_path).parent.resolve() db = engine.StorageDB.from_path(db_path, *args, **kwargs) - # Relocate ArtifactStorage 
embeded in objects so they will always + # Relocate ArtifactPath embedded in objects so they will always # contain an absolute path that adapts to the local filesystem for serial in db.obj_store.get_all(): val = serial.value @@ -157,7 +125,7 @@ class LISAAdaptor(AdaptorBase): except AttributeError: continue for attr, attr_val in dct.items(): - if isinstance(attr_val, ArtifactStorage): + if isinstance(attr_val, ArtifactPath): setattr(val, attr, attr_val.with_root(artifact_dir) ) @@ -175,7 +143,7 @@ class LISAAdaptor(AdaptorBase): # Add symlinks to artifact folders for ExprValue that were used in the # ExprValue graph, but were initially computed for another Expression - if isinstance(val, ArtifactStorage): + if isinstance(val, ArtifactPath): val = Path(val) is_subfolder = (testcase_artifact_dir.resolve() in val.resolve().parents) # The folder is reachable from our ExprValue, but is not a @@ -287,7 +255,6 @@ class LISAAdaptor(AdaptorBase): if isinstance(value, ResultBundle): result = RESULT_TAG_MAP[value.result] short_msg = value.result.lower_name - #TODO: add API to ResultBundle to print the message without the Result msg = str(value) type_ = type(value) @@ -307,7 +274,7 @@ def append_result_tag(et_testcase, result, type_, short_msg, msg): type=get_name(type_, full_qual=True), type_bases=','.join( get_name(type_, full_qual=True) - for type_ in inspect.getmro(type_) + for type_ in get_mro(type_) ), message=str(short_msg), )) diff --git a/lisa/tests/kernel/cpufreq/sanity.py b/lisa/tests/kernel/cpufreq/sanity.py index 830c509f09ad3de73b96f802d037956ebeddb6d6..c2b1d7a58673b511b713cb1606d1c8717df72927 100644 --- a/lisa/tests/kernel/cpufreq/sanity.py +++ b/lisa/tests/kernel/cpufreq/sanity.py @@ -17,7 +17,8 @@ from lisa.tests.kernel.test_bundle import Result, ResultBundle, TestBundle from lisa.wlgen.sysbench import Sysbench -from lisa.env import TestEnv, ArtifactPath +from lisa.env import TestEnv +from lisa.utils import ArtifactPath class UserspaceSanity(TestBundle): """ diff --git a/lisa/tests/kernel/hotplug/torture.py b/lisa/tests/kernel/hotplug/torture.py index eea607c5b0bd37f7cc9712d76ad5930d4f2197f0..096a90f1fc89548c51a94409c989e254a520ef79 100644 --- a/lisa/tests/kernel/hotplug/torture.py +++ b/lisa/tests/kernel/hotplug/torture.py @@ -24,7 +24,8 @@ from devlib.exception import TimeoutError from lisa.tests.kernel.test_bundle import TestMetric, ResultBundle, TestBundle from lisa.target_script import TargetScript -from lisa.env import TestEnv, ArtifactPath +from lisa.env import TestEnv +from lisa.utils import ArtifactPath class HotplugTorture(TestBundle): diff --git a/lisa/tests/kernel/scheduler/sanity.py b/lisa/tests/kernel/scheduler/sanity.py index fe11bb4273191e2effc66b8ec6e256ccbb0b4ca4..978a15772378536a634ea8bc9366c11c8a96c531 100644 --- a/lisa/tests/kernel/scheduler/sanity.py +++ b/lisa/tests/kernel/scheduler/sanity.py @@ -17,6 +17,8 @@ import sys +from lisa.env import TestEnv +from lisa.utils import ArtifactPath from lisa.tests.kernel.test_bundle import TestMetric, ResultBundle, TestBundle from lisa.wlgen.sysbench import Sysbench @@ -51,6 +53,13 @@ class CapacitySanity(TestBundle): return cls(res_dir, te.plat_info, capa_work) + @classmethod + def from_testenv(cls, te:TestEnv, res_dir:ArtifactPath=None) -> 'CapacitySanity': + """ + Factory method to create a bundle using a live target + """ + return super().from_testenv(te, res_dir) + def test_capacity_sanity(self) -> ResultBundle: """ Assert that higher CPU capacity means more work done diff --git a/lisa/tests/kernel/test_bundle.py 
b/lisa/tests/kernel/test_bundle.py index 18d732b0993903dbae6213d45d2c6c319e4b2df0..2903a184250de826e09e40a173612f60ca79b536 100644 --- a/lisa/tests/kernel/test_bundle.py +++ b/lisa/tests/kernel/test_bundle.py @@ -19,6 +19,7 @@ import enum import os import os.path import abc +import sys from collections.abc import Mapping from devlib.target import KernelVersion @@ -28,8 +29,8 @@ from lisa.trace import Trace from lisa.wlgen.rta import RTA from lisa.perf_analysis import PerfAnalysis -from lisa.utils import Serializable, memoized -from lisa.env import TestEnv, ArtifactPath +from lisa.utils import Serializable, memoized, ArtifactPath +from lisa.env import TestEnv from lisa.platforms.platinfo import PlatformInfo class TestMetric: @@ -141,6 +142,13 @@ class ResultBundle: """ self.metrics[name] = TestMetric(data, units) + def display_and_exit(self) -> type(None): + print("Test result: {}".format(self)) + if self: + sys.exit(0) + else: + sys.exit(1) + class CannotCreateError(RuntimeError): """ Something prevented the creation of a :class:`TestBundle` instance @@ -290,7 +298,8 @@ class TestBundle(Serializable, abc.ABC): # it does not get broken. if cls.verify_serialization: bundle.to_dir(res_dir) - bundle = cls.from_dir(res_dir) + # Updating the res_dir breaks deserialization for some use cases + bundle = cls.from_dir(res_dir, update_res_dir=False) return bundle @@ -299,13 +308,16 @@ def _filepath(cls, res_dir): return os.path.join(res_dir, "{}.yaml".format(cls.__qualname__)) @classmethod - def from_dir(cls, res_dir): + def from_dir(cls, res_dir, update_res_dir=True): """ See :meth:`lisa.utils.Serializable.from_path` """ + res_dir = ArtifactPath(root=res_dir, relative='') + bundle = super().from_path(cls._filepath(res_dir)) # We need to update the res_dir to the one we were given - bundle.res_dir = res_dir + if update_res_dir: + bundle.res_dir = res_dir return bundle diff --git a/lisa/utils.py b/lisa/utils.py index ec53ef6e9c82033070389f94dc8f1486d1c1a866..041fa4557e3dde814d97451a41c8e67e5b3a3c9b 100644 --- a/lisa/utils.py +++ b/lisa/utils.py @@ -107,7 +107,16 @@ def import_all_submodules(pkg): for loader, module_name, is_pkg in pkgutil.walk_packages(pkg.__path__) ] -class Serializable: +class UnknownTagPlaceholder: + def __init__(self, yaml_tag, data, location=None): + self.yaml_tag = yaml_tag + self.data = data + self.location = location + + def __str__(self): + return '<UnknownTagPlaceholder of {}>'.format(self.yaml_tag) + +class Serializable(Loggable): """ A helper class for YAML serialization/deserialization @@ -135,17 +144,47 @@ class Serializable: yaml = cls._yaml # If allow_unicode=True, true unicode characters will be written to the # file instead of being replaced by escape sequence. - yaml.allow_unicode = (cls.YAML_ENCODING == 'utf-8') + yaml.allow_unicode = ('utf' in cls.YAML_ENCODING) yaml.default_flow_style = False yaml.indent = 4 yaml.Constructor.add_constructor('!include', cls._yaml_include_constructor) yaml.Constructor.add_constructor('!var', cls._yaml_var_constructor) yaml.Constructor.add_multi_constructor('!call:', cls._yaml_call_constructor) + + # Replace unknown tags by a placeholder object containing the data. 
+ # This happens when the class was not imported at the time the object + # was deserialized + yaml.Constructor.add_constructor(None, cls._yaml_unknown_tag_constructor) + #TODO: remove that once the issue is fixed # Workaround for ruamel.yaml bug #244: # https://bitbucket.org/ruamel/yaml/issues/244 yaml.Representer.add_multi_representer(type, yaml.Representer.represent_name) + @classmethod + def _yaml_unknown_tag_constructor(cls, loader, node): + # Get the basic data types that can be expressed using the YAML syntax, + # without using any tag-specific constructor + data = None + for constructor in ( + loader.construct_scalar, + loader.construct_sequence, + loader.construct_mapping + ): + try: + data = constructor(node) + except ruamel.yaml.constructor.ConstructorError: + continue + else: + break + + tag = node.tag + cls.get_logger().debug('Could not find constructor for YAML tag "{tag}" ({mark}), using a placeholder'.format( + tag=tag, + mark=str(node.start_mark).strip() + )) + + return UnknownTagPlaceholder(tag, data, location=node.start_mark) @classmethod def _yaml_call_constructor(cls, loader, suffix, node): @@ -398,6 +437,7 @@ class DeferredValue: return '<lazy value of {}>'.format(self.callback.__qualname__) class MultiSrcConf(SerializableConfABC, Loggable, Mapping): + #TODO: also add a help string in the structure and derive a help paragraph @abc.abstractmethod def STRUCTURE(): """ @@ -923,4 +963,45 @@ def setup_logging(filepath='logging.conf', level=logging.INFO): logging.info('Using LISA logging configuration:') logging.info(' %s', filepath) +class ArtifactPath(str, Loggable, HideExekallID): + """Path to a folder that can be used to store artifacts of a function. + This must be a clean folder, already created on disk. + """ + def __new__(cls, root, relative, *args, **kwargs): + root = os.path.realpath(str(root)) + relative = str(relative) + # we only support paths relative to the root parameter + assert not os.path.isabs(relative) + absolute = os.path.join(root, relative) + + # Use a resolved absolute path so it is more convenient for users to + # manipulate + path = os.path.realpath(absolute) + + path_str = super().__new__(cls, path, *args, **kwargs) + # Record the actual root, so we can relocate the path later with an + # updated root + path_str.root = root + path_str.relative = relative + return path_str + + def __fspath__(self): + return str(self) + + def __reduce__(self): + # Serialize the path relative to the root, so it can be relocated + # easily + relative = self.relative_to(self.root) + return (type(self), (self.root, relative)) + + def relative_to(self, path): + return os.path.relpath(str(self), start=str(path)) + + def with_root(self, root): + # Get the path relative to the old root + relative = self.relative_to(self.root) + + # Swap in the new root and return a new instance + return type(self)(root, relative) + # vim :set tabstop=4 shiftwidth=4 textwidth=80 expandtab diff --git a/tools/bisector b/tools/bisector index e253aad31739221f60194606fb0dc07c63a15f05..8273d3cce83751819d832ebfa8ea791e6687b9ee 100755 --- a/tools/bisector +++ b/tools/bisector @@ -1904,12 +1904,12 @@ class ExekallLISATestStep(ShellStep): show_dist = False, show_details = False, show_pass_rate = False, - show_artifact_dir = False, + show_artifact_dirs = False, testcase = [], iterations = [], ignore_non_issue = False, ignore_excep = [], - dump_artifact_dir = False, + dump_artifact_dirs = False, xunit2json = False, export_logs = False, download = True, @@ -1923,7 +1923,7 @@ class ExekallLISATestStep(ShellStep): 
show_basic = True show_rates = True show_dist = True - show_artifact_dir = True + show_artifact_dirs = True show_details = True ignore_non_issue = False @@ -2038,7 +2038,7 @@ class ExekallLISATestStep(ShellStep): step_res_seq = filtered_step_res_seq - if show_artifact_dir: + if show_artifact_dirs: out('Results directories:') # Apply processing on selected results @@ -2056,12 +2056,12 @@ class ExekallLISATestStep(ShellStep): else: error('No upload service available, could not upload LISA results.') - if show_artifact_dir: + if show_artifact_dirs: out(' #{i_stack: <2}: {step_res.results_path}'.format(**locals())) # Accumulate the results path to a file, that can be used to garbage # collect all results path that are not referenced by any report. - if dump_artifact_dir: + if dump_artifact_dirs: with open(dump_results_dir, 'a') as f: f.write(step_res.results_path + '\n') @@ -2184,7 +2184,7 @@ class ExekallLISATestStep(ShellStep): if show_details and not (ignore_non_issue and issue == 'passed'): for entry in filtered_entry_list: i_stack = entry['i_stack'] - results_path = '\n' + entry['results_path'] if show_artifact_dir else '' + results_path = '\n' + entry['results_path'] if show_artifact_dirs else '' exception_name, short_msg, msg = entry['details'] if show_details == 'msg': diff --git a/tools/exekall/exekall/customization.py b/tools/exekall/exekall/customization.py index e988275cc77823d8bb976042cf55471bd952ee66..1fb437ad3b93fc55eb1eb7e6f8a5bbd3a441cf71 100644 --- a/tools/exekall/exekall/customization.py +++ b/tools/exekall/exekall/customization.py @@ -40,14 +40,14 @@ class AdaptorBase: def get_db_loader(self): return None - def filter_callable_pool(self, callable_pool): - return callable_pool - - def filter_cls_map(self, cls_map): - return cls_map - - def filter_op_map(self, op_map): - return op_map + def filter_op_pool(self, op_pool): + return { + op for op in op_pool + # Only select operators with a non-empty parameter list. This + # rules out all classes __init__ that do not take parameters, as + # they are typically not interesting to us. 
+ if op.get_prototype()[0] + } def get_prebuilt_list(self): return [] @@ -60,6 +60,10 @@ class AdaptorBase: def register_cli_param(parser): pass + @staticmethod + def get_default_type_goal_pattern_set(): + return {'*Result'} + def resolve_cls_name(self, goal): return engine.get_class_from_name(goal, sys.modules) @@ -75,7 +79,7 @@ class AdaptorBase: failed_parents = result.get_failed_values() for failed_parent in failed_parents: excep = failed_parent.excep - return '{type}: {msg}'.format( + return 'EXCEPTION ({type}): {msg}'.format( type = get_name(type(excep)), msg = excep ) diff --git a/tools/exekall/exekall/engine.py b/tools/exekall/exekall/engine.py index 2443e9a27c033d021492bc8745b1e4289bb3d0ac..8ca9b83eec472e2fe66c22f601b71a91cd7b0727 100644 --- a/tools/exekall/exekall/engine.py +++ b/tools/exekall/exekall/engine.py @@ -63,6 +63,13 @@ def get_type_hints(f, module_vars=None): return resolve_annotations(f.__annotations__, module_vars) +def get_mro(cls): + if cls is type(None) or cls is None: + return (type(None), object) + else: + assert isinstance(cls, type) + return inspect.getmro(cls) + def resolve_annotations(annotations, module_vars): return { # If we get a string, evaluate it in the global namespace of the @@ -228,9 +235,6 @@ class ObjectStore: class CycleError(Exception): pass -class IgnoredCycleError(CycleError): - pass - class ExpressionWrapper: def __init__(self, expr): self.expr = expr @@ -242,23 +246,28 @@ def build_expr_list(cls, result_op_seq, op_map, cls_map, non_produced_handler='raise', cycle_handler='raise'): op_map = copy.copy(op_map) - cls_map = copy.copy(cls_map) + cls_map = { + cls: compat_cls_set + for cls, compat_cls_set in cls_map.items() + # If there is at least one compatible subclass that is produced, we + # keep it, otherwise it will mislead _build_expr into thinking the + # class can be built when in fact it cannot + if compat_cls_set & op_map.keys() + } for internal_cls in (Consumer, ExprData): op_map[internal_cls] = {Operator(internal_cls)} cls_map[internal_cls] = [internal_cls] expr_list = list() for result_op in result_op_seq: - # We just skip over Expression where a CycleError happened - with contextlib.suppress(IgnoredCycleError): - expr_gen = cls._build_expr(result_op, op_map, cls_map, - op_stack = [], - non_produced_handler=non_produced_handler, - cycle_handler=cycle_handler, - ) - for expr in expr_gen: - if expr.validate_expr(op_map): - expr_list.append(expr) + expr_gen = cls._build_expr(result_op, op_map, cls_map, + op_stack = [], + non_produced_handler=non_produced_handler, + cycle_handler=cycle_handler, + ) + for expr in expr_gen: + if expr.validate_expr(op_map): + expr_list.append(expr) return expr_list @@ -269,6 +278,9 @@ if op in op_stack: if cycle_handler == 'ignore': return + elif callable(cycle_handler): + cycle_handler(tuple(op.callable_ for op in new_op_stack)) + return elif cycle_handler == 'raise': raise CycleError('Cyclic dependency found: {path}'.format( path = ' -> '.join( ) )) else: - cycle_handler(tuple(op.callable_ for op in new_op_stack)) - raise IgnoredCycleError - + raise ValueError('Invalid cycle_handler') op_stack = new_op_stack @@ -314,6 +324,11 @@ else: if non_produced_handler == 'ignore': return + elif callable(non_produced_handler): + non_produced_handler(wanted_cls.__qualname__, op.name, param, + tuple(op.resolved_callable for op in op_stack) + ) + return + elif non_produced_handler == 'raise': raise 
NoOperatorError('No operator can produce instances of {cls} needed for {op} (parameter "{param}" along path {path})'.format( cls = wanted_cls.__qualname__, @@ -324,10 +339,7 @@ ) )) else: - non_produced_handler(wanted_cls.__qualname__, op.name, param, - tuple(op.resolved_callable for op in op_stack) - ) - return + raise ValueError('Invalid non_produced_handler') param_list = remove_indices(param_list, ignored_indices) cls_combis = remove_indices(cls_combis, ignored_indices) @@ -352,11 +364,10 @@ op_combi = list(op_combi) # Get all the possible ways of calling these operators - param_combis = itertools.product(*( - cls._build_expr(param_op, op_map, cls_map, + param_combis = itertools.product(*(cls._build_expr( + param_op, op_map, cls_map, op_stack, non_produced_handler, cycle_handler, - ) - for param_op in op_combi + ) for param_op in op_combi )) for param_combi in param_combis: @@ -690,6 +701,7 @@ class Expression: plain_name_cls_set = set() script = '' result_name_map = dict() + reusable_outvar_map = dict() for i, expr in enumerate(expr_list): script += ( '#'*80 + '\n# Computed expressions:' + @@ -703,6 +715,7 @@ expr_val_set = set(expr.get_all_values()) result_name, snippet = expr._get_script( + reusable_outvar_map = reusable_outvar_map, prefix = prefix + str(i), obj_store = obj_store, module_name_set = module_name_set, @@ -794,14 +807,42 @@ class Expression: EXPR_DATA_VAR_NAME = 'EXPR_DATA' - def _get_script(self, prefix, obj_store, module_name_set, idt, expr_val_set, consumer_expr_stack): + def _get_script(self, reusable_outvar_map, *args, **kwargs): + with contextlib.suppress(KeyError): + outvar = reusable_outvar_map[self] + return (outvar, '') + outvar, script = self._get_script_internal( + reusable_outvar_map, *args, **kwargs + ) + if self.op.reusable: + reusable_outvar_map[self] = outvar + return (outvar, script) + + def _get_script_internal(self, reusable_outvar_map, prefix, obj_store, module_name_set, idt, expr_val_set, consumer_expr_stack): def make_method_self_name(expr): return expr.op.value_type.__name__.replace('.', '') def make_var(name): - # Make sure we don't have clashes between the variable names - name = name.replace('_', '__') - name = '_' + name if name else '' + # If the variable name already contains a double underscore, we use + # 3 of them for the separator between the prefix and the name, so + # it will avoid ambiguity between these cases: + # prefix="prefix", name="my__name": + # prefix___my__name + # prefix="prefix__my", name="name": + # prefix__my__name + + # Find the longest run of underscores + nr_underscore = 0 + current_counter = 0 + for letter in name: + if letter == '_': + current_counter += 1 + else: + nr_underscore = max(current_counter, nr_underscore) + current_counter = 0 + + sep = (nr_underscore + 1) * '_' + name = sep + name if name else '' return prefix + name def make_comment(code, idt): @@ -927,7 +968,7 @@ # Do a deep first search traversal of the expression. param_outvar, param_out = param_expr._get_script( - param_prefix, obj_store, module_name_set, idt, + reusable_outvar_map, param_prefix, obj_store, module_name_set, idt, param_expr_val_set, consumer_expr_stack = consumer_expr_stack + [self], ) @@ -1128,25 +1169,31 @@ yield (expr_wrapper, expr_val) def _prepare_exec(self, expr_set): - self.discard_result() + """Apply a flavor of common subexpression elimination to the Expression + graph and clean up results of previous runs. 
- for param, param_expr in list(self.param_map.items()): + :return: an updated copy of the Expression + """ + # Make a copy so we don't modify the original Expression + new_expr = copy.copy(self) + new_expr.discard_result() + + for param, param_expr in list(new_expr.param_map.items()): # Update the param map in case param_expr was deduplicated - self.param_map[param] = param_expr._prepare_exec(expr_set) + new_expr.param_map[param] = param_expr._prepare_exec(expr_set) # Look for an existing Expression that has the same parameters so we # don't add duplicates. - for replacement_expr in expr_set - {self}: + for replacement_expr in expr_set - {new_expr}: if ( - self.op.callable_ is replacement_expr.op.callable_ and - self.param_map == replacement_expr.param_map + new_expr.op.callable_ is replacement_expr.op.callable_ and + new_expr.param_map == replacement_expr.param_map ): return replacement_expr # Otherwise register this Expression so no other duplicate will be used - else: - expr_set.add(self) - return self + expr_set.add(new_expr) + return new_expr def execute(self, post_compute_cb=None): return self._execute([], post_compute_cb) @@ -1227,10 +1274,8 @@ expr_val = ExprValue(self, param_expr_val_map) expr_val_seq = ExprValueSeq( self, None, param_expr_val_map, - post_compute_cb ) expr_val_seq.value_list.append(expr_val) - expr_val_seq.completed = True self.result_list.append(expr_val_seq) yield expr_val continue @@ -1433,10 +1478,10 @@ class Operator: ) } - if hasattr(self.resolved_callable, 'reusable'): - self.reusable = self.resolved_callable.reusable - elif hasattr(self.value_type, 'reusable'): - self.reusable = self.value_type.reusable + if hasattr(self.resolved_callable, '_exekall_reusable'): + self.reusable = self.resolved_callable._exekall_reusable + elif hasattr(self.value_type, '_exekall_reusable'): + self.reusable = self.value_type._exekall_reusable else: self.reusable = self.REUSABLE_DEFAULT @@ -1739,7 +1784,7 @@ class PrebuiltOperator(Operator): def reusable(reusable=Operator.REUSABLE_DEFAULT): def decorator(wrapped): - wrapped.reusable = reusable + wrapped._exekall_reusable = reusable return wrapped return decorator @@ -1749,13 +1794,12 @@ class ExprValueSeq: self.iterator = iterator self.value_list = list() self.param_expr_val_map = param_expr_val_map - self.completed = False self.post_compute_cb = post_compute_cb def get_expr_value_iter(self): callback = self.post_compute_cb if not callback: - callback = lambda x,y: None + callback = lambda x, reused: None def yielder(iteratable, reused): for x in iteratable: @@ -1766,7 +1810,7 @@ yield from yielder(self.value_list, True) # Then compute the remaining ones - if not self.completed: + if self.iterator: for (value, value_uuid), (excep, excep_uuid) in self.iterator: expr_val = ExprValue(self.expr, self.param_expr_val_map, value, value_uuid, @@ -1789,7 +1833,7 @@ True ) - self.completed = True + self.iterator = None def any_value_is_NoValue(value_list): return any( @@ -1811,16 +1855,17 @@ class SerializableExprValue: # Pre-compute all the IDs so they are readily available once the value # is deserialized self.recorded_id_map = dict() - for full_qual, with_tags in itertools.product((True, False), repeat=2): - self.recorded_id_map[(full_qual, with_tags)] = expr_val.get_id( - full_qual = full_qual, - with_tags = with_tags, + for full_qual, qual, with_tags in itertools.product((True, False), repeat=3): + self.recorded_id_map[(full_qual, qual, with_tags)] = expr_val.get_id( 
+ full_qual=full_qual, + qual=qual, + with_tags=with_tags, hidden_callable_set=hidden_callable_set, ) self.type_names = [ get_name(type_, full_qual=True) - for type_ in inspect.getmro(expr_val.expr.op.value_type) + for type_ in get_mro(expr_val.expr.op.value_type) if type_ is not object ] @@ -1832,8 +1877,8 @@ class SerializableExprValue: ) self.param_value_map[param] = param_serialzable - def get_id(self, full_qual=True, with_tags=True): - args = (full_qual, with_tags) + def get_id(self, full_qual=True, qual=True, with_tags=True): + args = (full_qual, qual, with_tags) return self.recorded_id_map[args] def get_parent_set(self, predicate, _parent_set=None): @@ -1848,10 +1893,10 @@ def get_name(obj, full_qual=True, qual=True): # full_qual enabled implies qual enabled - qual = qual or full_qual - + _qual = qual or full_qual # qual disabled implies full_qual disabled full_qual = full_qual and qual + qual = _qual # Add the module's name in front of the name to get a fully # qualified name diff --git a/tools/exekall/exekall/main.py b/tools/exekall/exekall/main.py index d4b64784163871b5ff38ceef4cb6dbc8a2dc7087..61f56c23a7ff7301b0e72fe153e5b8bb38f489af 100755 --- a/tools/exekall/exekall/main.py +++ b/tools/exekall/exekall/main.py @@ -30,7 +30,6 @@ import traceback import uuid import traceback import gzip -import fnmatch import functools import itertools import importlib @@ -72,6 +71,14 @@ than one to choose from.""") help="""Callable names patterns. Types produced by these callables will only be produced by these (other callables will be excluded).""") + run_parser.add_argument('--forbid', action='append', + default=[], + help="""Type name patterns. Callables returning these types or any subclass will not be called.""") + + run_parser.add_argument('--allow', action='append', + default=[], + help="""Allow using callables with a fully qualified name matching these patterns, even if they have not been selected for various reasons.""") + run_parser.add_argument('--modules-root', action='append', default=[], help="Equivalent to setting PYTHONPATH") @@ -102,17 +109,22 @@ refined with --load-type.""") help="""Load the parameters of the values that were used to compute the given UUID from the database.""") - run_parser.add_argument('--goal', default='*ResultBundle', + goal_group = run_parser.add_mutually_exclusive_group() + goal_group.add_argument('--goal', action='append', help="""Compute expressions leading to an instance of the specified class or a subclass of it.""") + goal_group.add_argument('--callable-goal', action='append', + help="""Compute expressions ending with a callable whose name +matches this pattern.""") + run_parser.add_argument('--sweep', nargs=5, action='append', default=[], metavar=('CALLABLE', 'PARAM', 'START', 'STOP', 'STEP'), help="""Parametric sweep on a function parameter. It needs five fields: the qualified name of the callable (pattern can be used), the name of the parameter, the start value, stop value and step size.""") - run_parser.add_argument('--verbose', action='store_true', + run_parser.add_argument('--verbose', '-v', action='count', default=0, help="""More verbose output.""") run_parser.add_argument('--dry-run', action='store_true', help="""Only show the tests that will be run instead of running them.""") @@ -128,8 +140,6 @@ the name of the parameter, the start value, stop value and step size.""") run_parser.add_argument('--debug', action='store_true', help="""Show complete Python backtrace when exekall crashes.""") - - args = argparse.Namespace() # Avoid showing help message on the incomplete parser. 
Instead, we carry on # and the help will be displayed after the parser customization has a # chance to take place. @@ -142,12 +152,12 @@ the name of the parameter, the start value, stop value and step size.""") # Silence argparse until we know what is going on stream = io.StringIO() with contextlib.redirect_stderr(stream): - args, _ = parser.parse_known_args(no_help_argv, args) + args, _ = parser.parse_known_args(no_help_argv) # If it fails, that may be because of an incomplete command line with just # --help for example. If it was for another reason, it will fail again and # show the message. except SystemExit: - args, _ = parser.parse_known_args(argv, args) + args, _ = parser.parse_known_args(argv) if not args.subcommand: parser.print_help() @@ -217,7 +227,11 @@ the name of the parameter, the start value, stop value and step size.""") dry_run = args.dry_run only_template_scripts = args.template_scripts - goal_pattern = args.goal + type_goal_pattern = args.goal + callable_goal_pattern = args.callable_goal + + if not (type_goal_pattern or callable_goal_pattern): + type_goal_pattern = set(adaptor_cls.get_default_type_goal_pattern_set()) load_db_path = args.load_db load_db_pattern_list = args.load_type @@ -225,7 +239,10 @@ the name of the parameter, the start value, stop value and step size.""") load_db_uuid_args = args.load_uuid_args user_filter = args.filter - restrict_list = args.restrict + restricted_pattern_set = set(args.restrict) + forbidden_pattern_set = set(args.forbid) + allowed_pattern_set = set(args.allow) + allowed_pattern_set.update(restricted_pattern_set) sys.path.extend(args.modules_root) @@ -255,40 +272,6 @@ the name of the parameter, the start value, stop value and step size.""") module_set.update(utils.import_file(path) for path in args.python_files) - # Pool of all callable considered - callable_pool = utils.get_callable_set(module_set) - callable_pool = adaptor.filter_callable_pool(callable_pool) - - op_pool = { - engine.Operator(callable_, tag_list_getter=adaptor.get_tag_list) - for callable_ in callable_pool - } - op_pool = { - op for op in op_pool - # Only select operators with non-empty parameter list. This rules out - # all classes __init__ that do not take parameter, as they are - # typically not interesting to us. - if op.get_prototype()[0] - } - - # Force some parameter values to be provided with a specific callable - patch_map = dict() - for sweep_spec in args.sweep: - number_type = float - callable_pattern, param, start, stop, step = sweep_spec - for callable_ in callable_pool: - callable_name = engine.get_name(callable_) - if not fnmatch.fnmatch(callable_name, callable_pattern): - continue - patch_map.setdefault(callable_name, dict())[param] = [ - i for i in utils.sweep_number( - callable_, param, - number_type(start), number_type(stop), number_type(step) - ) - ] - - only_prebuilt_cls = set() - # Get the prebuilt operators from the adaptor if not load_db_path: prebuilt_op_pool_list = adaptor.get_prebuilt_list() @@ -372,21 +355,49 @@ the name of the parameter, the start value, stop value and step size.""") serial_list = list(group) type_ = type(serial_list[0].value) - id_ = serial_list[0].get_id(full_qual=False, with_tags=True) + id_ = serial_list[0].get_id( + full_qual=False, + qual=False, + # Do not include the tags to avoid having them displayed + # twice, and to avoid wrongfully using the tag of the first + # item in the list for all items. 
+ with_tags=False, + ) prebuilt_op_pool_list.append( engine.PrebuiltOperator( type_, serial_list, id_=id_, tag_list_getter=adaptor.get_tag_list, )) - # Make sure that the provided PrebuiltOperator will be the only ones used - # to provide their types - only_prebuilt_cls.update( - op.obj_type - for op in prebuilt_op_pool_list + # Pool of all callables considered + callable_pool = utils.get_callable_set(module_set) + op_pool = { + engine.Operator(callable_, tag_list_getter=adaptor.get_tag_list) + for callable_ in callable_pool + } + filtered_op_pool = adaptor.filter_op_pool(op_pool) + # Make sure we have all the explicitly allowed operators + filtered_op_pool.update( + op for op in op_pool + if utils.match_name(op.get_name(full_qual=True), allowed_pattern_set) ) + op_pool = filtered_op_pool - only_prebuilt_cls.discard(type(NoValue)) + # Force some parameter values to be provided with a specific callable + patch_map = dict() + for sweep_spec in args.sweep: + number_type = float + callable_pattern, param, start, stop, step = sweep_spec + for callable_ in callable_pool: + callable_name = engine.get_name(callable_, full_qual=True) + if not utils.match_name(callable_name, [callable_pattern]): + continue + patch_map.setdefault(callable_name, dict())[param] = [ + i for i in utils.sweep_number( + callable_, param, + number_type(start), number_type(stop), number_type(step) + ) + ] for op_name, param_patch_map in patch_map.items(): for op in op_pool: @@ -418,8 +429,9 @@ # dependended upon as well. cls_set = set() for produced in produced_pool: - cls_set.update(inspect.getmro(produced)) + cls_set.update(engine.get_mro(produced)) cls_set.discard(object) + cls_set.discard(type(None)) # Map all types to the subclasses that can be used when the type is # requested. 
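Reviewer note (not part of the patch): the next hunk keeps building cls_map from this cls_set; for reference, a self-contained sketch of its shape, where every produced class is indexed under each class of its MRO so that a request for a base type can be satisfied by any produced subclass. Class names are illustrative.

    import inspect

    def build_cls_map(produced_pool):
        # Index each produced class under every class in its MRO, except
        # object (and type(None), per the hunk above), so asking for a base
        # class finds all compatible produced subclasses.
        cls_map = {}
        for produced in produced_pool:
            for base in inspect.getmro(produced):
                if base in (object, type(None)):
                    continue
                cls_map.setdefault(base, set()).add(produced)
        return cls_map

    class Base: pass
    class Derived(Base): pass

    assert build_cls_map({Derived}) == {Base: {Derived}, Derived: {Derived}}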
@@ -432,34 +444,38 @@ for cls in cls_set } - cls_map = adaptor.filter_cls_map(cls_map) + # Make sure that the provided PrebuiltOperators will be the only ones used + # to provide their types + only_prebuilt_cls = set(itertools.chain.from_iterable( + # Augment the list of classes that can only be provided by a prebuilt + # Operator with all the compatible classes + cls_map[op.obj_type] + for op in prebuilt_op_pool_list + )) - # Augment the list of classes that can only be provided by a prebuilt - # Operator with all the compatible classes - only_prebuilt_cls_ = set() - for cls in only_prebuilt_cls: - only_prebuilt_cls_.update(cls_map[cls]) - only_prebuilt_cls = only_prebuilt_cls_ + only_prebuilt_cls.discard(type(NoValue)) # Map of all produced types to a set of what operator can create them - op_map = dict() - for op in op_pool: - param_map, produced = op.get_prototype() - if not ( - # Some types may only be produced by prebuilt operators - produced in only_prebuilt_cls and - not isinstance(op, engine.PrebuiltOperator) - ): - op_map.setdefault(produced, set()).add(op) - op_map = adaptor.filter_op_map(op_map) + def build_op_map(op_pool, only_prebuilt_cls, forbidden_pattern_set): + op_map = dict() + for op in op_pool: + param_map, produced = op.get_prototype() + is_prebuilt_op = isinstance(op, engine.PrebuiltOperator) + if ( + (is_prebuilt_op or produced not in only_prebuilt_cls) + and not utils.match_base_cls(produced, forbidden_pattern_set) + ): + op_map.setdefault(produced, set()).add(op) + return op_map + + op_map = build_op_map(op_pool, only_prebuilt_cls, forbidden_pattern_set) + # Make sure that we only use what is available from now on + op_pool = set(itertools.chain.from_iterable(op_map.values())) # Restrict the production of some types to a set of operators. restricted_op_set = { op for op in op_pool - if any( - fnmatch.fnmatch(op.get_name(full_qual=True), pattern) - for pattern in restrict_list - ) + if utils.match_name(op.get_name(full_qual=True), restricted_pattern_set) } def apply_restrict(produced, op_set, restricted_op_set, cls_map): restricted_op_set = { @@ -482,10 +498,16 @@ # Get the list of root operators root_op_set = set() for produced, op_set in op_map.items(): - # All producers of Result can be a root operator in the expressions - # we are going to build, i.e. the outermost function call - if utils.match_base_cls(produced, goal_pattern): - root_op_set.update(op_set) + # All producers of the goal types can be a root operator in the + # expressions we are going to build, i.e. 
the outermost function call + if type_goal_pattern: + if utils.match_base_cls(produced, type_goal_pattern): + root_op_set.update(op_set) + + if callable_goal_pattern: + for op in op_set: + if utils.match_name(op.get_name(full_qual=True), callable_goal_pattern): + root_op_set.add(op) # Sort for stable output root_op_list = sorted(root_op_set, key=lambda op: str(op.name)) @@ -495,27 +517,27 @@ the name of the parameter, the start value, stop value and step size.""") hidden_callable_set = adaptor.get_hidden_callable_set(op_map) # Only print once per parameters' tuple - @utils.once - def handle_non_produced(cls_name, consumer_name, param_name, callable_path): - # When reloading from the DB, we don't want to be annoyed with lots of - # output related to missing PrebuiltOperator - if load_db_path and not verbose: - return - info('Nothing can produce instances of {cls} needed for {consumer} (parameter "{param}", along path {path})'.format( - cls = cls_name, - consumer = consumer_name, - param = param_name, - path = ' -> '.join(engine.get_name(callable_) for callable_ in callable_path) - )) + if verbose: + @utils.once + def handle_non_produced(cls_name, consumer_name, param_name, callable_path): + info('Nothing can produce instances of {cls} needed for {consumer} (parameter "{param}", along path {path})'.format( + cls = cls_name, + consumer = consumer_name, + param = param_name, + path = ' -> '.join(engine.get_name(callable_) for callable_ in callable_path) + )) - @utils.once - def handle_cycle(path): - error('Cyclic dependency detected: {path}'.format( - path = ' -> '.join( - engine.get_name(callable_) - for callable_ in path - ) - )) + @utils.once + def handle_cycle(path): + error('Cyclic dependency detected: {path}'.format( + path = ' -> '.join( + engine.get_name(callable_) + for callable_ in path + ) + )) + else: + handle_non_produced = 'ignore' + handle_cycle = 'ignore' # Build the list of Expression that can be constructed from the set of # callables @@ -524,6 +546,13 @@ the name of the parameter, the start value, stop value and step size.""") non_produced_handler = handle_non_produced, cycle_handler = handle_cycle, )) + # First, sort with the fully qualified ID so we have the strongest stability + # possible from one run to another + testcase_list.sort(key=lambda expr: take_first(expr.get_id(full_qual=True, with_tags=True))) + # Then sort again according to what will be displayed. Since it is a stable + # sort, it will keep a stable order for IDs that look the same but actually + # differ in their hidden part + testcase_list.sort(key=lambda expr: take_first(expr.get_id(qual=False, with_tags=True))) # Only keep the Expression where the outermost (root) operator is defined # in one of the files that were explicitely specified on the command line. 
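Reviewer note (not part of the patch): the double sort added above depends on list.sort() being stable; a tiny self-contained illustration with hypothetical IDs:

    # The second sort uses the short (displayed) ID; entries that render the
    # same keep the deterministic order set by the fully qualified sort.
    ids = ['pkg_b.mod.test_foo', 'pkg_a.mod.test_foo', 'pkg_a.mod.test_bar']
    ids.sort()                                    # full ID: strongest stability
    ids.sort(key=lambda s: s.rsplit('.', 1)[-1])  # short ID, stable
    assert ids == ['pkg_a.mod.test_bar', 'pkg_a.mod.test_foo', 'pkg_b.mod.test_foo']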
@@ -536,11 +565,12 @@ the name of the parameter, the start value, stop value and step size.""") if user_filter: testcase_list = [ testcase for testcase in testcase_list - if fnmatch.fnmatch(take_first(testcase.get_id( - # These options need to match what --dry-run gives - full_qual=verbose, + if utils.match_name(take_first(testcase.get_id( + # These options need to match what --dry-run gives (unless + # verbose is used) + full_qual=False, qual=False, - hidden_callable_set=hidden_callable_set)), user_filter) + hidden_callable_set=hidden_callable_set)), [user_filter]) ] if not testcase_list: @@ -550,11 +580,11 @@ the name of the parameter, the start value, stop value and step size.""") out('The following expressions will be executed:\n') for testcase in testcase_list: out(take_first(testcase.get_id( - full_qual=verbose, - qual=False, + full_qual=bool(verbose), + qual=bool(verbose), hidden_callable_set=hidden_callable_set ))) - if verbose: + if verbose >= 2: out(testcase.pretty_structure() + '\n') if dry_run: @@ -566,23 +596,22 @@ the name of the parameter, the start value, stop value and step size.""") db_loader = adaptor.get_db_loader() - out('\nArtifacts dir: {}'.format(artifact_dir)) + out('\nArtifacts dir: {}\n'.format(artifact_dir)) - for testcase in testcase_list: + # Apply the common subexpression elimination before trying to create the + # template scripts + executor_map = engine.Expression.get_executor_map(testcase_list) + + for testcase in executor_map.keys(): testcase_short_id = take_first(testcase.get_id( hidden_callable_set=hidden_callable_set, with_tags=False, full_qual=False, qual=False, )) - testcase_id = take_first(testcase.get_id( - hidden_callable_set=hidden_callable_set, - with_tags=False, - full_qual=True, - )) data = testcase.data - data['id'] = testcase_id + data['id'] = testcase_short_id data['uuid'] = testcase.uuid testcase_artifact_dir = pathlib.Path( @@ -596,8 +625,19 @@ the name of the parameter, the start value, stop value and step size.""") data['artifact_dir'] = artifact_dir data['testcase_artifact_dir'] = testcase_artifact_dir + with open(str(testcase_artifact_dir.joinpath('UUID')), 'wt') as f: + f.write(testcase.uuid + '\n') + with open(str(testcase_artifact_dir.joinpath('ID')), 'wt') as f: - f.write(testcase_id+'\n') + f.write(testcase_short_id+'\n') + + with open(str(testcase_artifact_dir.joinpath('STRUCTURE')), 'wt') as f: + f.write(take_first(testcase.get_id( + hidden_callable_set=hidden_callable_set, + with_tags=False, + full_qual=True, + )) + '\n\n') + f.write(testcase.pretty_structure()) with open( str(testcase_artifact_dir.joinpath('testcase_template.py')), @@ -615,10 +655,9 @@ the name of the parameter, the start value, stop value and step size.""") if only_template_scripts: return 0 - out('\n') result_map = collections.defaultdict(list) - for testcase, executor in engine.Expression.get_executor_map(testcase_list).items(): - exec_start_msg = 'Executing: {short_id}\n\nID: {full_id}\nArtifacts: {folder}'.format( + for testcase, executor in executor_map.items(): + exec_start_msg = 'Executing: {short_id}\n\nID: {full_id}\nArtifacts: {folder}\nUUID: {uuid_}'.format( short_id=take_first(testcase.get_id( hidden_callable_set=hidden_callable_set, full_qual=False, @@ -626,10 +665,11 @@ the name of the parameter, the start value, stop value and step size.""") )), full_id=take_first(testcase.get_id( - hidden_callable_set=hidden_callable_set, + hidden_callable_set=hidden_callable_set if not verbose else None, full_qual=True, )), - 
folder=testcase.data['testcase_artifact_dir'] + folder=testcase.data['testcase_artifact_dir'], + uuid_=testcase.uuid ).replace('\n', '\n# ') delim = '#' * (len(exec_start_msg.splitlines()[0]) + 2) @@ -687,8 +727,8 @@ the name of the parameter, the start value, stop value and step size.""") ), ) - prefix = 'Finished ' - out('{prefix}{id}{uuid}'.format( + prefix = 'Finished {uuid} '.format(uuid=get_uuid_str(result)) + out('{prefix}{id}'.format( id=result.get_id( full_qual=False, qual=False, @@ -697,10 +737,7 @@ the name of the parameter, the start value, stop value and step size.""") hidden_callable_set=hidden_callable_set, ).strip().replace('\n', '\n'+len(prefix)*' '), prefix=prefix, - uuid=get_uuid_str(result), )) - if verbose: - out('Full ID:{}'.format(result.get_id(full_qual=True))) out(adaptor.result_str(result)) result_list.append(result) @@ -726,8 +763,7 @@ the name of the parameter, the start value, stop value and step size.""") )[1]+'\n', ) - - with open(str(testcase_artifact_dir.joinpath('UUID')), 'wt') as f: + with open(str(testcase_artifact_dir.joinpath('VALUES_UUID')), 'wt') as f: for expr_val in result_list: if expr_val.value is not NoValue: f.write(expr_val.value_uuid + '\n') @@ -746,7 +782,8 @@ the name of the parameter, the start value, stop value and step size.""") db.to_path(db_path) out('#'*80) - info('Result summary:\n') + info('Artifacts dir: {}'.format(artifact_dir)) + info('Result summary:') # Display the results adaptor.process_results(result_map) diff --git a/tools/exekall/exekall/utils.py b/tools/exekall/exekall/utils.py index 871b496c231ddef87dba2036f634857432e60f6d..8bdbd90d0d6509c39fbe07a07248896e978c9519 100644 --- a/tools/exekall/exekall/utils.py +++ b/tools/exekall/exekall/utils.py @@ -46,10 +46,7 @@ def load_serial_from_db(db, uuid_seq=None, type_pattern_seq=None): ) def type_pattern_predicate(serial): - return any( - match_base_cls(type(serial.value), pattern) - for pattern in type_pattern_seq - ) + return match_base_cls(type(serial.value), type_pattern_seq) if type_pattern_seq and not uuid_seq: predicate = type_pattern_predicate @@ -66,15 +63,29 @@ def load_serial_from_db(db, uuid_seq=None, type_pattern_seq=None): return db.obj_store.get_by_predicate(predicate) -def match_base_cls(cls, pattern): +def match_base_cls(cls, pattern_list): # Match on the name of the class of the object and all its base classes - for base_cls in inspect.getmro(cls): + for base_cls in engine.get_mro(cls): base_cls_name = engine.get_name(base_cls, full_qual=True) - if fnmatch.fnmatch(base_cls_name, pattern): + if not base_cls_name: + continue + if any( + fnmatch.fnmatch(base_cls_name, pattern) + for pattern in pattern_list + ): return True return False +def match_name(name, pattern_list): + if name is None: + return False + return any( + fnmatch.fnmatch(name, pattern) + for pattern in pattern_list + ) + + def get_recursive_module_set(module_set, package_set): """Retrieve the set of all modules recurisvely imported from the modules in `module_set`, if they are (indirectly) part of one of the packages named in @@ -321,7 +332,7 @@ class ExekallFormatter(logging.Formatter): else: return self.default_fmt.format(record) -def setup_logging(log_level, debug_log_file=None, verbose=False): +def setup_logging(log_level, debug_log_file=None, verbose=0): logging.addLevelName(LOGGING_OUT_LEVEL, 'OUT') level=getattr(logging, log_level.upper())
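Reviewer note (not part of the patch): a hedged sketch of the pattern-list matching that exekall.utils.match_base_cls now implements; the classes are hypothetical and the helper below mirrors, but is not, the real one (which goes through engine.get_mro()/get_name() so it also copes with NoneType).

    import fnmatch
    import inspect

    class ResultBundle: pass
    class SanityBundle(ResultBundle): pass

    def match_base_cls(cls, pattern_list):
        # A class matches if the fully qualified name of any class in its
        # MRO matches any pattern, so a '--goal *.ResultBundle' also selects
        # subclasses of ResultBundle.
        for base in inspect.getmro(cls):
            name = '{}.{}'.format(base.__module__, base.__qualname__)
            if any(fnmatch.fnmatch(name, pattern) for pattern in pattern_list):
                return True
        return False

    assert match_base_cls(SanityBundle, ['*.ResultBundle'])
    assert not match_base_cls(SanityBundle, ['*.TestEnv'])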