diff --git a/external/devlib/devlib/__init__.py b/external/devlib/devlib/__init__.py index 933b0955fdd5578571bc82f0e8a1fac0882eee8a..e1c07e8573562f2d4ee2c0d17e4da6641ac06c97 100644 --- a/external/devlib/devlib/__init__.py +++ b/external/devlib/devlib/__init__.py @@ -56,7 +56,7 @@ from devlib.utils.ssh import SshConnection, TelnetConnection, Gem5Connection from devlib.utils.version import get_commit as __get_commit -__version__ = '1.0.dev1' +__version__ = '1.1.dev1' __commit = __get_commit() if __commit: diff --git a/external/devlib/devlib/bin/scripts/shutils.in b/external/devlib/devlib/bin/scripts/shutils.in index eba9d2d5b3d6c48f91a03b9350750b277784e571..37991a756e49bc349bbc26f3d785375b1a01b7d4 100755 --- a/external/devlib/devlib/bin/scripts/shutils.in +++ b/external/devlib/devlib/bin/scripts/shutils.in @@ -238,30 +238,49 @@ hotplug_online_all() { done } + +################################################################################ +# Scheduler +################################################################################ + +sched_get_kernel_attributes() { + MATCH=${1:-'.*'} + [ -d /proc/sys/kernel/ ] || exit 1 + $GREP '' /proc/sys/kernel/sched_* | \ + $SED -e 's|/proc/sys/kernel/sched_||' | \ + $GREP -e "$MATCH" +} + ################################################################################ # Misc ################################################################################ -read_tree_values() { +read_tree_tgz_b64() { BASEPATH=$1 MAXDEPTH=$2 + TMPBASE=$3 if [ ! -e $BASEPATH ]; then echo "ERROR: $BASEPATH does not exist" exit 1 fi - PATHS=$($BUSYBOX find $BASEPATH -follow -maxdepth $MAXDEPTH) - i=0 - for path in $PATHS; do - i=$(expr $i + 1) - if [ $i -gt 1 ]; then - break; - fi + cd $TMPBASE + TMP_FOLDER=$($BUSYBOX realpath $($BUSYBOX mktemp -d XXXXXX)) + + # 'tar' doesn't work as expected on debugfs, so copy the tree first to + # workaround the issue + cd $BASEPATH + for CUR_FILE in $($BUSYBOX find . 
-follow -type f -maxdepth $MAXDEPTH); do + $BUSYBOX cp --parents $CUR_FILE $TMP_FOLDER/ 2> /dev/null done - if [ $i -gt 1 ]; then - $BUSYBOX grep -s '' $PATHS - fi + + cd $TMP_FOLDER + $BUSYBOX tar cz * | $BUSYBOX base64 + + # Clean-up the tmp folder since we won't need it any more + cd $TMPBASE + rm -rf $TMP_FOLDER } get_linux_system_id() { @@ -334,8 +353,8 @@ ftrace_get_function_stats) hotplug_online_all) hotplug_online_all ;; -read_tree_values) - read_tree_values $* +read_tree_tgz_b64) + read_tree_tgz_b64 $* ;; get_linux_system_id) get_linux_system_id $* @@ -343,6 +362,9 @@ get_linux_system_id) get_android_system_id) get_android_system_id $* ;; +sched_get_kernel_attributes) + sched_get_kernel_attributes $* + ;; *) echo "Command [$CMD] not supported" exit -1 diff --git a/external/devlib/devlib/derived/fps.py b/external/devlib/devlib/derived/fps.py index b47d328b8db83b2dd518cff53fc4cb1daeb9ffdb..b7ef4cab4ba878f7e3670b3ba584b22df3a4963c 100644 --- a/external/devlib/devlib/derived/fps.py +++ b/external/devlib/devlib/derived/fps.py @@ -106,17 +106,17 @@ class DerivedGfxInfoStats(DerivedFpsStats): frame_count += 1 if start_vsync is None: - start_vsync = frame_data.Vsync_time_us - end_vsync = frame_data.Vsync_time_us + start_vsync = frame_data.Vsync_time_ns + end_vsync = frame_data.Vsync_time_ns - frame_time = frame_data.FrameCompleted_time_us - frame_data.IntendedVsync_time_us + frame_time = frame_data.FrameCompleted_time_ns - frame_data.IntendedVsync_time_ns pff = 1e9 / frame_time if pff > self.drop_threshold: per_frame_fps.append([pff]) if frame_count: duration = end_vsync - start_vsync - fps = (1e6 * frame_count) / float(duration) + fps = (1e9 * frame_count) / float(duration) else: duration = 0 fps = 0 @@ -133,15 +133,15 @@ class DerivedGfxInfoStats(DerivedFpsStats): def _process_with_pandas(self, measurements_csv): data = pd.read_csv(measurements_csv.path) data = data[data.Flags_flags == 0] - frame_time = data.FrameCompleted_time_us - data.IntendedVsync_time_us - per_frame_fps = (1e6 / frame_time) + frame_time = data.FrameCompleted_time_ns - data.IntendedVsync_time_ns + per_frame_fps = (1e9 / frame_time) keep_filter = per_frame_fps > self.drop_threshold per_frame_fps = per_frame_fps[keep_filter] per_frame_fps.name = 'fps' frame_count = data.index.size if frame_count > 1: - duration = data.Vsync_time_us.iloc[-1] - data.Vsync_time_us.iloc[0] + duration = data.Vsync_time_ns.iloc[-1] - data.Vsync_time_ns.iloc[0] fps = (1e9 * frame_count) / float(duration) else: duration = 0 diff --git a/external/devlib/devlib/exception.py b/external/devlib/devlib/exception.py index 0004a4bc76d559a9885fa0f5c976a8d222b6e218..2f5018c114e54833a5f7bb3449074be0d7fda618 100644 --- a/external/devlib/devlib/exception.py +++ b/external/devlib/devlib/exception.py @@ -105,6 +105,16 @@ class WorkerThreadError(DevlibError): super(WorkerThreadError, self).__init__(message) +class KernelConfigKeyError(KeyError, IndexError, DevlibError): + """ + Exception raised when a kernel config option cannot be found. + + It inherits from :exc:`IndexError` for backward compatibility, and + :exc:`KeyError` to behave like a regular mapping. 
+ """ + pass + + def get_traceback(exc=None): """ Returns the string with the traceback for the specifiec exc diff --git a/external/devlib/devlib/instrument/__init__.py b/external/devlib/devlib/instrument/__init__.py index 9eaef7bda102948013ef195af90b14c41598ad47..600b6b6411b88bb39e2eec0b17c226d22aba40b9 100644 --- a/external/devlib/devlib/instrument/__init__.py +++ b/external/devlib/devlib/instrument/__init__.py @@ -97,20 +97,30 @@ _measurement_types = [ # covert without being familar with individual instruments. MeasurementType('time', 'seconds', 'time', conversions={ - 'time_us': lambda x: x * 1000000, - 'time_ms': lambda x: x * 1000, + 'time_us': lambda x: x * 1e6, + 'time_ms': lambda x: x * 1e3, + 'time_ns': lambda x: x * 1e9, } ), MeasurementType('time_us', 'microseconds', 'time', conversions={ - 'time': lambda x: x / 1000000, - 'time_ms': lambda x: x / 1000, + 'time': lambda x: x / 1e6, + 'time_ms': lambda x: x / 1e3, + 'time_ns': lambda x: x * 1e3, } ), MeasurementType('time_ms', 'milliseconds', 'time', conversions={ - 'time': lambda x: x / 1000, - 'time_us': lambda x: x * 1000, + 'time': lambda x: x / 1e3, + 'time_us': lambda x: x * 1e3, + 'time_ns': lambda x: x * 1e6, + } + ), + MeasurementType('time_ns', 'nanoseconds', 'time', + conversions={ + 'time': lambda x: x / 1e9, + 'time_ms': lambda x: x / 1e6, + 'time_us': lambda x: x / 1e3, } ), diff --git a/external/devlib/devlib/instrument/frames.py b/external/devlib/devlib/instrument/frames.py index 1ec85fb95c4a2eaf0b8bdb3c055982347ee198aa..d63472469df27079da73c6ecbf0526703c3699a5 100644 --- a/external/devlib/devlib/instrument/frames.py +++ b/external/devlib/devlib/instrument/frames.py @@ -82,7 +82,7 @@ class GfxInfoFramesInstrument(FramesInstrument): if entry == 'Flags': self.add_channel('Flags', MeasurementType('flags', 'flags')) else: - self.add_channel(entry, 'time_us') + self.add_channel(entry, 'time_ns') self.header = [chan.label for chan in self.channels.values()] diff --git a/external/devlib/devlib/module/cgroups.py b/external/devlib/devlib/module/cgroups.py index 5e442fd24665afd0781a275b3d70d6d4237a960a..71885a64caab5b714df3d27f8f686e59d3a7d1d9 100644 --- a/external/devlib/devlib/module/cgroups.py +++ b/external/devlib/devlib/module/cgroups.py @@ -262,8 +262,9 @@ class CGroup(object): # Control cgroup path self.directory = controller.mount_point + if name != '/': - self.directory = self.target.path.join(controller.mount_point, name[1:]) + self.directory = self.target.path.join(controller.mount_point, name.strip('/')) # Setup path for tasks file self.tasks_file = self.target.path.join(self.directory, 'tasks') @@ -432,16 +433,20 @@ class CgroupsModule(Module): .format(self.cgroup_root, self.target.shutils, cgroup, cmdline) - def run_into(self, cgroup, cmdline): + def run_into(self, cgroup, cmdline, as_root=None): """ Run the specified command into the specified CGroup :param cmdline: Command to be run into cgroup :param cgroup: Name of cgroup to run command into + :param as_root: Specify whether to run the command as root, if not + specified will default to whether the target is rooted. :returns: Output of command. """ + if as_root is None: + as_root = self.target.is_rooted cmd = self.run_into_cmd(cgroup, cmdline) - raw_output = self.target.execute(cmd) + raw_output = self.target.execute(cmd, as_root=as_root) # First line of output comes from shutils; strip it out. 
         return raw_output.split('\n', 1)[1]
diff --git a/external/devlib/devlib/module/sched.py b/external/devlib/devlib/module/sched.py
index c391c019582d4f86621217945f990674472cacff..a05725bf2b0f22d75aac3e7de75b76859cb388bb 100644
--- a/external/devlib/devlib/module/sched.py
+++ b/external/devlib/devlib/module/sched.py
@@ -21,6 +21,7 @@ from past.builtins import basestring
 
 from devlib.module import Module
 from devlib.utils.misc import memoized
+from devlib.utils.types import boolean
 
 
 class SchedProcFSNode(object):
@@ -253,6 +254,109 @@ class SchedModule(Module):
 
         return SchedProcFSData.available(target)
 
+    def get_kernel_attributes(self, matching=None, check_exit_code=True):
+        """
+        Get the value of scheduler attributes.
+
+        :param matching: an (optional) substring to filter the scheduler
+            attributes to be returned.
+
+        The scheduler exposes a list of tunable attributes under:
+            /proc/sys/kernel
+        all starting with the "sched_" prefix.
+
+        This method returns a dictionary of all the "sched_" attributes exposed
+        by the target kernel, with the prefix removed.
+        It's possible to restrict the list of attributes by specifying a
+        substring to be matched.
+
+        :returns: a dictionary of scheduler tunables
+        """
+        command = 'sched_get_kernel_attributes {}'.format(
+            matching if matching else ''
+        )
+        output = self.target._execute_util(command, as_root=self.target.is_rooted,
+                                           check_exit_code=check_exit_code)
+        result = {}
+        for entry in output.strip().split('\n'):
+            if ':' not in entry:
+                continue
+            path, value = entry.strip().split(':', 1)
+            if value in ['0', '1']:
+                value = bool(int(value))
+            elif value.isdigit():
+                value = int(value)
+            result[path] = value
+        return result
+
+    def set_kernel_attribute(self, attr, value, verify=True):
+        """
+        Set the value of a scheduler attribute.
+
+        :param attr: the attribute to set, without the "sched_" prefix
+        :param value: the value to set
+        :param verify: true to check that the requested value has been set
+
+        :raise TargetError: if the attribute cannot be set
+        """
+        if isinstance(value, bool):
+            value = '1' if value else '0'
+        elif isinstance(value, int):
+            value = str(value)
+        path = '/proc/sys/kernel/sched_' + attr
+        self.target.write_value(path, value, verify)
+
+    @property
+    @memoized
+    def has_debug(self):
+        if self.target.config.get('SCHED_DEBUG') != 'y':
+            return False
+        return self.target.file_exists('/sys/kernel/debug/sched_features')
+
+    def get_features(self):
+        """
+        Get the status of each sched feature.
+
+        :returns: a dictionary of features and their "is enabled" status
+        """
+        if not self.has_debug:
+            raise RuntimeError("sched_features not available")
+        feats = self.target.read_value('/sys/kernel/debug/sched_features')
+        features = {}
+        for feat in feats.split():
+            value = True
+            if feat.startswith('NO'):
+                feat = feat.replace('NO_', '', 1)
+                value = False
+            features[feat] = value
+        return features
+
+    def set_feature(self, feature, enable, verify=True):
+        """
+        Set the status of a specified scheduler feature.
+
+        :param feature: the feature name to set
+        :param enable: true to enable the feature, false otherwise
+
+        :raise ValueError: if the specified enable value is not bool
+        :raise RuntimeError: if the specified feature cannot be set
+        """
+        if not self.has_debug:
+            raise RuntimeError("sched_features not available")
+        feature = feature.upper()
+        feat_value = feature
+        if not boolean(enable):
+            feat_value = 'NO_' + feat_value
+        self.target.write_value('/sys/kernel/debug/sched_features',
+                                feat_value, verify=False)
+        if not verify:
+            return
+        msg = 'Failed to set {}, feature not supported?'.format(feat_value)
+        features = self.get_features()
+        feat_value = features.get(feature, not enable)
+        if feat_value != enable:
+            raise RuntimeError(msg)
+
     def get_cpu_sd_info(self, cpu):
         """
         :returns: An object view of /proc/sys/kernel/sched_domain/cpu/*
diff --git a/external/devlib/devlib/target.py b/external/devlib/devlib/target.py
index 475c166bfe340eb91780290b6c7a668ae03387b7..ad0b6d06c4b255d72c7afd7dad4051d21d4d1215 100644
--- a/external/devlib/devlib/target.py
+++ b/external/devlib/devlib/target.py
@@ -13,6 +13,9 @@
 # limitations under the License.
# +import io +import base64 +import gzip import os import re import time @@ -27,13 +30,21 @@ import xml.dom.minidom import copy from collections import namedtuple, defaultdict from pipes import quote +from past.types import basestring +from numbers import Number +try: + from collections.abc import Mapping +except ImportError: + from collections import Mapping + +from enum import Enum from devlib.host import LocalConnection, PACKAGE_BIN_DIRECTORY from devlib.module import get_module from devlib.platform import Platform from devlib.exception import (DevlibTransientError, TargetStableError, TargetNotRespondingError, TimeoutError, - TargetTransientError) # pylint: disable=redefined-builtin + TargetTransientError, KernelConfigKeyError) # pylint: disable=redefined-builtin from devlib.utils.ssh import SshConnection from devlib.utils.android import AdbConnection, AndroidProperties, LogcatMonitor, adb_command, adb_disconnect, INTENT_FLAGS from devlib.utils.misc import memoized, isiterable, convert_new_lines @@ -684,23 +695,61 @@ class Target(object): timeout = duration + 10 self.execute('sleep {}'.format(duration), timeout=timeout) - def read_tree_values_flat(self, path, depth=1, check_exit_code=True): - command = 'read_tree_values {} {}'.format(quote(path), depth) + def read_tree_values_flat(self, path, depth=1, check_exit_code=True, + decode_unicode=True, strip_null_chars=True): + command = 'read_tree_tgz_b64 {} {} {}'.format(quote(path), depth, + quote(self.working_directory)) output = self._execute_util(command, as_root=self.is_rooted, check_exit_code=check_exit_code) - accumulator = defaultdict(list) - for entry in output.strip().split('\n'): - if ':' not in entry: - continue - path, value = entry.strip().split(':', 1) - accumulator[path].append(value) + result = {} + + # Unpack the archive in memory + tar_gz = base64.b64decode(output) + tar_gz_bytes = io.BytesIO(tar_gz) + tar_buf = gzip.GzipFile(fileobj=tar_gz_bytes).read() + tar_bytes = io.BytesIO(tar_buf) + with tarfile.open(fileobj=tar_bytes) as tar: + for member in tar.getmembers(): + try: + content_f = tar.extractfile(member) + # ignore exotic members like sockets + except Exception: + continue + # if it is a file and not a folder + if content_f: + content = content_f.read() + if decode_unicode: + try: + content = content.decode('utf-8').strip() + if strip_null_chars: + content = content.replace('\x00', '').strip() + except UnicodeDecodeError: + content = '' + + name = self.path.join(path, member.name) + result[name] = content - result = {k: '\n'.join(v).strip() for k, v in accumulator.items()} return result - def read_tree_values(self, path, depth=1, dictcls=dict, check_exit_code=True): - value_map = self.read_tree_values_flat(path, depth, check_exit_code) + def read_tree_values(self, path, depth=1, dictcls=dict, + check_exit_code=True, decode_unicode=True, + strip_null_chars=True): + """ + Reads the content of all files under a given tree + + :path: path to the tree + :depth: maximum tree depth to read + :dictcls: type of the dict used to store the results + :check_exit_code: raise an exception if the shutil command fails + :decode_unicode: decode the content of files as utf-8 + :strip_null_chars: remove '\x00' chars from the content of utf-8 + decoded files + + :returns: a tree-like dict with the content of files as leafs + """ + value_map = self.read_tree_values_flat(path, depth, check_exit_code, + decode_unicode, strip_null_chars) return _build_path_tree(value_map, path, self.path.sep, dictcls) # internal methods @@ -1722,8 +1771,56 
@@ class KernelVersion(object): __repr__ = __str__ -class KernelConfig(object): +class HexInt(int): + """ + Subclass of :class:`int` that uses hexadecimal formatting by default. + """ + + def __new__(cls, val=0, base=16): + super_new = super(HexInt, cls).__new__ + if isinstance(val, Number): + return super_new(cls, val) + else: + return super_new(cls, val, base=base) + + def __str__(self): + return hex(self) + + +class KernelConfigTristate(Enum): + YES = 'y' + NO = 'n' + MODULE = 'm' + + def __bool__(self): + """ + Allow using this enum to represent bool Kconfig type, although it is + technically different from tristate. + """ + return self in (self.YES, self.MODULE) + + def __nonzero__(self): + """ + For Python 2.x compatibility. + """ + return self.__bool__() + + @classmethod + def from_str(cls, str_): + for state in cls: + if state.value == str_: + return state + raise ValueError('No kernel config tristate value matches "{}"'.format(str_)) + +class TypedKernelConfig(Mapping): + """ + Mapping-like typed version of :class:`KernelConfig`. + + Values are either :class:`str`, :class:`int`, + :class:`KernelConfigTristate`, or :class:`HexInt`. ``hex`` Kconfig type is + mapped to :class:`HexInt` and ``bool`` to :class:`KernelConfigTristate`. + """ not_set_regex = re.compile(r'# (\S+) is not set') @staticmethod @@ -1733,50 +1830,200 @@ class KernelConfig(object): name = 'CONFIG_' + name return name - def iteritems(self): - return iter(self._config.items()) + def __init__(self, mapping=None): + mapping = mapping if mapping is not None else {} + self._config = { + # Ensure we use the canonical name of the config keys for internal + # representation + self.get_config_name(k): v + for k, v in dict(mapping).items() + } + + @classmethod + def from_str(cls, text): + """ + Build a :class:`TypedKernelConfig` out of the string content of a + Kconfig file. + """ + return cls(cls._parse_text(text)) - def __init__(self, text): - self.text = text - self._config = {} - for line in text.split('\n'): + @staticmethod + def _val_to_str(val): + "Convert back values to Kconfig-style string value" + # Special case the gracefully handle the output of get() + if val is None: + return None + elif isinstance(val, KernelConfigTristate): + return val.value + elif isinstance(val, basestring): + return '"{}"'.format(val) + else: + return str(val) + + def __str__(self): + return '\n'.join( + '{}={}'.format(k, self._val_to_str(v)) + for k, v in self.items() + ) + + @staticmethod + def _parse_val(k, v): + """ + Parse a value of types handled by Kconfig: + * string + * bool + * tristate + * hex + * int + + Since bool cannot be distinguished from tristate, tristate is + always used. :meth:`KernelConfigTristate.__bool__` will allow using + it as a bool though, so it should not impact user code. 
+ """ + if not v: + return None + + # Handle "string" type + if v.startswith('"'): + # Strip enclosing " + return v[1:-1] + + else: + try: + # Handles "bool" and "tristate" types + return KernelConfigTristate.from_str(v) + except ValueError: + pass + + try: + # Handles "int" type + return int(v) + except ValueError: + pass + + try: + # Handles "hex" type + return HexInt(v) + except ValueError: + pass + + # If no type could be parsed + raise ValueError('Could not parse Kconfig key: {}={}'.format( + k, v + ), k, v + ) + + @classmethod + def _parse_text(cls, text): + config = {} + for line in text.splitlines(): line = line.strip() + + # skip empty lines + if not line: + continue + if line.startswith('#'): - match = self.not_set_regex.search(line) + match = cls.not_set_regex.search(line) if match: - self._config[match.group(1)] = 'n' - elif '=' in line: + value = 'n' + name = match.group(1) + else: + continue + else: name, value = line.split('=', 1) - self._config[name.strip()] = value.strip() - def get(self, name, strict=False): + name = cls.get_config_name(name.strip()) + value = cls._parse_val(name, value.strip()) + config[name] = value + return config + + def __getitem__(self, name): name = self.get_config_name(name) - res = self._config.get(name) + try: + return self._config[name] + except KeyError: + raise KernelConfigKeyError( + "{} is not exposed in kernel config".format(name), + name + ) + + def __iter__(self): + return iter(self._config) - if not res and strict: - raise IndexError("{} is not exposed in target's config") + def __len__(self): + return len(self._config) - return self._config.get(name) + def __contains__(self, name): + name = self.get_config_name(name) + return name in self._config def like(self, name): regex = re.compile(name, re.I) - result = {} - for k, v in self._config.items(): - if regex.search(k): - result[k] = v - return result + return { + k: v for k, v in self.items() + if regex.search(k) + } + + def is_enabled(self, name): + return self.get(name) is KernelConfigTristate.YES + + def is_module(self, name): + return self.get(name) is KernelConfigTristate.MODULE + + def is_not_set(self, name): + return self.get(name) is KernelConfigTristate.NO + + def has(self, name): + return self.is_enabled(name) or self.is_module(name) + + +class KernelConfig(object): + """ + Backward compatibility shim on top of :class:`TypedKernelConfig`. + + This class does not provide a Mapping API and only return string values. + """ + + def __init__(self, text): + # Expose typed_config as a non-private attribute, so that user code + # needing it can get it from any existing producer of KernelConfig. 
+ self.typed_config = TypedKernelConfig.from_str(text) + # Expose the original text for backward compatibility + self.text = text + + get_config_name = TypedKernelConfig.get_config_name + not_set_regex = TypedKernelConfig.not_set_regex + + def iteritems(self): + for k, v in self.typed_config.items(): + yield (k, self.typed_config._val_to_str(v)) + + def get(self, name, strict=False): + if strict: + val = self.typed_config[name] + else: + val = self.typed_config.get(name) + + return self.typed_config._val_to_str(val) + + def like(self, name): + return { + k: self.typed_config._val_to_str(v) + for k, v in self.typed_config.like(name).items() + } def is_enabled(self, name): - return self.get(name) == 'y' + return self.typed_config.is_enabled(name) def is_module(self, name): - return self.get(name) == 'm' + return self.typed_config.is_module(name) def is_not_set(self, name): - return self.get(name) == 'n' + return self.typed_config.is_not_set(name) def has(self, name): - return self.get(name) in ['m', 'y'] + return self.typed_config.has(name) class LocalLinuxTarget(LinuxTarget): diff --git a/external/devlib/devlib/trace/serial_trace.py b/external/devlib/devlib/trace/serial_trace.py index 227efbbd679ab230f387d4bde75e9dcff12ea7eb..b991a545d2f1d7cec3ec013835811d90c5c5159d 100644 --- a/external/devlib/devlib/trace/serial_trace.py +++ b/external/devlib/devlib/trace/serial_trace.py @@ -52,7 +52,8 @@ class SerialTraceCollector(TraceCollector): self._tmpfile = NamedTemporaryFile() - self._tmpfile.write("-------- Starting serial logging --------\n") + start_marker = "-------- Starting serial logging --------\n" + self._tmpfile.write(start_marker.encode('utf-8')) self._serial_target, self._conn = get_connection(port=self.serial_port, baudrate=self.baudrate, @@ -76,7 +77,8 @@ class SerialTraceCollector(TraceCollector): self._serial_target.close() del self._conn - self._tmpfile.write("-------- Stopping serial logging --------\n") + stop_marker = "-------- Stopping serial logging --------\n" + self._tmpfile.write(stop_marker.encode('utf-8')) self._collecting = False diff --git a/external/devlib/devlib/utils/android.py b/external/devlib/devlib/utils/android.py index bd221bddcd4ed196ff2b1972808e7fffdb36d483..153d1b09f51025ffebc7f9a63211f2741af2bd1f 100755 --- a/external/devlib/devlib/utils/android.py +++ b/external/devlib/devlib/utils/android.py @@ -28,6 +28,9 @@ import tempfile import subprocess from collections import defaultdict import pexpect +import xml.etree.ElementTree +import zipfile + from pipes import quote from devlib.exception import TargetTransientError, TargetStableError, HostError @@ -132,6 +135,7 @@ class ApkInfo(object): version_regex = re.compile(r"name='(?P[^']+)' versionCode='(?P[^']+)' versionName='(?P[^']+)'") name_regex = re.compile(r"name='(?P[^']+)'") permission_regex = re.compile(r"name='(?P[^']+)'") + activity_regex = re.compile(r'\s*A:\s*android:name\(0x\d+\)=".(?P\w+)"') def __init__(self, path=None): self.path = path @@ -147,15 +151,7 @@ class ApkInfo(object): # pylint: disable=too-many-branches def parse(self, apk_path): _check_env() - command = [aapt, 'dump', 'badging', apk_path] - logger.debug(' '.join(command)) - try: - output = subprocess.check_output(command, stderr=subprocess.STDOUT) - if sys.version_info[0] == 3: - output = output.decode(sys.stdout.encoding or 'utf-8', 'replace') - except subprocess.CalledProcessError as e: - raise HostError('Error parsing APK file {}. 
`aapt` says:\n{}' - .format(apk_path, e.output)) + output = self._run([aapt, 'dump', 'badging', apk_path]) for line in output.split('\n'): if line.startswith('application-label:'): self.label = line.split(':')[1].strip().replace('\'', '') @@ -188,6 +184,50 @@ class ApkInfo(object): else: pass # not interested + self._apk_path = apk_path + self._activities = None + self._methods = None + + @property + def activities(self): + if self._activities is None: + cmd = [aapt, 'dump', 'xmltree', self._apk_path, + 'AndroidManifest.xml'] + matched_activities = self.activity_regex.finditer(self._run(cmd)) + self._activities = [m.group('name') for m in matched_activities] + return self._activities + + @property + def methods(self): + if self._methods is None: + with zipfile.ZipFile(self._apk_path, 'r') as z: + extracted = z.extract('classes.dex', tempfile.gettempdir()) + + dexdump = os.path.join(os.path.dirname(aapt), 'dexdump') + command = [dexdump, '-l', 'xml', extracted] + dump = self._run(command) + + xml_tree = xml.etree.ElementTree.fromstring(dump) + + package = next(i for i in xml_tree.iter('package') + if i.attrib['name'] == self.package) + + self._methods = [(meth.attrib['name'], klass.attrib['name']) + for klass in package.iter('class') + for meth in klass.iter('method')] + return self._methods + + def _run(self, command): + logger.debug(' '.join(command)) + try: + output = subprocess.check_output(command, stderr=subprocess.STDOUT) + if sys.version_info[0] == 3: + output = output.decode(sys.stdout.encoding or 'utf-8', 'replace') + except subprocess.CalledProcessError as e: + raise HostError('Error while running "{}":\n{}' + .format(command, e.output)) + return output + class AdbConnection(object): diff --git a/external/devlib/devlib/utils/rendering.py b/external/devlib/devlib/utils/rendering.py index a72a72ea7e280a2e1f8063923ba5d4df03509670..29c4beac9c5787debee6d342763a2673b56d2134 100644 --- a/external/devlib/devlib/utils/rendering.py +++ b/external/devlib/devlib/utils/rendering.py @@ -49,12 +49,12 @@ class FrameCollector(threading.Thread): self.refresh_period = None self.drop_threshold = None self.unresponsive_count = 0 - self.last_ready_time = None + self.last_ready_time = 0 self.exc = None self.header = None def run(self): - logger.debug('Surface flinger frame data collection started.') + logger.debug('Frame data collection started.') try: self.stop_signal.clear() fd, self.temp_file = tempfile.mkstemp() @@ -71,7 +71,7 @@ class FrameCollector(threading.Thread): except Exception as e: # pylint: disable=W0703 logger.warning('Exception on collector thread: {}({})'.format(e.__class__.__name__, e)) self.exc = WorkerThreadError(self.name, sys.exc_info()) - logger.debug('Surface flinger frame data collection stopped.') + logger.debug('Frame data collection stopped.') def stop(self): self.stop_signal.set() @@ -133,7 +133,7 @@ class SurfaceFlingerFrameCollector(FrameCollector): def collect_frames(self, wfh): for activity in self.list(): if activity == self.view: - wfh.write(self.get_latencies(activity)) + wfh.write(self.get_latencies(activity).encode('utf-8')) def clear(self): self.target.execute('dumpsys SurfaceFlinger --latency-clear ') diff --git a/external/devlib/devlib/utils/ssh.py b/external/devlib/devlib/utils/ssh.py index 74ece6609e29c732b3b4b8e74624a500753259c8..27898b68390f454f0dc030851d37996a0dd812cc 100644 --- a/external/devlib/devlib/utils/ssh.py +++ b/external/devlib/devlib/utils/ssh.py @@ -41,7 +41,8 @@ from pexpect import EOF, TIMEOUT, spawn # pylint: 
disable=redefined-builtin,wrong-import-position from devlib.exception import (HostError, TargetStableError, TargetNotRespondingError, TimeoutError, TargetTransientError) -from devlib.utils.misc import which, strip_bash_colors, check_output, sanitize_cmd_template +from devlib.utils.misc import (which, strip_bash_colors, check_output, + sanitize_cmd_template, memoized) from devlib.utils.types import boolean @@ -253,7 +254,7 @@ class SshConnection(object): # simulate impatiently hitting ^C until command prompt appears logger.debug('Sending ^C') for _ in range(self.max_cancel_attempts): - self.conn.sendline(chr(3)) + self._sendline(chr(3)) if self.conn.prompt(0.1): return True return False @@ -267,15 +268,15 @@ class SshConnection(object): command = self.sudo_cmd.format(quote(command)) if log: logger.debug(command) - self.conn.sendline(command) + self._sendline(command) if self.password: index = self.conn.expect_exact([self.password_prompt, TIMEOUT], timeout=0.5) if index == 0: - self.conn.sendline(self.password) + self._sendline(self.password) else: # not as_root if log: logger.debug(command) - self.conn.sendline(command) + self._sendline(command) timed_out = self._wait_for_prompt(timeout) # the regex removes line breaks potential introduced when writing # command to shell. @@ -321,6 +322,21 @@ class SshConnection(object): except TimeoutError as e: raise TimeoutError(command_redacted, e.output) + def _sendline(self, command): + # Workaround for https://github.com/pexpect/pexpect/issues/552 + if len(command) == self._get_window_size()[1] - self._get_prompt_length(): + command += ' ' + self.conn.sendline(command) + + @memoized + def _get_prompt_length(self): + self.conn.sendline() + self.conn.prompt() + return len(self.conn.after) + + @memoized + def _get_window_size(self): + return self.conn.getwinsize() class TelnetConnection(SshConnection): diff --git a/external/trappy/trappy/base.py b/external/trappy/trappy/base.py index 385c8aac73fa5961bca0c5e631fc63849ed909ee..4b05de98c092632a0fbb76034c12fec319892525 100644 --- a/external/trappy/trappy/base.py +++ b/external/trappy/trappy/base.py @@ -190,26 +190,21 @@ class Base(object): self.line_array.append(line) self.data_array.append(data) - def string_cast(self, string, type): - """ Attempt to convert string to another type - - Here we attempt to cast string to a type. Currently only - integer conversion is supported with future expansion - left open to other types. + def string_cast_int(self, string): + """ + Attempt to convert string to an int :param string: The value to convert. :type string: str - - :param type: The type to convert to. 
- :type type: type """ - # Currently this function only supports int conversion - if type != int: - return - # Handle false-positives for negative numbers - if not string.lstrip("-").isdigit(): - return string - return int(string) + + try: + return int(string) + except ValueError: + try: + return int(string, base=16) + except ValueError: + return string def generate_data_dict(self, data_str): data_dict = {} @@ -234,7 +229,7 @@ class Base(object): data_dict[prev_key] += ' ' + field continue (key, value) = field.split('=', 1) - value = self.string_cast(value, int) + value = self.string_cast_int(value) data_dict[key] = value prev_key = key return data_dict diff --git a/external/workload-automation/MANIFEST.in b/external/workload-automation/MANIFEST.in index 9790e788ca2cb2165ada2a363e616c1b5143fb34..18a948c2a2425e06ad324e927c4297b17cbffd9f 100644 --- a/external/workload-automation/MANIFEST.in +++ b/external/workload-automation/MANIFEST.in @@ -1,2 +1,3 @@ recursive-include scripts * recursive-include doc * +recursive-include wa * diff --git a/external/workload-automation/doc/source/api/output.rst b/external/workload-automation/doc/source/api/output.rst index 0e474a33a7de305bbca4f1d0c2f2bf0434626ceb..5aea51ece6cf39acf2e5b203887365f8266088d3 100644 --- a/external/workload-automation/doc/source/api/output.rst +++ b/external/workload-automation/doc/source/api/output.rst @@ -23,6 +23,23 @@ iterating over all WA output directories found. :param path: must be the path to the top-level output directory (the one containing ``__meta`` subdirectory and ``run.log``). +WA output stored in a Postgres database by the ``Postgres`` output processor +can be accessed via a :class:`RunDatabaseOutput` which can be initialized as follows: + +.. class:: RunDatabaseOutput(password, host='localhost', user='postgres', port='5432', dbname='wa', run_uuid=None, list_runs=False) + + The main interface into Postgres database containing WA results. + + :param password: The password used to authenticate with + :param host: The database host address. Defaults to ``'localhost'`` + :param user: The user name used to authenticate with. Defaults to ``'postgres'`` + :param port: The database connection port number. Defaults to ``'5432'`` + :param dbname: The database name. Defaults to ``'wa'`` + :param run_uuid: The ``run_uuid`` to identify the selected run + :param list_runs: Will connect to the database and will print out the available runs + with their corresponding run_uuids. Defaults to ``False`` + + Example ------- @@ -39,6 +56,32 @@ called ``wa_output`` in the current working directory we can initialize a ...: output_directory = 'wa_output' ...: run_output = RunOutput(output_directory) +Alternatively if the results have been stored in a Postgres database we can +initialize a ``RunDatabaseOutput`` as follows: + +.. 
code-block:: python + + In [1]: from wa import RunDatabaseOutput + ...: + ...: db_settings = { + ...: host: 'localhost', + ...: port: '5432', + ...: dbname: 'wa' + ...: user: 'postgres', + ...: password: 'wa' + ...: } + ...: + ...: RunDatabaseOutput(list_runs=True, **db_settings) + Available runs are: + ========= ============ ============= =================== =================== ==================================== + Run Name Project Project Stage Start Time End Time run_uuid + ========= ============ ============= =================== =================== ==================================== + Test Run my_project None 2018-11-29 14:53:08 2018-11-29 14:53:24 aa3077eb-241a-41d3-9610-245fd4e552a9 + run_1 my_project None 2018-11-29 14:53:34 2018-11-29 14:53:37 4c2885c9-2f4a-49a1-bbc5-b010f8d6b12a + ========= ============ ============= =================== =================== ==================================== + + In [2]: run_uuid = '4c2885c9-2f4a-49a1-bbc5-b010f8d6b12a' + ...: run_output = RunDatabaseOutput(run_uuid=run_uuid, **db_settings) From here we can retrieve various information about the run. For example if we @@ -65,7 +108,7 @@ parameters and the metrics recorded from the first job was we can do the followi Out[5]: u'dhrystone' # Print out all the runtime parameters and their values for this job - In [6]: for k, v in job_1.spec.runtime_parameters.iteritems(): + In [6]: for k, v in job_1.spec.runtime_parameters.items(): ...: print (k, v) (u'airplane_mode': False) (u'brightness': 100) @@ -92,6 +135,15 @@ parameters and the metrics recorded from the first job was we can do the followi , ] + # Load the run results csv file into pandas + In [7]: pd.read_csv(run_output.get_artifact_path('run_result_csv')) + Out[7]: + id workload iteration metric value units + 0 450000-wk1 dhrystone 1 thread 0 score 1.442310e+07 NaN + 1 450000-wk1 dhrystone 1 thread 0 DMIPS 8.209700e+04 NaN + 2 450000-wk1 dhrystone 1 thread 1 score 1.442310e+07 NaN + 3 450000-wk1 dhrystone 1 thread 1 DMIPS 8.720900e+04 NaN + ... We can also retrieve information about the target that the run was performed on @@ -214,7 +266,7 @@ methods Return the :class:`Metric` associated with the run (not the individual jobs) with the specified `name`. - :return: The :class`Metric` object for the metric with the specified name. + :return: The :class:`Metric` object for the metric with the specified name. .. method:: RunOutput.get_job_spec(spec_id) @@ -232,6 +284,46 @@ methods :return: A list of `str` labels of workloads that were part of this run. +:class:`RunDatabaseOutput` +--------------------------- + +:class:`RunDatabaseOutput` provides access to the output of a WA :term:`run`, +including metrics,artifacts, metadata, and configuration stored in a postgres database. +The majority of attributes and methods are the same :class:`RunOutput` however the +noticeable differences are: + +``jobs`` + A list of :class:`JobDatabaseOutput` objects for each job that was executed + during the run. + +``basepath`` + A representation of the current database and host information backing this object. + +methods +~~~~~~~ + +.. method:: RunDatabaseOutput.get_artifact(name) + + Return the :class:`Artifact` specified by ``name``. This will only look + at the run artifacts; this will not search the artifacts of the individual + jobs. The `path` attribute of the :class:`Artifact` will be set to the Database OID of the object. + + :param name: The name of the artifact who's path to retrieve. 
+ :return: The :class:`Artifact` with that name + :raises HostError: If the artifact with the specified name does not exist. + + +.. method:: RunDatabaseOutput.get_artifact_path(name) + + Returns a `StringIO` object containing the contents of the artifact + specified by ``name``. This will only look at the run artifacts; this will + not search the artifacts of the individual jobs. + + :param name: The name of the artifact who's path to retrieve. + :return: A `StringIO` object with the contents of the artifact + :raises HostError: If the artifact with the specified name does not exist. + + :class:`JobOutput` ------------------ @@ -311,11 +403,10 @@ methods Return the :class:`Artifact` specified by ``name`` associated with this job. - :param name: The name of the artifact who's path to retrieve. + :param name: The name of the artifact to retrieve. :return: The :class:`Artifact` with that name :raises HostError: If the artifact with the specified name does not exist. - .. method:: RunOutput.get_artifact_path(name) Return the path to the file backing the artifact specified by ``name``, @@ -325,13 +416,48 @@ methods :return: The path to the artifact :raises HostError: If the artifact with the specified name does not exist. - .. method:: RunOutput.get_metric(name) Return the :class:`Metric` associated with this job with the specified `name`. - :return: The :class`Metric` object for the metric with the specified name. + :return: The :class:`Metric` object for the metric with the specified name. + + +:class:`JobDatabaseOutput` +--------------------------- + +:class:`JobOutput` provides access to the output of a single :term:`job` +executed during a WA :term:`run`, including metrics, artifacts, metadata, and +configuration stored in a postgres database. +The majority of attributes and methods are the same :class:`JobOutput` however the +noticeable differences are: + +``basepath`` + A representation of the current database and host information backing this object. + + +methods +~~~~~~~ + +.. method:: JobDatabaseOutput.get_artifact(name) + + Return the :class:`Artifact` specified by ``name`` associated with this job. + The `path` attribute of the :class:`Artifact` will be set to the Database + OID of the object. + + :param name: The name of the artifact to retrieve. + :return: The :class:`Artifact` with that name + :raises HostError: If the artifact with the specified name does not exist. + +.. method:: JobDatabaseOutput.get_artifact_path(name) + + Returns a ``StringIO`` object containing the contents of the artifact + specified by ``name`` associated with this job. + + :param name: The name of the artifact who's path to retrieve. + :return: A `StringIO` object with the contents of the artifact + :raises HostError: If the artifact with the specified name does not exist. :class:`Metric` @@ -420,7 +546,7 @@ An :class:`Artifact` has the following attributes: it is the opposite of ``export``, but in general may also be discarded. - .. note:: whether a file is marked as ``log``/``data`` or ``raw`` + .. note:: Whether a file is marked as ``log``/``data`` or ``raw`` depends on how important it is to preserve this file, e.g. when archiving, vs how much space it takes up. 
Unlike ``export`` artifacts which are (almost) always diff --git a/external/workload-automation/doc/source/changes.rst b/external/workload-automation/doc/source/changes.rst index a8ce757320ec942bc26214f3152b805f08df0e02..982c8abcf129a5b2c7f80d7b02cae343216fdf56 100644 --- a/external/workload-automation/doc/source/changes.rst +++ b/external/workload-automation/doc/source/changes.rst @@ -2,9 +2,109 @@ What's New in Workload Automation ================================= -------------- +************* +Version 3.1.1 +************* + +Fixes/Improvements +================== + +Other +----- + - Improve formatting when displaying metrics + - Update revent binaries to include latest fixes + - Update DockerImage to use new released version of WA and Devlib + - Fix broken package on PyPi + +************* +Version 3.1.0 +************* + +New Features: +============== + +Commands +--------- + - ``create database``: Added :ref:`create subcommand ` + command in order to initialize a PostgresSQL database to allow for storing + WA output with the Postgres Output Processor. + +Output Processors: +------------------ + - ``Postgres``: Added output processor which can be used to populate a + Postgres database with the output generated from a WA run. + - ``logcat-regex``: Add new output processor to extract arbitrary "key" + "value" pairs from logcat. + +Configuration: +-------------- + - :ref:`Configuration Includes `: Add support for including + other YAML files inside agendas and config files using ``"include#:"`` + entries. + - :ref:`Section groups `: This allows for a ``group`` entry + to be specified for each section and will automatically cross product the + relevant sections with sections from other groups adding the relevant + classifiers. + +Framework: +---------- + - Added support for using the :ref:`OutputAPI ` with a + Postgres Database backend. Used to retrieve and + :ref:`process ` run data uploaded by the ``Postgres`` + output processor. + +Workloads: +---------- + - ``gfxbench-corporate``: Execute a set of on and offscreen graphical benchmarks from + GFXBench including Car Chase and Manhattan. + - ``glbench``: Measures the graphics performance of Android devices by + testing the underlying OpenGL (ES) implementation. + + +Fixes/Improvements +================== + +Framework: +---------- + - Remove quotes from ``sudo_cmd`` parameter default value due to changes in + devlib. + - Various Python 3 related fixes. + - Ensure plugin names are converted to identifiers internally to act more + consistently when dealing with names containing ``-``'s etc. + - Now correctly updates RunInfo with project and run name information. + - Add versioning support for POD structures with the ability to + automatically update data structures / formats to new versions. + +Commands: +--------- + - Fix revent target initialization. + - Fix revent argument validation. + +Workloads: +---------- + - ``Speedometer``: Close open tabs upon workload completion. + - ``jankbench``: Ensure that the logcat monitor thread is terminated + correctly to prevent left over adb processes. + - UiAutomator workloads are now able to dismiss android warning that a + workload has not been designed for the latest version of android. + +Other: +------ +- Report additional metadata about target, including: system_id, + page_size_kb. +- Uses cache directory to reduce target calls, e.g. will now use cached + version of TargetInfo if local copy is found. 
+- Update recommended :ref:`installation ` commands when installing from + github due to pip not following dependency links correctly. +- Fix incorrect parameter names in runtime parameter documentation. + + +-------------------------------------------------- + + +************* Version 3.0.0 -------------- +************* WA3 is a more or less from-scratch re-write of WA2. We have attempted to maintain configuration-level compatibility wherever possible (so WA2 agendas @@ -29,7 +129,7 @@ believe to be no longer useful. do the port yourselves :-) ). New Features -~~~~~~~~~~~~ +============ - Python 3 support. WA now runs on both Python 2 and Python 3. @@ -75,7 +175,7 @@ New Features .. _devlib: https://github.com/ARM-software/devlib Changes -~~~~~~~ +======= - Configuration files ``config.py`` are now specified in YAML format in ``config.yaml``. WA3 has support for automatic conversion of the default diff --git a/external/workload-automation/doc/source/developer_information/how_tos/processing_output.rst b/external/workload-automation/doc/source/developer_information/how_tos/processing_output.rst index 16d31a4769413c251c8da9d8e58cbb0635181a43..72151504d530b0c09b7446cba2e5c37fc404a05d 100644 --- a/external/workload-automation/doc/source/developer_information/how_tos/processing_output.rst +++ b/external/workload-automation/doc/source/developer_information/how_tos/processing_output.rst @@ -26,7 +26,8 @@ CPU frequency fixed to max, and once with CPU frequency fixed to min. Classifiers are used to indicate the configuration in the output. First, create the :class:`RunOutput` object, which is the main interface for -interacting with WA outputs. +interacting with WA outputs. Or alternatively a :class:`RunDatabaseOutput` +if storing your results in a postgres database. .. code-block:: python @@ -151,10 +152,6 @@ For the purposes of this report, they will be used to augment the metric's name. scores[workload][name][freq] = metric - rows = [] - for workload in sorted(scores.keys()): - wldata = scores[workload] - Once the metrics have been sorted, generate the report showing the delta between the two configurations (indicated by the "frequency" classifier) and highlight any unexpected deltas (based on the ``lower_is_better`` attribute of @@ -164,23 +161,27 @@ statically significant deltas.) .. code-block:: python - for name in sorted(wldata.keys()): - min_score = wldata[name]['min'].value - max_score = wldata[name]['max'].value - delta = max_score - min_score - units = wldata[name]['min'].units or '' - lib = wldata[name]['min'].lower_is_better + rows = [] + for workload in sorted(scores.keys()): + wldata = scores[workload] + + for name in sorted(wldata.keys()): + min_score = wldata[name]['min'].value + max_score = wldata[name]['max'].value + delta = max_score - min_score + units = wldata[name]['min'].units or '' + lib = wldata[name]['min'].lower_is_better - warn = '' - if (lib and delta > 0) or (not lib and delta < 0): - warn = '!!!' + warn = '' + if (lib and delta > 0) or (not lib and delta < 0): + warn = '!!!' 
- rows.append([workload, name, - '{:.3f}'.format(min_score), '{:.3f}'.format(max_score), - '{:.3f}'.format(delta), units, warn]) + rows.append([workload, name, + '{:.3f}'.format(min_score), '{:.3f}'.format(max_score), + '{:.3f}'.format(delta), units, warn]) - # separate workloads with a blank row - rows.append(['', '', '', '', '', '', '']) + # separate workloads with a blank row + rows.append(['', '', '', '', '', '', '']) write_table(rows, sys.stdout, align='<<>>><<', @@ -275,23 +276,23 @@ Below is the complete example code, and a report it generated for a sample run. for workload in sorted(scores.keys()): wldata = scores[workload] - for name in sorted(wldata.keys()): - min_score = wldata[name]['min'].value - max_score = wldata[name]['max'].value - delta = max_score - min_score - units = wldata[name]['min'].units or '' - lib = wldata[name]['min'].lower_is_better + for name in sorted(wldata.keys()): + min_score = wldata[name]['min'].value + max_score = wldata[name]['max'].value + delta = max_score - min_score + units = wldata[name]['min'].units or '' + lib = wldata[name]['min'].lower_is_better - warn = '' - if (lib and delta > 0) or (not lib and delta < 0): - warn = '!!!' + warn = '' + if (lib and delta > 0) or (not lib and delta < 0): + warn = '!!!' - rows.append([workload, name, - '{:.3f}'.format(min_score), '{:.3f}'.format(max_score), - '{:.3f}'.format(delta), units, warn]) + rows.append([workload, name, + '{:.3f}'.format(min_score), '{:.3f}'.format(max_score), + '{:.3f}'.format(delta), units, warn]) - # separate workloads with a blank row - rows.append(['', '', '', '', '', '', '']) + # separate workloads with a blank row + rows.append(['', '', '', '', '', '', '']) write_table(rows, sys.stdout, align='<<>>><<', diff --git a/external/workload-automation/doc/source/faq.rst b/external/workload-automation/doc/source/faq.rst index 1b5b739af0ae5e2c06e738af00456f6f4c356c4d..86a8fcccfc6c6ad6ad277ae50824a60bc5ce43f7 100644 --- a/external/workload-automation/doc/source/faq.rst +++ b/external/workload-automation/doc/source/faq.rst @@ -69,7 +69,40 @@ WA3 config file. **Q:** My Juno board keeps resetting upon starting WA even if it hasn't crashed. -------------------------------------------------------------------------------- -Please ensure that you do not have any other terminals (e.g. ``screen`` +**A** Please ensure that you do not have any other terminals (e.g. ``screen`` sessions) connected to the board's UART. When WA attempts to open the connection for its own use this can cause the board to reset if a connection is already present. + + +**Q:** I'm using the FPS instrument but I do not get any/correct results for my workload +----------------------------------------------------------------------------------------- + +**A:** If your device is running with Android 6.0 + then the default utility for +collecting fps metrics will be ``gfxinfo`` however this does not seem to be able +to extract any meaningful information for some workloads. In this case please +try setting the ``force_surfaceflinger`` parameter for the ``fps`` augmentation +to ``True``. This will attempt to guess the "View" for the workload +automatically however this is device specific and therefore may need +customizing. If this is required please open the application and execute +``dumpsys SurfaceFlinger --list`` on the device via adb. This will provide a +list of all views available for measuring. + +As an example, when trying to find the view for the AngryBirds Rio workload you +may get something like: + +.. 
code-block:: none + + ... + AppWindowToken{41dfe54 token=Token{77819a7 ActivityRecord{a151266 u0 com.rovio.angrybirdsrio/com.rovio.fusion.App t506}}}#0 + a3d001c com.rovio.angrybirdsrio/com.rovio.fusion.App#0 + Background for -SurfaceView - com.rovio.angrybirdsrio/com.rovio.fusion.App#0 + SurfaceView - com.rovio.angrybirdsrio/com.rovio.fusion.App#0 + com.rovio.angrybirdsrio/com.rovio.fusion.App#0 + boostedAnimationLayer#0 + mAboveAppWindowsContainers#0 + ... + +From these ``"SurfaceView - com.rovio.angrybirdsrio/com.rovio.fusion.App#0"`` is +the mostly likely the View that needs to be set as the ``view`` workload +parameter and will be picked up be the ``fps`` augmentation. diff --git a/external/workload-automation/doc/source/user_information/how_tos/agenda.rst b/external/workload-automation/doc/source/user_information/how_tos/agenda.rst index a386f15ac0242564434ed4e82c71faa4a48b7f2f..5bbec04d3a1fa34bea17cf6adc72dd056ea29f67 100644 --- a/external/workload-automation/doc/source/user_information/how_tos/agenda.rst +++ b/external/workload-automation/doc/source/user_information/how_tos/agenda.rst @@ -514,16 +514,16 @@ online, we can create an agenda as follows: - id: max_freq runtime_parameters: freq: max - group: frequency + group: frequency - id: min_cpus runtime_parameters: cpus: 1 - group: cpus + group: cpus - id: max_cpus runtime_parameters: cpus: 8 - group: cpus + group: cpus workloads: - dhrystone diff --git a/external/workload-automation/doc/source/user_information/user_reference/invocation.rst b/external/workload-automation/doc/source/user_information/user_reference/invocation.rst index 18e44a57f1f8410d5bb4bb0eb0f812106bedf122..ead05042132c3c70282cb194b9dcc843a286bf81 100644 --- a/external/workload-automation/doc/source/user_information/user_reference/invocation.rst +++ b/external/workload-automation/doc/source/user_information/user_reference/invocation.rst @@ -238,6 +238,33 @@ Which will produce something like:: This will be populated with default values which can then be customised for the particular use case. +Additionally the create command can be used to initialize (and update) a +Postgres database which can be used by the ``postgres`` output processor. + +The most of database connection parameters have a default value however they can +be overridden via command line arguments. When initializing the database WA will +also save the supplied parameters into the default user config file so that they +do not need to be specified time the output processor is used. + +As an example if we had a database server running on at 10.0.0.2 using the +standard port we could use the following command to initialize a database for +use with WA:: + + wa create database -a 10.0.0.2 -u my_username -p Pa55w0rd + +This will log into the database server with the supplied credentials and create +a database (defaulting to 'wa') and will save the configuration to the +``~/.workload_automation/config.yaml`` file. + +With updates to WA there may be changes to the database schema used. In this +case the create command can also be used with the ``-U`` flag to update the +database to use the new schema as follows:: + + wa create database -a 10.0.0.2 -u my_username -p Pa55w0rd -U + +This will upgrade the database sequentially until the database schema is using +the latest version. + .. 
_process-command: Process diff --git a/external/workload-automation/doc/source/user_information/user_reference/runtime_parameters.rst b/external/workload-automation/doc/source/user_information/user_reference/runtime_parameters.rst index f1ca0541fc22a6489a600fcbf5be20042ada3923..5e1472070cbf38dbf3a82c0cfbfde9cc53699449 100644 --- a/external/workload-automation/doc/source/user_information/user_reference/runtime_parameters.rst +++ b/external/workload-automation/doc/source/user_information/user_reference/runtime_parameters.rst @@ -98,7 +98,7 @@ CPUFreq :governor: A ``string`` that can be used to specify the governor for all cores if there are common governors available. -:governor_tunable: A ``dict`` that can be used to specify governor +:gov_tunables: A ``dict`` that can be used to specify governor tunables for all cores, unlike the other common parameters these are not validated at the beginning of the run therefore incorrect values will cause an error during runtime. @@ -113,7 +113,7 @@ CPUFreq :_governor: A ``string`` that can be used to specify the governor for cores of a particular type e.g. 'A72'. -:_governor_tunable: A ``dict`` that can be used to specify governor +:_gov_tunables: A ``dict`` that can be used to specify governor tunables for cores of a particular type e.g. 'A72', these are not validated at the beginning of the run therefore incorrect values will cause an error during runtime. @@ -129,7 +129,7 @@ CPUFreq :cpu_governor: A ``string`` that can be used to specify the governor for a particular core e.g. 'cpu0'. -:cpu_governor_tunable: A ``dict`` that can be used to specify governor +:cpu_gov_tunables: A ``dict`` that can be used to specify governor tunables for a particular core e.g. 'cpu0', these are not validated at the beginning of the run therefore incorrect values will cause an error during runtime. @@ -147,7 +147,7 @@ If big.LITTLE is detected for the device an additional set of parameters are ava :big_governor: A ``string`` that can be used to specify the governor for the big cores. -:big_governor_tunable: A ``dict`` that can be used to specify governor +:big_gov_tunables: A ``dict`` that can be used to specify governor tunables for the big cores, these are not validated at the beginning of the run therefore incorrect values will cause an error during runtime. @@ -162,7 +162,7 @@ If big.LITTLE is detected for the device an additional set of parameters are ava :little_governor: A ``string`` that can be used to specify the governor for the little cores. -:little_governor_tunable: A ``dict`` that can be used to specify governor +:little_gov_tunables: A ``dict`` that can be used to specify governor tunables for the little cores, these are not validated at the beginning of the run therefore incorrect values will cause an error during runtime. 
diff --git a/external/workload-automation/extras/Dockerfile b/external/workload-automation/extras/Dockerfile index cc0f7cf67980d971b8d74bf28693e0aea12e7898..4c97e2c5b55c4ae1b87cbcbb5441ec77a7587084 100644 --- a/external/workload-automation/extras/Dockerfile +++ b/external/workload-automation/extras/Dockerfile @@ -42,8 +42,8 @@ FROM ubuntu:17.10 # Please update the references below to use different versions of # devlib, WA or the Android SDK -ARG DEVLIB_REF=v1.0.0 -ARG WA_REF=v3.0.0 +ARG DEVLIB_REF=v1.1.0 +ARG WA_REF=v3.1.1 ARG ANDROID_SDK_URL=https://dl.google.com/android/repository/sdk-tools-linux-3859397.zip RUN apt-get update diff --git a/external/workload-automation/setup.py b/external/workload-automation/setup.py index 1c2d8e68ed40090062abc06bbbf4e9b39488affc..04eb9a20dd1c64e56393b29b1f545d7bed7bbf12 100755 --- a/external/workload-automation/setup.py +++ b/external/workload-automation/setup.py @@ -83,13 +83,13 @@ params = dict( 'colorama', # Printing with colors 'pyYAML', # YAML-formatted agenda parsing 'requests', # Fetch assets over HTTP - 'devlib>=1.0.dev1', # Interacting with devices + 'devlib>=1.1.dev1', # Interacting with devices 'louie-latest', # callbacks dispatch 'wrapt', # better decorators 'pandas>=0.23.0', # Data analysis and manipulation 'future', # Python 2-3 compatiblity ], - dependency_links=['https://github.com/ARM-software/devlib/tarball/master#egg=devlib-1.0.dev1'], + dependency_links=['https://github.com/ARM-software/devlib/tarball/master#egg=devlib-1.1.dev1'], extras_require={ 'other': ['jinja2'], 'test': ['nose', 'mock'], @@ -104,6 +104,7 @@ params = dict( 'License :: OSI Approved :: Apache Software License', 'Operating System :: POSIX :: Linux', 'Programming Language :: Python :: 2.7', + 'Programming Language :: Python :: 3', ], ) diff --git a/external/workload-automation/wa/assets/bin/arm64/revent b/external/workload-automation/wa/assets/bin/arm64/revent index 41bebc6198a233860b079b71839421490aa86ed8..bde4e14ac612756ea286d995f8139385b1345d9b 100755 Binary files a/external/workload-automation/wa/assets/bin/arm64/revent and b/external/workload-automation/wa/assets/bin/arm64/revent differ diff --git a/external/workload-automation/wa/assets/bin/armeabi/revent b/external/workload-automation/wa/assets/bin/armeabi/revent index 87d8d511ed5d399eb84492e75ac9326d4bb5de92..722aa8b2445c546e8920bdfb25178b730369cedf 100755 Binary files a/external/workload-automation/wa/assets/bin/armeabi/revent and b/external/workload-automation/wa/assets/bin/armeabi/revent differ diff --git a/external/workload-automation/wa/commands/create.py b/external/workload-automation/wa/commands/create.py index 24931cfa6a7e08bb4b8cc070ba073304f1bbf2e5..7db6dd523dc8a85bbeb96a89ddf7771e79f0cce4 100644 --- a/external/workload-automation/wa/commands/create.py +++ b/external/workload-automation/wa/commands/create.py @@ -40,6 +40,7 @@ from wa.framework.exception import ConfigError, CommandError from wa.instruments.energy_measurement import EnergyInstrumentBackend from wa.utils.misc import (ensure_directory_exists as _d, capitalize, ensure_file_directory_exists as _f) +from wa.utils.postgres import get_schema, POSTGRES_SCHEMA_DIR from wa.utils.serializer import yaml @@ -54,14 +55,21 @@ class CreateDatabaseSubcommand(SubCommand): output processor. 
""" - schemafilepath = 'postgres_schema.sql' + schemafilepath = os.path.join(POSTGRES_SCHEMA_DIR, 'postgres_schema.sql') + schemaupdatefilepath = os.path.join(POSTGRES_SCHEMA_DIR, 'postgres_schema_update_v{}.{}.sql') def __init__(self, *args, **kwargs): super(CreateDatabaseSubcommand, self).__init__(*args, **kwargs) self.sql_commands = None - self.schemaversion = None self.schema_major = None self.schema_minor = None + self.postgres_host = None + self.postgres_port = None + self.username = None + self.password = None + self.dbname = None + self.config_file = None + self.force = None def initialize(self, context): self.parser.add_argument( @@ -91,25 +99,35 @@ class CreateDatabaseSubcommand(SubCommand): self.parser.add_argument( '-x', '--schema-version', action='store_true', help='Display the current schema version.') + self.parser.add_argument( + '-U', '--upgrade', action='store_true', + help='Upgrade the database to use the latest schema version.') def execute(self, state, args): # pylint: disable=too-many-branches if not psycopg2: raise CommandError( 'The module psycopg2 is required for the wa ' + 'create database command.') - self.get_schema(self.schemafilepath) + + if args.dbname == 'postgres': + raise ValueError('Databasename to create cannot be postgres.') + + self._parse_args(args) + self.schema_major, self.schema_minor, self.sql_commands = get_schema(self.schemafilepath) # Display the version if needed and exit if args.schema_version: self.logger.info( - 'The current schema version is {}'.format(self.schemaversion)) + 'The current schema version is {}.{}'.format(self.schema_major, + self.schema_minor)) return - if args.dbname == 'postgres': - raise ValueError('Databasename to create cannot be postgres.') + if args.upgrade: + self.update_schema() + return # Open user configuration - with open(args.config_file, 'r') as config_file: + with open(self.config_file, 'r') as config_file: config = yaml.load(config_file) if 'postgres' in config and not args.force_update_config: raise CommandError( @@ -149,39 +167,158 @@ class CreateDatabaseSubcommand(SubCommand): # Attempt to create database try: - self.create_database(args) + self.create_database() except OperationalError as e: for handle in possible_connection_errors: predicate(e, handle) raise e # Update the configuration file - _update_configuration_file(args, config) - - def create_database(self, args): - _validate_version(args) - - _check_database_existence(args) - - _create_database_postgres(args) - - _apply_database_schema(args, self.sql_commands, self.schema_major, self.schema_minor) - - self.logger.debug( - "Successfully created the database {}".format(args.dbname)) - - def get_schema(self, schemafilepath): - postgres_output_processor_dir = os.path.dirname(__file__) - sqlfile = open(os.path.join( - postgres_output_processor_dir, schemafilepath)) - self.sql_commands = sqlfile.read() - sqlfile.close() - # Extract schema version - if self.sql_commands.startswith('--!VERSION'): - splitcommands = self.sql_commands.split('!ENDVERSION!\n') - self.schemaversion = splitcommands[0].strip('--!VERSION!') - (self.schema_major, self.schema_minor) = self.schemaversion.split('.') - self.sql_commands = splitcommands[1] + self._update_configuration_file(config) + + def create_database(self): + self._validate_version() + + self._check_database_existence() + + self._create_database_postgres() + + self._apply_database_schema(self.sql_commands, self.schema_major, self.schema_minor) + + self.logger.info( + "Successfully created the database 
{}".format(self.dbname)) + + def update_schema(self): + self._validate_version() + schema_major, schema_minor, _ = get_schema(self.schemafilepath) + meta_oid, current_major, current_minor = self._get_database_schema_version() + + while not (schema_major == current_major and schema_minor == current_minor): + current_minor = self._update_schema_minors(current_major, current_minor, meta_oid) + current_major, current_minor = self._update_schema_major(current_major, current_minor, meta_oid) + msg = "Database schema update of '{}' to v{}.{} complete" + self.logger.info(msg.format(self.dbname, schema_major, schema_minor)) + + def _update_schema_minors(self, major, minor, meta_oid): + # Upgrade all available minor versions + while True: + minor += 1 + schema_update = os.path.join(POSTGRES_SCHEMA_DIR, + self.schemaupdatefilepath.format(major, minor)) + if not os.path.exists(schema_update): + break + + _, _, sql_commands = get_schema(schema_update) + self._apply_database_schema(sql_commands, major, minor, meta_oid) + msg = "Updated the database schema to v{}.{}" + self.logger.debug(msg.format(major, minor)) + + # Return last existing update file version + return minor - 1 + + def _update_schema_major(self, current_major, current_minor, meta_oid): + current_major += 1 + schema_update = os.path.join(POSTGRES_SCHEMA_DIR, + self.schemaupdatefilepath.format(current_major, 0)) + if not os.path.exists(schema_update): + return (current_major - 1, current_minor) + + # Reset minor to 0 with major version bump + current_minor = 0 + _, _, sql_commands = get_schema(schema_update) + self._apply_database_schema(sql_commands, current_major, current_minor, meta_oid) + msg = "Updated the database schema to v{}.{}" + self.logger.debug(msg.format(current_major, current_minor)) + return (current_major, current_minor) + + def _validate_version(self): + conn = connect(user=self.username, + password=self.password, host=self.postgres_host, port=self.postgres_port) + if conn.server_version < 90400: + msg = 'Postgres version too low. Please ensure that you are using atleast v9.4' + raise CommandError(msg) + + def _get_database_schema_version(self): + conn = connect(dbname=self.dbname, user=self.username, + password=self.password, host=self.postgres_host, port=self.postgres_port) + cursor = conn.cursor() + cursor.execute('''SELECT + DatabaseMeta.oid, + DatabaseMeta.schema_major, + DatabaseMeta.schema_minor + FROM + DatabaseMeta;''') + return cursor.fetchone() + + def _check_database_existence(self): + try: + connect(dbname=self.dbname, user=self.username, + password=self.password, host=self.postgres_host, port=self.postgres_port) + except OperationalError as e: + # Expect an operational error (database's non-existence) + if not re.compile('FATAL: database ".*" does not exist').match(str(e)): + raise e + else: + if not self.force: + raise CommandError( + "Database {} already exists. ".format(self.dbname) + + "Please specify the -f flag to create it from afresh." 
+ ) + + def _create_database_postgres(self): + conn = connect(dbname='postgres', user=self.username, + password=self.password, host=self.postgres_host, port=self.postgres_port) + conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT) + cursor = conn.cursor() + cursor.execute('DROP DATABASE IF EXISTS ' + self.dbname) + cursor.execute('CREATE DATABASE ' + self.dbname) + conn.commit() + cursor.close() + conn.close() + + def _apply_database_schema(self, sql_commands, schema_major, schema_minor, meta_uuid=None): + conn = connect(dbname=self.dbname, user=self.username, + password=self.password, host=self.postgres_host, port=self.postgres_port) + cursor = conn.cursor() + cursor.execute(sql_commands) + + if not meta_uuid: + extras.register_uuid() + meta_uuid = uuid.uuid4() + cursor.execute("INSERT INTO DatabaseMeta VALUES (%s, %s, %s)", + (meta_uuid, + schema_major, + schema_minor + )) + else: + cursor.execute("UPDATE DatabaseMeta SET schema_major = %s, schema_minor = %s WHERE oid = %s;", + (schema_major, + schema_minor, + meta_uuid + )) + + conn.commit() + cursor.close() + conn.close() + + def _update_configuration_file(self, config): + ''' Update the user configuration file with the newly created database's + configuration. + ''' + config['postgres'] = OrderedDict( + [('host', self.postgres_host), ('port', self.postgres_port), + ('dbname', self.dbname), ('username', self.username), ('password', self.password)]) + with open(self.config_file, 'w+') as config_file: + yaml.dump(config, config_file) + + def _parse_args(self, args): + self.postgres_host = args.postgres_host + self.postgres_port = args.postgres_port + self.username = args.username + self.password = args.password + self.dbname = args.dbname + self.config_file = args.config_file + self.force = args.force class CreateAgendaSubcommand(SubCommand): @@ -429,70 +566,3 @@ def get_class_name(name, postfix=''): def touch(path): with open(path, 'w') as _: # NOQA pass - - -def _validate_version(args): - conn = connect(user=args.username, - password=args.password, host=args.postgres_host, port=args.postgres_port) - if conn.server_version < 90400: - msg = 'Postgres version too low. Please ensure that you are using atleast v9.4' - raise CommandError(msg) - - -def _check_database_existence(args): - try: - connect(dbname=args.dbname, user=args.username, - password=args.password, host=args.postgres_host, port=args.postgres_port) - except OperationalError as e: - # Expect an operational error (database's non-existence) - if not re.compile('FATAL: database ".*" does not exist').match(str(e)): - raise e - else: - if not args.force: - raise CommandError( - "Database {} already exists. ".format(args.dbname) + - "Please specify the -f flag to create it from afresh." 
- ) - - -def _create_database_postgres(args): # pylint: disable=no-self-use - conn = connect(dbname='postgres', user=args.username, - password=args.password, host=args.postgres_host, port=args.postgres_port) - conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT) - cursor = conn.cursor() - cursor.execute('DROP DATABASE IF EXISTS ' + args.dbname) - cursor.execute('CREATE DATABASE ' + args.dbname) - conn.commit() - cursor.close() - conn.close() - - -def _apply_database_schema(args, sql_commands, schema_major, schema_minor): - conn = connect(dbname=args.dbname, user=args.username, - password=args.password, host=args.postgres_host, port=args.postgres_port) - cursor = conn.cursor() - cursor.execute(sql_commands) - - extras.register_uuid() - cursor.execute("INSERT INTO DatabaseMeta VALUES (%s, %s, %s)", - ( - uuid.uuid4(), - schema_major, - schema_minor - ) - ) - - conn.commit() - cursor.close() - conn.close() - - -def _update_configuration_file(args, config): - ''' Update the user configuration file with the newly created database's - configuration. - ''' - config['postgres'] = OrderedDict( - [('host', args.postgres_host), ('port', args.postgres_port), - ('dbname', args.dbname), ('username', args.username), ('password', args.password)]) - with open(args.config_file, 'w+') as config_file: - yaml.dump(config, config_file) diff --git a/external/workload-automation/wa/commands/postgres_schema.sql b/external/workload-automation/wa/commands/postgres_schemas/postgres_schema.sql similarity index 84% rename from external/workload-automation/wa/commands/postgres_schema.sql rename to external/workload-automation/wa/commands/postgres_schemas/postgres_schema.sql index 26bca17ac8b002b8f2dccd4876f0d075b8bef4b1..7510c77857d51966894d4b4ace5513cee7c28df8 100644 --- a/external/workload-automation/wa/commands/postgres_schema.sql +++ b/external/workload-automation/wa/commands/postgres_schemas/postgres_schema.sql @@ -1,4 +1,4 @@ ---!VERSION!1.1!ENDVERSION! +--!VERSION!1.2!ENDVERSION! 
CREATE EXTENSION IF NOT EXISTS "uuid-ossp"; CREATE EXTENSION IF NOT EXISTS "lo"; @@ -13,6 +13,7 @@ DROP TABLE IF EXISTS Metrics; DROP TABLE IF EXISTS Augmentations; DROP TABLE IF EXISTS Jobs_Augs; DROP TABLE IF EXISTS ResourceGetters; +DROP TABLE IF EXISTS Resource_Getters; DROP TABLE IF EXISTS Events; DROP TABLE IF EXISTS Targets; DROP TABLE IF EXISTS Jobs; @@ -42,6 +43,7 @@ CREATE TABLE Runs ( timestamp timestamp, run_name text, project text, + project_stage text, retry_on_status status_enum[], max_retries int, bail_on_init_failure boolean, @@ -49,7 +51,11 @@ CREATE TABLE Runs ( run_uuid uuid, start_time timestamp, end_time timestamp, + duration float, metadata jsonb, + _pod_version int, + _pod_serialization_version int, + state jsonb, PRIMARY KEY (oid) ); @@ -57,12 +63,14 @@ CREATE TABLE Jobs ( oid uuid NOT NULL, run_oid uuid NOT NULL references Runs(oid), status status_enum, - retries int, + retry int, label text, job_id text, iterations int, workload_name text, metadata jsonb, + _pod_version int, + _pod_serialization_version int, PRIMARY KEY (oid) ); @@ -82,6 +90,12 @@ CREATE TABLE Targets ( kernel_sha1 text, kernel_config text[], sched_features text[], + page_size_kb int, + screen_resolution int[], + prop json, + android_id text, + _pod_version int, + _pod_serialization_version int, PRIMARY KEY (oid) ); @@ -91,10 +105,12 @@ CREATE TABLE Events ( job_oid uuid references Jobs(oid), timestamp timestamp, message text, + _pod_version int, + _pod_serialization_version int, PRIMARY KEY (oid) ); -CREATE TABLE ResourceGetters ( +CREATE TABLE Resource_Getters ( oid uuid NOT NULL, run_oid uuid NOT NULL references Runs(oid), name text, @@ -123,6 +139,8 @@ CREATE TABLE Metrics ( value double precision, units text, lower_is_better boolean, + _pod_version int, + _pod_serialization_version int, PRIMARY KEY (oid) ); @@ -144,6 +162,8 @@ CREATE TABLE Artifacts ( large_object_uuid uuid NOT NULL references LargeObjects(oid), description text, kind text, + _pod_version int, + _pod_serialization_version int, PRIMARY KEY (oid) ); @@ -151,6 +171,8 @@ CREATE TABLE Classifiers ( oid uuid NOT NULL, artifact_oid uuid references Artifacts(oid), metric_oid uuid references Metrics(oid), + job_oid uuid references Jobs(oid), + run_oid uuid references Runs(oid), key text, value text, PRIMARY KEY (oid) @@ -161,7 +183,7 @@ CREATE TABLE Parameters ( run_oid uuid NOT NULL references Runs(oid), job_oid uuid references Jobs(oid), augmentation_oid uuid references Augmentations(oid), - resource_getter_oid uuid references ResourceGetters(oid), + resource_getter_oid uuid references Resource_Getters(oid), name text, value text, value_type text, diff --git a/external/workload-automation/wa/commands/postgres_schemas/postgres_schema_update_v1.2.sql b/external/workload-automation/wa/commands/postgres_schemas/postgres_schema_update_v1.2.sql new file mode 100644 index 0000000000000000000000000000000000000000..1c9822267e1d8bc7505e8629f0530e15d1b18fd8 --- /dev/null +++ b/external/workload-automation/wa/commands/postgres_schemas/postgres_schema_update_v1.2.sql @@ -0,0 +1,30 @@ +ALTER TABLE resourcegetters RENAME TO resource_getters; + +ALTER TABLE classifiers ADD COLUMN job_oid uuid references Jobs(oid); +ALTER TABLE classifiers ADD COLUMN run_oid uuid references Runs(oid); + +ALTER TABLE targets ADD COLUMN page_size_kb int; +ALTER TABLE targets ADD COLUMN screen_resolution int[]; +ALTER TABLE targets ADD COLUMN prop text; +ALTER TABLE targets ADD COLUMN android_id text; +ALTER TABLE targets ADD COLUMN _pod_version int; +ALTER TABLE 
targets ADD COLUMN _pod_serialization_version int; + +ALTER TABLE jobs RENAME COLUMN retries TO retry; +ALTER TABLE jobs ADD COLUMN _pod_version int; +ALTER TABLE jobs ADD COLUMN _pod_serialization_version int; + +ALTER TABLE runs ADD COLUMN project_stage text; +ALTER TABLE runs ADD COLUMN state jsonb; +ALTER TABLE runs ADD COLUMN duration float; +ALTER TABLE runs ADD COLUMN _pod_version int; +ALTER TABLE runs ADD COLUMN _pod_serialization_version int; + +ALTER TABLE artifacts ADD COLUMN _pod_version int; +ALTER TABLE artifacts ADD COLUMN _pod_serialization_version int; + +ALTER TABLE events ADD COLUMN _pod_version int; +ALTER TABLE events ADD COLUMN _pod_serialization_version int; + +ALTER TABLE metrics ADD COLUMN _pod_version int; +ALTER TABLE metrics ADD COLUMN _pod_serialization_version int; diff --git a/external/workload-automation/wa/commands/show.py b/external/workload-automation/wa/commands/show.py index 3d673358aba28330b7404265e739a3c3bb26b124..45531a4c4d38b733a018a83c294fedcdb9a30754 100644 --- a/external/workload-automation/wa/commands/show.py +++ b/external/workload-automation/wa/commands/show.py @@ -20,7 +20,8 @@ import sys from subprocess import call, Popen, PIPE -from pipes import quote + +from devlib.utils.misc import escape_double_quotes from wa import Command from wa.framework import pluginloader @@ -86,7 +87,7 @@ class ShowCommand(Command): title = '.TH {}{} 7'.format(kind, plugin_name) output = '\n'.join([title, body]) - call('echo {} | man -l -'.format(quote(output)), shell=True) + call('echo "{}" | man -l -'.format(escape_double_quotes(output)), shell=True) else: print(rst_output) # pylint: disable=superfluous-parens diff --git a/external/workload-automation/wa/framework/configuration/core.py b/external/workload-automation/wa/framework/configuration/core.py index 2325181c8705710af8ac831bbcb404516d735fe5..c69341ff181b310d08d98667e8e84f52a8994da8 100644 --- a/external/workload-automation/wa/framework/configuration/core.py +++ b/external/workload-automation/wa/framework/configuration/core.py @@ -22,7 +22,7 @@ from wa.utils import log from wa.utils.misc import (get_article, merge_config_values) from wa.utils.types import (identifier, integer, boolean, list_of_strings, list_of, toggle_set, obj_dict, enum) -from wa.utils.serializer import is_pod +from wa.utils.serializer import is_pod, Podable # Mapping for kind conversion; see docs for convert_types below @@ -110,7 +110,9 @@ class status_list(list): list.append(self, str(item).upper()) -class LoggingConfig(dict): +class LoggingConfig(Podable, dict): + + _pod_serialization_version = 1 defaults = { 'file_format': '%(asctime)s %(levelname)-8s %(name)s: %(message)s', @@ -121,9 +123,14 @@ class LoggingConfig(dict): @staticmethod def from_pod(pod): - return LoggingConfig(pod) + pod = LoggingConfig._upgrade_pod(pod) + pod_version = pod.pop('_pod_version') + instance = LoggingConfig(pod) + instance._pod_version = pod_version # pylint: disable=protected-access + return instance def __init__(self, config=None): + super(LoggingConfig, self).__init__() dict.__init__(self) if isinstance(config, dict): config = {identifier(k.lower()): v for k, v in config.items()} @@ -142,7 +149,14 @@ class LoggingConfig(dict): raise ValueError(config) def to_pod(self): - return self + pod = super(LoggingConfig, self).to_pod() + pod.update(self) + return pod + + @staticmethod + def _pod_upgrade_v1(pod): + pod['_pod_version'] = pod.get('_pod_version', 1) + return pod def expanded_path(path): @@ -347,8 +361,9 @@ def _to_pod(cfg_point, value): raise 
ValueError(msg.format(cfg_point.name, value)) -class Configuration(object): +class Configuration(Podable): + _pod_serialization_version = 1 config_points = [] name = '' @@ -357,7 +372,7 @@ class Configuration(object): @classmethod def from_pod(cls, pod): - instance = cls() + instance = super(Configuration, cls).from_pod(pod) for cfg_point in cls.config_points: if cfg_point.name in pod: value = pod.pop(cfg_point.name) @@ -370,6 +385,7 @@ class Configuration(object): return instance def __init__(self): + super(Configuration, self).__init__() for confpoint in self.config_points: confpoint.set_value(self, check_mandatory=False) @@ -393,12 +409,17 @@ class Configuration(object): cfg_point.validate(self) def to_pod(self): - pod = {} + pod = super(Configuration, self).to_pod() for cfg_point in self.config_points: value = getattr(self, cfg_point.name, None) pod[cfg_point.name] = _to_pod(cfg_point, value) return pod + @staticmethod + def _pod_upgrade_v1(pod): + pod['_pod_version'] = pod.get('_pod_version', 1) + return pod + # This configuration for the core WA framework class MetaConfiguration(Configuration): diff --git a/external/workload-automation/wa/framework/configuration/execution.py b/external/workload-automation/wa/framework/configuration/execution.py index 8e1bb373374b13966691c74eca74c059f799c1df..9b65af2926d69dc92b049cf65aea7d5e4449ebb6 100644 --- a/external/workload-automation/wa/framework/configuration/execution.py +++ b/external/workload-automation/wa/framework/configuration/execution.py @@ -27,24 +27,35 @@ from wa.framework.configuration.plugin_cache import PluginCache from wa.framework.exception import NotFoundError from wa.framework.job import Job from wa.utils import log +from wa.utils.serializer import Podable -class CombinedConfig(object): +class CombinedConfig(Podable): + + _pod_serialization_version = 1 @staticmethod def from_pod(pod): - instance = CombinedConfig() + instance = super(CombinedConfig, CombinedConfig).from_pod(pod) instance.settings = MetaConfiguration.from_pod(pod.get('settings', {})) instance.run_config = RunConfiguration.from_pod(pod.get('run_config', {})) return instance def __init__(self, settings=None, run_config=None): # pylint: disable=redefined-outer-name + super(CombinedConfig, self).__init__() self.settings = settings self.run_config = run_config def to_pod(self): - return {'settings': self.settings.to_pod(), - 'run_config': self.run_config.to_pod()} + pod = super(CombinedConfig, self).to_pod() + pod['settings'] = self.settings.to_pod() + pod['run_config'] = self.run_config.to_pod() + return pod + + @staticmethod + def _pod_upgrade_v1(pod): + pod['_pod_version'] = pod.get('_pod_version', 1) + return pod class ConfigManager(object): diff --git a/external/workload-automation/wa/framework/execution.py b/external/workload-automation/wa/framework/execution.py index ebcb6860d79f74289809eba5b08e125cbb405f73..ac386ba9d7fcaee8ce4c3d8ec957de18280b8423 100644 --- a/external/workload-automation/wa/framework/execution.py +++ b/external/workload-automation/wa/framework/execution.py @@ -376,12 +376,12 @@ class Executor(object): try: self.do_execute(context) except KeyboardInterrupt as e: - context.run_output.status = 'ABORTED' + context.run_output.status = Status.ABORTED log.log_error(e, self.logger) context.write_output() raise except Exception as e: - context.run_output.status = 'FAILED' + context.run_output.status = Status.FAILED log.log_error(e, self.logger) context.write_output() raise diff --git a/external/workload-automation/wa/framework/output.py 
b/external/workload-automation/wa/framework/output.py index c686b8c4a6d0d1dfdd52ec9174b8c5e4a4c57570..8ab10b6776726dfd162df2c1be3d80f52ca13aa2 100644 --- a/external/workload-automation/wa/framework/output.py +++ b/external/workload-automation/wa/framework/output.py @@ -13,23 +13,33 @@ # limitations under the License. # +try: + import psycopg2 + from psycopg2 import Error as Psycopg2Error +except ImportError: + psycopg2 = None + Psycopg2Error = None + import logging import os import shutil -from collections import OrderedDict +from collections import OrderedDict, defaultdict from copy import copy, deepcopy from datetime import datetime +from io import StringIO import devlib from wa.framework.configuration.core import JobSpec, Status from wa.framework.configuration.execution import CombinedConfig -from wa.framework.exception import HostError +from wa.framework.exception import HostError, SerializerSyntaxError, ConfigError from wa.framework.run import RunState, RunInfo from wa.framework.target.info import TargetInfo from wa.framework.version import get_wa_version_with_commit -from wa.utils.misc import touch, ensure_directory_exists, isiterable -from wa.utils.serializer import write_pod, read_pod +from wa.utils.doc import format_simple_table +from wa.utils.misc import touch, ensure_directory_exists, isiterable, format_ordered_dict +from wa.utils.postgres import get_schema_versions +from wa.utils.serializer import write_pod, read_pod, Podable, json from wa.utils.types import enum, numeric @@ -166,7 +176,35 @@ class Output(object): return os.path.basename(self.basepath) -class RunOutput(Output): +class RunOutputCommon(object): + ''' Split out common functionality to form a second base of + the RunOutput classes + ''' + @property + def run_config(self): + if self._combined_config: + return self._combined_config.run_config + + @property + def settings(self): + if self._combined_config: + return self._combined_config.settings + + def get_job_spec(self, spec_id): + for spec in self.job_specs: + if spec.id == spec_id: + return spec + return None + + def list_workloads(self): + workloads = [] + for job in self.jobs: + if job.label not in workloads: + workloads.append(job.label) + return workloads + + +class RunOutput(Output, RunOutputCommon): kind = 'run' @@ -207,16 +245,6 @@ class RunOutput(Output): path = os.path.join(self.basepath, '__failed') return ensure_directory_exists(path) - @property - def run_config(self): - if self._combined_config: - return self._combined_config.run_config - - @property - def settings(self): - if self._combined_config: - return self._combined_config.settings - @property def augmentations(self): run_augs = set([]) @@ -302,19 +330,6 @@ class RunOutput(Output): shutil.move(job_output.basepath, failed_path) job_output.basepath = failed_path - def get_job_spec(self, spec_id): - for spec in self.job_specs: - if spec.id == spec_id: - return spec - return None - - def list_workloads(self): - workloads = [] - for job in self.jobs: - if job.label not in workloads: - workloads.append(job.label) - return workloads - class JobOutput(Output): @@ -332,12 +347,14 @@ class JobOutput(Output): self.reload() -class Result(object): +class Result(Podable): + + _pod_serialization_version = 1 @staticmethod def from_pod(pod): - instance = Result() - instance.status = Status(pod['status']) + instance = super(Result, Result).from_pod(pod) + instance.status = Status.from_pod(pod['status']) instance.metrics = [Metric.from_pod(m) for m in pod['metrics']] instance.artifacts = [Artifact.from_pod(a) for 
a in pod['artifacts']] instance.events = [Event.from_pod(e) for e in pod['events']] @@ -347,6 +364,7 @@ class Result(object): def __init__(self): # pylint: disable=no-member + super(Result, self).__init__() self.status = Status.NEW self.metrics = [] self.artifacts = [] @@ -430,21 +448,27 @@ class Result(object): self.metadata[key] = args[0] def to_pod(self): - return dict( - status=str(self.status), - metrics=[m.to_pod() for m in self.metrics], - artifacts=[a.to_pod() for a in self.artifacts], - events=[e.to_pod() for e in self.events], - classifiers=copy(self.classifiers), - metadata=deepcopy(self.metadata), - ) + pod = super(Result, self).to_pod() + pod['status'] = self.status.to_pod() + pod['metrics'] = [m.to_pod() for m in self.metrics] + pod['artifacts'] = [a.to_pod() for a in self.artifacts] + pod['events'] = [e.to_pod() for e in self.events] + pod['classifiers'] = copy(self.classifiers) + pod['metadata'] = deepcopy(self.metadata) + return pod + + @staticmethod + def _pod_upgrade_v1(pod): + pod['_pod_version'] = pod.get('_pod_version', 1) + pod['status'] = Status(pod['status']).to_pod() + return pod ARTIFACT_TYPES = ['log', 'meta', 'data', 'export', 'raw'] ArtifactType = enum(ARTIFACT_TYPES) -class Artifact(object): +class Artifact(Podable): """ This is an artifact generated during execution/post-processing of a workload. Unlike metrics, this represents an actual artifact, such as a @@ -492,10 +516,16 @@ class Artifact(object): """ + _pod_serialization_version = 1 + @staticmethod def from_pod(pod): + pod = Artifact._upgrade_pod(pod) + pod_version = pod.pop('_pod_version') pod['kind'] = ArtifactType(pod['kind']) - return Artifact(**pod) + instance = Artifact(**pod) + instance._pod_version = pod_version # pylint: disable =protected-access + return instance def __init__(self, name, path, kind, description=None, classifiers=None): """" @@ -515,6 +545,7 @@ class Artifact(object): used to identify sub-tests). """ + super(Artifact, self).__init__() self.name = name self.path = path.replace('/', os.sep) if path is not None else path try: @@ -526,10 +557,16 @@ class Artifact(object): self.classifiers = classifiers or {} def to_pod(self): - pod = copy(self.__dict__) + pod = super(Artifact, self).to_pod() + pod.update(self.__dict__) pod['kind'] = str(self.kind) return pod + @staticmethod + def _pod_upgrade_v1(pod): + pod['_pod_version'] = pod.get('_pod_version', 1) + return pod + def __str__(self): return self.path @@ -537,7 +574,7 @@ class Artifact(object): return '{} ({}): {}'.format(self.name, self.kind, self.path) -class Metric(object): +class Metric(Podable): """ This is a single metric collected from executing a workload. @@ -554,15 +591,20 @@ class Metric(object): to identify sub-tests). 
""" - __slots__ = ['name', 'value', 'units', 'lower_is_better', 'classifiers'] + _pod_serialization_version = 1 @staticmethod def from_pod(pod): - return Metric(**pod) + pod = Metric._upgrade_pod(pod) + pod_version = pod.pop('_pod_version') + instance = Metric(**pod) + instance._pod_version = pod_version # pylint: disable =protected-access + return instance def __init__(self, name, value, units=None, lower_is_better=False, classifiers=None): + super(Metric, self).__init__() self.name = name self.value = numeric(value) self.units = units @@ -570,13 +612,18 @@ class Metric(object): self.classifiers = classifiers or {} def to_pod(self): - return dict( - name=self.name, - value=self.value, - units=self.units, - lower_is_better=self.lower_is_better, - classifiers=self.classifiers, - ) + pod = super(Metric, self).to_pod() + pod['name'] = self.name + pod['value'] = self.value + pod['units'] = self.units + pod['lower_is_better'] = self.lower_is_better + pod['classifiers'] = self.classifiers + return pod + + @staticmethod + def _pod_upgrade_v1(pod): + pod['_pod_version'] = pod.get('_pod_version', 1) + return pod def __str__(self): result = '{}: {}'.format(self.name, self.value) @@ -588,23 +635,27 @@ class Metric(object): def __repr__(self): text = self.__str__() if self.classifiers: - return '<{} {}>'.format(text, self.classifiers) + return '<{} {}>'.format(text, format_ordered_dict(self.classifiers)) else: return '<{}>'.format(text) -class Event(object): +class Event(Podable): """ An event that occured during a run. """ __slots__ = ['timestamp', 'message'] + _pod_serialization_version = 1 @staticmethod def from_pod(pod): + pod = Event._upgrade_pod(pod) + pod_version = pod.pop('_pod_version') instance = Event(pod['message']) instance.timestamp = pod['timestamp'] + instance._pod_version = pod_version # pylint: disable =protected-access return instance @property @@ -616,14 +667,20 @@ class Event(object): return result def __init__(self, message): + super(Event, self).__init__() self.timestamp = datetime.utcnow() - self.message = message + self.message = str(message) def to_pod(self): - return dict( - timestamp=self.timestamp, - message=self.message, - ) + pod = super(Event, self).to_pod() + pod['timestamp'] = self.timestamp + pod['message'] = self.message + return pod + + @staticmethod + def _pod_upgrade_v1(pod): + pod['_pod_version'] = pod.get('_pod_version', 1) + return pod def __str__(self): return '[{}] {}'.format(self.timestamp, self.message) @@ -690,3 +747,467 @@ def _save_raw_config(meta_dir, state): basename = os.path.basename(source) dest_path = os.path.join(raw_config_dir, 'cfg{}-{}'.format(i, basename)) shutil.copy(source, dest_path) + + +class DatabaseOutput(Output): + + kind = None + + @property + def resultfile(self): + if self.conn is None or self.oid is None: + return {} + pod = self._get_pod_version() + pod['metrics'] = self._get_metrics() + pod['status'] = self._get_status() + pod['classifiers'] = self._get_classifiers(self.oid, 'run') + pod['events'] = self._get_events() + pod['artifacts'] = self._get_artifacts() + return pod + + @staticmethod + def _build_command(columns, tables, conditions=None, joins=None): + cmd = '''SELECT\n\t{}\nFROM\n\t{}'''.format(',\n\t'.join(columns), ',\n\t'.join(tables)) + if joins: + for join in joins: + cmd += '''\nLEFT JOIN {} ON {}'''.format(join[0], join[1]) + if conditions: + cmd += '''\nWHERE\n\t{}'''.format('\nAND\n\t'.join(conditions)) + return cmd + ';' + + def __init__(self, conn, oid=None, reload=True): # pylint: 
disable=super-init-not-called + self.conn = conn + self.oid = oid + self.result = None + if reload: + self.reload() + + def __repr__(self): + return '<{} {}>'.format(self.__class__.__name__, self.oid) + + def __str__(self): + return self.oid + + def reload(self): + try: + self.result = Result.from_pod(self.resultfile) + except Exception as e: # pylint: disable=broad-except + self.result = Result() + self.result.status = Status.UNKNOWN + self.add_event(str(e)) + + def get_artifact_path(self, name): + artifact = self.get_artifact(name) + artifact = StringIO(self.conn.lobject(int(artifact.path)).read()) + self.conn.commit() + return artifact + + # pylint: disable=too-many-locals + def _read_db(self, columns, tables, conditions=None, join=None, as_dict=True): + # Automatically remove table name from column when using column names as keys or + # allow for column names to be aliases when retrieving the data, + # (db_column_name, alias) + db_columns = [] + aliases_colunms = [] + for column in columns: + if isinstance(column, tuple): + db_columns.append(column[0]) + aliases_colunms.append(column[1]) + else: + db_columns.append(column) + aliases_colunms.append(column.rsplit('.', 1)[-1]) + + cmd = self._build_command(db_columns, tables, conditions, join) + + logger.debug(cmd) + with self.conn.cursor() as cursor: + cursor.execute(cmd) + results = cursor.fetchall() + self.conn.commit() + + if not as_dict: + return results + + # Format the output dict using column names as keys + output = [] + for result in results: + entry = {} + for k, v in zip(aliases_colunms, result): + entry[k] = v + output.append(entry) + return output + + def _get_pod_version(self): + columns = ['_pod_version', '_pod_serialization_version'] + tables = ['{}s'.format(self.kind)] + conditions = ['{}s.oid = \'{}\''.format(self.kind, self.oid)] + results = self._read_db(columns, tables, conditions) + if results: + return results[0] + else: + return None + + def _populate_classifers(self, pod, kind): + for entry in pod: + oid = entry.pop('oid') + entry['classifiers'] = self._get_classifiers(oid, kind) + return pod + + def _get_classifiers(self, oid, kind): + columns = ['classifiers.key', 'classifiers.value'] + tables = ['classifiers'] + conditions = ['{}_oid = \'{}\''.format(kind, oid)] + results = self._read_db(columns, tables, conditions, as_dict=False) + classifiers = {} + for (k, v) in results: + classifiers[k] = v + return classifiers + + def _get_metrics(self): + columns = ['metrics.name', 'metrics.value', 'metrics.units', + 'metrics.lower_is_better', + 'metrics.oid', 'metrics._pod_version', + 'metrics._pod_serialization_version'] + tables = ['metrics'] + joins = [('classifiers', 'classifiers.metric_oid = metrics.oid')] + conditions = ['metrics.{}_oid = \'{}\''.format(self.kind, self.oid)] + pod = self._read_db(columns, tables, conditions, joins) + return self._populate_classifers(pod, 'metric') + + def _get_status(self): + columns = ['{}s.status'.format(self.kind)] + tables = ['{}s'.format(self.kind)] + conditions = ['{}s.oid = \'{}\''.format(self.kind, self.oid)] + results = self._read_db(columns, tables, conditions, as_dict=False) + if results: + return results[0][0] + else: + return None + + def _get_artifacts(self): + columns = ['artifacts.name', 'artifacts.description', 'artifacts.kind', + ('largeobjects.lo_oid', 'path'), 'artifacts.oid', + 'artifacts._pod_version', 'artifacts._pod_serialization_version'] + tables = ['largeobjects', 'artifacts'] + joins = [('classifiers', 'classifiers.artifact_oid = artifacts.oid')] + 
conditions = ['artifacts.{}_oid = \'{}\''.format(self.kind, self.oid), + 'artifacts.large_object_uuid = largeobjects.oid', + 'artifacts.job_oid IS NULL'] + pod = self._read_db(columns, tables, conditions, joins) + for artifact in pod: + artifact['path'] = str(artifact['path']) + return self._populate_classifers(pod, 'metric') + + def _get_events(self): + columns = ['events.message', 'events.timestamp'] + tables = ['events'] + conditions = ['events.{}_oid = \'{}\''.format(self.kind, self.oid)] + return self._read_db(columns, tables, conditions) + + +def kernel_config_from_db(raw): + kernel_config = {} + for k, v in zip(raw[0], raw[1]): + kernel_config[k] = v + return kernel_config + + +class RunDatabaseOutput(DatabaseOutput, RunOutputCommon): + + kind = 'run' + + @property + def basepath(self): + return 'db:({})-{}@{}:{}'.format(self.dbname, self.user, + self.host, self.port) + + @property + def augmentations(self): + columns = ['augmentations.name'] + tables = ['augmentations'] + conditions = ['augmentations.run_oid = \'{}\''.format(self.oid)] + results = self._read_db(columns, tables, conditions, as_dict=False) + return [a for augs in results for a in augs] + + @property + def _db_infofile(self): + columns = ['start_time', 'project', ('run_uuid', 'uuid'), 'end_time', + 'run_name', 'duration', '_pod_version', '_pod_serialization_version'] + tables = ['runs'] + conditions = ['runs.run_uuid = \'{}\''.format(self.run_uuid)] + pod = self._read_db(columns, tables, conditions) + if not pod: + return {} + return pod[0] + + @property + def _db_targetfile(self): + columns = ['os', 'is_rooted', 'target', 'abi', 'cpus', 'os_version', + 'hostid', 'hostname', 'kernel_version', 'kernel_release', + 'kernel_sha1', 'kernel_config', 'sched_features', + '_pod_version', '_pod_serialization_version'] + tables = ['targets'] + conditions = ['targets.run_oid = \'{}\''.format(self.oid)] + pod = self._read_db(columns, tables, conditions) + if not pod: + return {} + pod = pod[0] + try: + pod['cpus'] = [json.loads(cpu) for cpu in pod.pop('cpus')] + except SerializerSyntaxError: + pod['cpus'] = [] + logger.debug('Failed to deserialize target cpu information') + pod['kernel_config'] = kernel_config_from_db(pod['kernel_config']) + return pod + + @property + def _db_statefile(self): + # Read overall run information + columns = ['runs.state'] + tables = ['runs'] + conditions = ['runs.run_uuid = \'{}\''.format(self.run_uuid)] + pod = self._read_db(columns, tables, conditions) + pod = pod[0].get('state') + if not pod: + return {} + + # Read job information + columns = ['jobs.job_id', 'jobs.oid'] + tables = ['jobs'] + conditions = ['jobs.run_oid = \'{}\''.format(self.oid)] + job_oids = self._read_db(columns, tables, conditions) + + # Match job oid with jobs from state file + for job in pod.get('jobs', []): + for job_oid in job_oids: + if job['id'] == job_oid['job_id']: + job['oid'] = job_oid['oid'] + break + return pod + + @property + def _db_jobsfile(self): + workload_params = self._get_parameters('workload') + runtime_params = self._get_parameters('runtime') + + columns = [('jobs.job_id', 'id'), 'jobs.label', 'jobs.workload_name', + 'jobs.oid', 'jobs._pod_version', 'jobs._pod_serialization_version'] + tables = ['jobs'] + conditions = ['jobs.run_oid = \'{}\''.format(self.oid)] + jobs = self._read_db(columns, tables, conditions) + + for job in jobs: + job['workload_parameters'] = workload_params.pop(job['oid'], {}) + job['runtime_parameters'] = runtime_params.pop(job['oid'], {}) + job.pop('oid') + return jobs + + @property + 
def _db_run_config(self): + pod = defaultdict(dict) + parameter_types = ['augmentation', 'resource_getter'] + for parameter_type in parameter_types: + columns = ['parameters.name', 'parameters.value', + 'parameters.value_type', + ('{}s.name'.format(parameter_type), '{}'.format(parameter_type))] + tables = ['parameters', '{}s'.format(parameter_type)] + conditions = ['parameters.run_oid = \'{}\''.format(self.oid), + 'parameters.type = \'{}\''.format(parameter_type), + 'parameters.{0}_oid = {0}s.oid'.format(parameter_type)] + configs = self._read_db(columns, tables, conditions) + for config in configs: + entry = {config['name']: json.loads(config['value'])} + pod['{}s'.format(parameter_type)][config.pop(parameter_type)] = entry + + # run config + columns = ['runs.max_retries', 'runs.allow_phone_home', + 'runs.bail_on_init_failure', 'runs.retry_on_status'] + tables = ['runs'] + conditions = ['runs.oid = \'{}\''.format(self.oid)] + config = self._read_db(columns, tables, conditions) + if not config: + return {} + + config = config[0] + # Convert back into a string representation of an enum list + config['retry_on_status'] = config['retry_on_status'][1:-1].split(',') + pod.update(config) + return pod + + def __init__(self, + password=None, + dbname='wa', + host='localhost', + port='5432', + user='postgres', + run_uuid=None, + list_runs=False): + + if psycopg2 is None: + msg = 'Please install the psycopg2 in order to connect to postgres databases' + raise HostError(msg) + + self.dbname = dbname + self.host = host + self.port = port + self.user = user + self.password = password + self.run_uuid = run_uuid + self.conn = None + + self.info = None + self.state = None + self.result = None + self.target_info = None + self._combined_config = None + self.jobs = [] + self.job_specs = [] + + self.connect() + super(RunDatabaseOutput, self).__init__(conn=self.conn, reload=False) + + local_schema_version, db_schema_version = get_schema_versions(self.conn) + if local_schema_version != db_schema_version: + self.disconnect() + msg = 'The current database schema is v{} however the local ' \ + 'schema version is v{}. Please update your database ' \ + 'with the create command' + raise HostError(msg.format(db_schema_version, local_schema_version)) + + if list_runs: + print('Available runs are:') + self._list_runs() + self.disconnect() + return + if not self.run_uuid: + print('Please specify "Run uuid"') + self._list_runs() + self.disconnect() + return + + if not self.oid: + self.oid = self._get_oid() + self.reload() + + def read_job_specs(self): + job_specs = [] + for job in self._db_jobsfile: + job_specs.append(JobSpec.from_pod(job)) + return job_specs + + def connect(self): + if self.conn and not self.conn.closed: + return + try: + self.conn = psycopg2.connect(dbname=self.dbname, + user=self.user, + host=self.host, + password=self.password, + port=self.port) + except Psycopg2Error as e: + raise HostError('Unable to connect to the Database: "{}'.format(e.args[0])) + + def disconnect(self): + self.conn.commit() + self.conn.close() + + def reload(self): + super(RunDatabaseOutput, self).reload() + info_pod = self._db_infofile + state_pod = self._db_statefile + if not info_pod or not state_pod: + msg = '"{}" does not appear to be a valid WA Database Output.' 
+ raise ValueError(msg.format(self.oid)) + + self.info = RunInfo.from_pod(info_pod) + self.state = RunState.from_pod(state_pod) + self._combined_config = CombinedConfig.from_pod({'run_config': self._db_run_config}) + self.target_info = TargetInfo.from_pod(self._db_targetfile) + self.job_specs = self.read_job_specs() + + for job_state in self._db_statefile['jobs']: + job = JobDatabaseOutput(self.conn, job_state.get('oid'), job_state['id'], + job_state['label'], job_state['iteration'], + job_state['retries']) + job.status = job_state['status'] + job.spec = self.get_job_spec(job.id) + if job.spec is None: + logger.warning('Could not find spec for job {}'.format(job.id)) + self.jobs.append(job) + + def _get_oid(self): + columns = ['{}s.oid'.format(self.kind)] + tables = ['{}s'.format(self.kind)] + conditions = ['runs.run_uuid = \'{}\''.format(self.run_uuid)] + oid = self._read_db(columns, tables, conditions, as_dict=False) + if not oid: + raise ConfigError('No matching run entries found for run_uuid {}'.format(self.run_uuid)) + if len(oid) > 1: + raise ConfigError('Multiple entries found for run_uuid: {}'.format(self.run_uuid)) + return oid[0][0] + + def _get_parameters(self, param_type): + columns = ['parameters.job_oid', 'parameters.name', 'parameters.value'] + tables = ['parameters'] + conditions = ['parameters.type = \'{}\''.format(param_type), + 'parameters.run_oid = \'{}\''.format(self.oid)] + params = self._read_db(columns, tables, conditions, as_dict=False) + parm_dict = defaultdict(dict) + for (job_oid, k, v) in params: + try: + parm_dict[job_oid][k] = json.loads(v) + except SerializerSyntaxError: + logger.debug('Failed to deserialize job_oid:{}-"{}":"{}"'.format(job_oid, k, v)) + return parm_dict + + def _list_runs(self): + columns = ['runs.run_uuid', 'runs.run_name', 'runs.project', + 'runs.project_stage', 'runs.status', 'runs.start_time', 'runs.end_time'] + tables = ['runs'] + pod = self._read_db(columns, tables) + if pod: + headers = ['Run Name', 'Project', 'Project Stage', 'Start Time', 'End Time', + 'run_uuid'] + run_list = [] + for entry in pod: + # Format times to display better + start_time = entry['start_time'] + end_time = entry['end_time'] + if start_time: + start_time = start_time.strftime("%Y-%m-%d %H:%M:%S") + if end_time: + end_time = end_time.strftime("%Y-%m-%d %H:%M:%S") + + run_list.append([ + entry['run_name'], + entry['project'], + entry['project_stage'], + start_time, + end_time, + entry['run_uuid']]) + + print(format_simple_table(run_list, headers)) + else: + print('No Runs Found') + + +class JobDatabaseOutput(DatabaseOutput): + + kind = 'job' + + def __init__(self, conn, oid, job_id, label, iteration, retry): + super(JobDatabaseOutput, self).__init__(conn, oid=oid) + self.id = job_id + self.label = label + self.iteration = iteration + self.retry = retry + self.result = None + self.spec = None + self.reload() + + def __repr__(self): + return '<{} {}-{}-{}>'.format(self.__class__.__name__, + self.id, self.label, self.iteration) + + def __str__(self): + return '{}-{}-{}'.format(self.id, self.label, self.iteration) diff --git a/external/workload-automation/wa/framework/run.py b/external/workload-automation/wa/framework/run.py index 1acad2498a91bab7445e2d12172c5be387e02044..7509e2b8a2c531ae939ea4fb19912f2eb0e2fad1 100644 --- a/external/workload-automation/wa/framework/run.py +++ b/external/workload-automation/wa/framework/run.py @@ -22,27 +22,34 @@ from copy import copy from datetime import datetime, timedelta from wa.framework.configuration.core import Status +from 
wa.utils.serializer import Podable -class RunInfo(object): +class RunInfo(Podable): """ Information about the current run, such as its unique ID, run time, etc. """ + _pod_serialization_version = 1 + @staticmethod def from_pod(pod): + pod = RunInfo._upgrade_pod(pod) uid = pod.pop('uuid') + _pod_version = pod.pop('_pod_version') duration = pod.pop('duration') if uid is not None: uid = uuid.UUID(uid) instance = RunInfo(**pod) + instance._pod_version = _pod_version # pylint: disable=protected-access instance.uuid = uid instance.duration = duration if duration is None else timedelta(seconds=duration) return instance def __init__(self, run_name=None, project=None, project_stage=None, start_time=None, end_time=None, duration=None): + super(RunInfo, self).__init__() self.uuid = uuid.uuid4() self.run_name = run_name self.project = project @@ -52,7 +59,8 @@ class RunInfo(object): self.duration = duration def to_pod(self): - d = copy(self.__dict__) + d = super(RunInfo, self).to_pod() + d.update(copy(self.__dict__)) d['uuid'] = str(self.uuid) if self.duration is None: d['duration'] = self.duration @@ -60,16 +68,23 @@ class RunInfo(object): d['duration'] = self.duration.total_seconds() return d + @staticmethod + def _pod_upgrade_v1(pod): + pod['_pod_version'] = pod.get('_pod_version', 1) + return pod + -class RunState(object): +class RunState(Podable): """ Represents the state of a WA run. """ + _pod_serialization_version = 1 + @staticmethod def from_pod(pod): - instance = RunState() - instance.status = Status(pod['status']) + instance = super(RunState, RunState).from_pod(pod) + instance.status = Status.from_pod(pod['status']) instance.timestamp = pod['timestamp'] jss = [JobState.from_pod(j) for j in pod['jobs']] instance.jobs = OrderedDict(((js.id, js.iteration), js) for js in jss) @@ -81,6 +96,7 @@ class RunState(object): if js.status > Status.RUNNING) def __init__(self): + super(RunState, self).__init__() self.jobs = OrderedDict() self.status = Status.NEW self.timestamp = datetime.utcnow() @@ -101,18 +117,28 @@ class RunState(object): return counter def to_pod(self): - return OrderedDict( - status=str(self.status), - timestamp=self.timestamp, - jobs=[j.to_pod() for j in self.jobs.values()], - ) + pod = super(RunState, self).to_pod() + pod['status'] = self.status.to_pod() + pod['timestamp'] = self.timestamp + pod['jobs'] = [j.to_pod() for j in self.jobs.values()] + return pod + @staticmethod + def _pod_upgrade_v1(pod): + pod['_pod_version'] = pod.get('_pod_version', 1) + pod['status'] = Status(pod['status']).to_pod() + return pod -class JobState(object): + +class JobState(Podable): + + _pod_serialization_version = 1 @staticmethod def from_pod(pod): - instance = JobState(pod['id'], pod['label'], pod['iteration'], Status(pod['status'])) + pod = JobState._upgrade_pod(pod) + instance = JobState(pod['id'], pod['label'], pod['iteration'], + Status.from_pod(pod['status'])) instance.retries = pod['retries'] instance.timestamp = pod['timestamp'] return instance @@ -123,6 +149,7 @@ class JobState(object): def __init__(self, id, label, iteration, status): # pylint: disable=redefined-builtin + super(JobState, self).__init__() self.id = id self.label = label self.iteration = iteration @@ -131,11 +158,17 @@ class JobState(object): self.timestamp = datetime.utcnow() def to_pod(self): - return OrderedDict( - id=self.id, - label=self.label, - iteration=self.iteration, - status=str(self.status), - retries=0, - timestamp=self.timestamp, - ) + pod = super(JobState, self).to_pod() + pod['id'] = self.id + pod['label'] = 
self.label + pod['iteration'] = self.iteration + pod['status'] = self.status.to_pod() + pod['retries'] = 0 + pod['timestamp'] = self.timestamp + return pod + + @staticmethod + def _pod_upgrade_v1(pod): + pod['_pod_version'] = pod.get('_pod_version', 1) + pod['status'] = Status(pod['status']).to_pod() + return pod diff --git a/external/workload-automation/wa/framework/target/info.py b/external/workload-automation/wa/framework/target/info.py index a247aaeb5d8a4d4acfbb4a8af686f9ab6c200d1b..5b11f7d83521540f26a35db0ace3ed0b56e475a0 100644 --- a/external/workload-automation/wa/framework/target/info.py +++ b/external/workload-automation/wa/framework/target/info.py @@ -15,7 +15,6 @@ # pylint: disable=protected-access import os -from copy import copy from devlib import AndroidTarget, TargetError from devlib.target import KernelConfig, KernelVersion, Cpuinfo @@ -23,7 +22,7 @@ from devlib.utils.android import AndroidProperties from wa.framework.configuration.core import settings from wa.framework.exception import ConfigError -from wa.utils.serializer import read_pod, write_pod +from wa.utils.serializer import read_pod, write_pod, Podable def cpuinfo_from_pod(pod): @@ -65,20 +64,32 @@ def kernel_config_from_pod(pod): return config -class CpufreqInfo(object): +class CpufreqInfo(Podable): + + _pod_serialization_version = 1 @staticmethod def from_pod(pod): + pod = CpufreqInfo._upgrade_pod(pod) return CpufreqInfo(**pod) def __init__(self, **kwargs): + super(CpufreqInfo, self).__init__() self.available_frequencies = kwargs.pop('available_frequencies', []) self.available_governors = kwargs.pop('available_governors', []) self.related_cpus = kwargs.pop('related_cpus', []) self.driver = kwargs.pop('driver', None) + self._pod_version = kwargs.pop('_pod_version', self._pod_serialization_version) def to_pod(self): - return copy(self.__dict__) + pod = super(CpufreqInfo, self).to_pod() + pod.update(self.__dict__) + return pod + + @staticmethod + def _pod_upgrade_v1(pod): + pod['_pod_version'] = pod.get('_pod_version', 1) + return pod def __repr__(self): return 'Cpufreq({} {})'.format(self.driver, self.related_cpus) @@ -86,20 +97,32 @@ class CpufreqInfo(object): __str__ = __repr__ -class IdleStateInfo(object): +class IdleStateInfo(Podable): + + _pod_serialization_version = 1 @staticmethod def from_pod(pod): + pod = IdleStateInfo._upgrade_pod(pod) return IdleStateInfo(**pod) def __init__(self, **kwargs): + super(IdleStateInfo, self).__init__() self.name = kwargs.pop('name', None) self.desc = kwargs.pop('desc', None) self.power = kwargs.pop('power', None) self.latency = kwargs.pop('latency', None) + self._pod_version = kwargs.pop('_pod_version', self._pod_serialization_version) def to_pod(self): - return copy(self.__dict__) + pod = super(IdleStateInfo, self).to_pod() + pod.update(self.__dict__) + return pod + + @staticmethod + def _pod_upgrade_v1(pod): + pod['_pod_version'] = pod.get('_pod_version', 1) + return pod def __repr__(self): return 'IdleState({}/{})'.format(self.name, self.desc) @@ -107,11 +130,15 @@ class IdleStateInfo(object): __str__ = __repr__ -class CpuidleInfo(object): +class CpuidleInfo(Podable): + + _pod_serialization_version = 1 @staticmethod def from_pod(pod): + pod = CpuidleInfo._upgrade_pod(pod) instance = CpuidleInfo() + instance._pod_version = pod['_pod_version'] instance.governor = pod['governor'] instance.driver = pod['driver'] instance.states = [IdleStateInfo.from_pod(s) for s in pod['states']] @@ -122,17 +149,23 @@ class CpuidleInfo(object): return len(self.states) def __init__(self): + 
super(CpuidleInfo, self).__init__() self.governor = None self.driver = None self.states = [] def to_pod(self): - pod = {} + pod = super(CpuidleInfo, self).to_pod() pod['governor'] = self.governor pod['driver'] = self.driver pod['states'] = [s.to_pod() for s in self.states] return pod + @staticmethod + def _pod_upgrade_v1(pod): + pod['_pod_version'] = pod.get('_pod_version', 1) + return pod + def __repr__(self): return 'Cpuidle({}/{} {} states)'.format( self.governor, self.driver, self.num_states) @@ -140,11 +173,13 @@ class CpuidleInfo(object): __str__ = __repr__ -class CpuInfo(object): +class CpuInfo(Podable): + + _pod_serialization_version = 1 @staticmethod def from_pod(pod): - instance = CpuInfo() + instance = super(CpuInfo, CpuInfo).from_pod(pod) instance.id = pod['id'] instance.name = pod['name'] instance.architecture = pod['architecture'] @@ -154,6 +189,7 @@ class CpuInfo(object): return instance def __init__(self): + super(CpuInfo, self).__init__() self.id = None self.name = None self.architecture = None @@ -162,7 +198,7 @@ class CpuInfo(object): self.cpuidle = CpuidleInfo() def to_pod(self): - pod = {} + pod = super(CpuInfo, self).to_pod() pod['id'] = self.id pod['name'] = self.name pod['architecture'] = self.architecture @@ -171,6 +207,11 @@ class CpuInfo(object): pod['cpuidle'] = self.cpuidle.to_pod() return pod + @staticmethod + def _pod_upgrade_v1(pod): + pod['_pod_version'] = pod.get('_pod_version', 1) + return pod + def __repr__(self): return 'Cpu({} {})'.format(self.id, self.name) @@ -254,10 +295,10 @@ def get_target_info_from_cache(system_id): if not pod: return None - pod_version = pod.get('format_version', 0) - if pod_version != TargetInfo.format_version: + _pod_version = pod.get('_pod_version', 0) + if _pod_version != TargetInfo._pod_serialization_version: msg = 'Target info version mismatch. 
Expected {}, but found {}.\nTry deleting {}' - raise ConfigError(msg.format(TargetInfo.format_version, pod_version, + raise ConfigError(msg.format(TargetInfo._pod_serialization_version, _pod_version, settings.target_info_cache_file)) return TargetInfo.from_pod(pod) @@ -270,13 +311,13 @@ def cache_target_info(target_info, overwrite=False): write_target_info_cache(cache) -class TargetInfo(object): +class TargetInfo(Podable): - format_version = 2 + _pod_serialization_version = 2 @staticmethod def from_pod(pod): - instance = TargetInfo() + instance = super(TargetInfo, TargetInfo).from_pod(pod) instance.target = pod['target'] instance.abi = pod['abi'] instance.cpus = [CpuInfo.from_pod(c) for c in pod['cpus']] @@ -300,6 +341,7 @@ class TargetInfo(object): return instance def __init__(self): + super(TargetInfo, self).__init__() self.target = None self.cpus = [] self.os = None @@ -318,8 +360,7 @@ class TargetInfo(object): self.page_size_kb = None def to_pod(self): - pod = {} - pod['format_version'] = self.format_version + pod = super(TargetInfo, self).to_pod() pod['target'] = self.target pod['abi'] = self.abi pod['cpus'] = [c.to_pod() for c in self.cpus] @@ -341,3 +382,22 @@ class TargetInfo(object): pod['android_id'] = self.android_id return pod + + @staticmethod + def _pod_upgrade_v1(pod): + pod['_pod_version'] = pod.get('_pod_version', 1) + pod['cpus'] = pod.get('cpus', []) + pod['system_id'] = pod.get('system_id') + pod['hostid'] = pod.get('hostid') + pod['hostname'] = pod.get('hostname') + pod['sched_features'] = pod.get('sched_features') + pod['screen_resolution'] = pod.get('screen_resolution', (0, 0)) + pod['prop'] = pod.get('prop') + pod['android_id'] = pod.get('android_id') + return pod + + @staticmethod + def _pod_upgrade_v2(pod): + pod['page_size_kb'] = pod.get('page_size_kb') + pod['_pod_version'] = pod.get('format_version', 0) + return pod diff --git a/external/workload-automation/wa/framework/target/runtime_config.py b/external/workload-automation/wa/framework/target/runtime_config.py index 2728a8310add8d337ad170196beb9af8e594223d..7fd7d54b1e1979841c328fa4e702cc93cf427802 100644 --- a/external/workload-automation/wa/framework/target/runtime_config.py +++ b/external/workload-automation/wa/framework/target/runtime_config.py @@ -370,13 +370,14 @@ class CpufreqRuntimeConfig(RuntimeConfig): The governor to be set for all cores """) - param_name = 'governor_tunables' + param_name = 'gov_tunables' self._runtime_params[param_name] = \ RuntimeParameter( param_name, kind=dict, merge=True, setter=self.set_governor_tunables, setter_params={'core': None}, + aliases=['governor_tunables'], description=""" The governor tunables to be set for all cores """) diff --git a/external/workload-automation/wa/framework/target/runtime_parameter_manager.py b/external/workload-automation/wa/framework/target/runtime_parameter_manager.py index 605b56d72eeadc5ea455b9222d09e90774793f3e..c46235507ec3c4e04190fca6ffe7e94c9d2f9c80 100644 --- a/external/workload-automation/wa/framework/target/runtime_parameter_manager.py +++ b/external/workload-automation/wa/framework/target/runtime_parameter_manager.py @@ -97,6 +97,6 @@ class RuntimeParameterManager(object): def get_cfg_point(self, name): name = caseless_string(name) for k, v in self.runtime_params.items(): - if name == k: + if name == k or name in v.cfg_point.aliases: return v.cfg_point raise ConfigError('Unknown runtime parameter: {}'.format(name)) diff --git a/external/workload-automation/wa/framework/version.py b/external/workload-automation/wa/framework/version.py 
index 584fa3bddf6eb92faef10968efb99d23bbefb197..44f9aee62b762fcc4ef113189a35cc0b88289bcf 100644 --- a/external/workload-automation/wa/framework/version.py +++ b/external/workload-automation/wa/framework/version.py @@ -19,14 +19,16 @@ from collections import namedtuple from subprocess import Popen, PIPE -VersionTuple = namedtuple('Version', ['major', 'minor', 'revision']) +VersionTuple = namedtuple('Version', ['major', 'minor', 'revision', 'dev']) -version = VersionTuple(3, 0, 'dev1') +version = VersionTuple(3, 1, 1, 'dev1') def get_wa_version(): version_string = '{}.{}.{}'.format( version.major, version.minor, version.revision) + if version.dev: + version_string += '.{}'.format(version.dev) return version_string diff --git a/external/workload-automation/wa/framework/workload.py b/external/workload-automation/wa/framework/workload.py index 954f3fed4c1e786b91ceca211ea4e2143952a892..f5d0e1354f45da4330512d10710f4f472c297514 100644 --- a/external/workload-automation/wa/framework/workload.py +++ b/external/workload-automation/wa/framework/workload.py @@ -235,6 +235,12 @@ class ApkWorkload(Workload): will fall back to the version on the target if available. If ``False`` then the version on the target is preferred instead. """), + Parameter('view', kind=str, default=None, merge=True, + description=""" + Manually override the 'View' of the workload for use with + instruments such as the ``fps`` instrument. If not specified, + a workload dependant 'View' will be automatically generated. + """), ] @property diff --git a/external/workload-automation/wa/instruments/fps.py b/external/workload-automation/wa/instruments/fps.py index 2c5dbc9a43e90edb05402d48b9fa0e6a0a84a1b9..b0dc51f763c77516e50033e29d5049a12958dfce 100644 --- a/external/workload-automation/wa/instruments/fps.py +++ b/external/workload-automation/wa/instruments/fps.py @@ -164,7 +164,7 @@ class FpsInstrument(Instrument): os.remove(entry) if not frame_count.value: - context.add_event('Could not frind frames data in gfxinfo output') + context.add_event('Could not find frames data in gfxinfo output') context.set_status('PARTIAL') self.check_for_crash(context, fps.value, frame_count.value, diff --git a/external/workload-automation/wa/output_processors/postgresql.py b/external/workload-automation/wa/output_processors/postgresql.py index 16d25667773d0c145186027ef1c1b41ffa900d39..a2fa27b86d9e66106db16bb7f6d36b1e0d215330 100644 --- a/external/workload-automation/wa/output_processors/postgresql.py +++ b/external/workload-automation/wa/output_processors/postgresql.py @@ -16,7 +16,6 @@ import os import uuid import collections -import inspect try: import psycopg2 @@ -27,11 +26,14 @@ except ImportError as e: import_error_msg = e.args[0] if e.args else str(e) from devlib.target import KernelVersion, KernelConfig -import wa -from wa.utils import postgres_convert from wa import OutputProcessor, Parameter, OutputProcessorError -from wa.utils.types import level from wa.framework.target.info import CpuInfo +from wa.utils.postgres import (POSTGRES_SCHEMA_DIR, cast_level, cast_vanilla, + adapt_vanilla, return_as_is, adapt_level, + ListOfLevel, adapt_ListOfX, create_iterable_adapter, + get_schema_versions) +from wa.utils.serializer import json +from wa.utils.types import level class PostgresqlResultProcessor(OutputProcessor): @@ -43,10 +45,8 @@ class PostgresqlResultProcessor(OutputProcessor): The structure of this database can easily be understood by examining the postgres_schema.sql file (the schema used to generate it): {} - """.format(os.path.join( - 
os.path.dirname(inspect.getfile(wa)), - 'commands', - 'postgres_schema.sql')) + """.format(os.path.join(POSTGRES_SCHEMA_DIR, 'postgres_schema.sql')) + parameters = [ Parameter('username', default='postgres', description=""" @@ -84,19 +84,23 @@ class PostgresqlResultProcessor(OutputProcessor): # Commands sql_command = { - "create_run": "INSERT INTO Runs (oid, event_summary, basepath, status, timestamp, run_name, project, retry_on_status, max_retries, bail_on_init_failure, allow_phone_home, run_uuid, start_time, metadata) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)", - "update_run": "UPDATE Runs SET event_summary=%s, status=%s, timestamp=%s, end_time=%s WHERE oid=%s;", - "create_job": "INSERT INTO Jobs (oid, run_oid, status, retries, label, job_id, iterations, workload_name, metadata) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s);", - "create_target": "INSERT INTO Targets (oid, run_oid, target, cpus, os, os_version, hostid, hostname, abi, is_rooted, kernel_version, kernel_release, kernel_sha1, kernel_config, sched_features) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)", - "create_event": "INSERT INTO Events (oid, run_oid, job_oid, timestamp, message) VALUES (%s, %s, %s, %s, %s)", - "create_artifact": "INSERT INTO Artifacts (oid, run_oid, job_oid, name, large_object_uuid, description, kind) VALUES (%s, %s, %s, %s, %s, %s, %s)", - "create_metric": "INSERT INTO Metrics (oid, run_oid, job_oid, name, value, units, lower_is_better) VALUES (%s, %s, %s, %s , %s, %s, %s)", + "create_run": "INSERT INTO Runs (oid, event_summary, basepath, status, timestamp, run_name, project, project_stage, retry_on_status, max_retries, bail_on_init_failure, allow_phone_home, run_uuid, start_time, metadata, state, _pod_version, _pod_serialization_version) " + "VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)", + "update_run": "UPDATE Runs SET event_summary=%s, status=%s, timestamp=%s, end_time=%s, duration=%s, state=%s WHERE oid=%s;", + "create_job": "INSERT INTO Jobs (oid, run_oid, status, retry, label, job_id, iterations, workload_name, metadata, _pod_version, _pod_serialization_version) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s);", + "create_target": "INSERT INTO Targets (oid, run_oid, target, cpus, os, os_version, hostid, hostname, abi, is_rooted, kernel_version, kernel_release, kernel_sha1, kernel_config, sched_features, page_size_kb, screen_resolution, prop, android_id, _pod_version, _pod_serialization_version) " + "VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)", + "create_event": "INSERT INTO Events (oid, run_oid, job_oid, timestamp, message, _pod_version, _pod_serialization_version) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s", + "create_artifact": "INSERT INTO Artifacts (oid, run_oid, job_oid, name, large_object_uuid, description, kind, _pod_version, _pod_serialization_version) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)", + "create_metric": "INSERT INTO Metrics (oid, run_oid, job_oid, name, value, units, lower_is_better, _pod_version, _pod_serialization_version) VALUES (%s, %s, %s, %s, %s, %s , %s, %s, %s)", "create_augmentation": "INSERT INTO Augmentations (oid, run_oid, name) VALUES (%s, %s, %s)", - "create_classifier": "INSERT INTO Classifiers (oid, artifact_oid, metric_oid, key, value) VALUES (%s, %s, %s, %s, %s)", - "create_parameter": "INSERT INTO Parameters (oid, run_oid, job_oid, augmentation_oid, resource_getter_oid, name, value, value_type, type) VALUES (%s, %s, %s, %s, %s, 
%s, %s, %s, %s)", - "create_resource_getter": "INSERT INTO ResourceGetters (oid, run_oid, name) VALUES (%s, %s, %s)", + "create_classifier": "INSERT INTO Classifiers (oid, artifact_oid, metric_oid, job_oid, run_oid, key, value) VALUES (%s, %s, %s, %s, %s, %s, %s)", + "create_parameter": "INSERT INTO Parameters (oid, run_oid, job_oid, augmentation_oid, resource_getter_oid, name, value, value_type, type) " + "VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)", + "create_resource_getter": "INSERT INTO Resource_Getters (oid, run_oid, name) VALUES (%s, %s, %s)", "create_job_aug": "INSERT INTO Jobs_Augs (oid, job_oid, augmentation_oid) VALUES (%s, %s, %s)", - "create_large_object": "INSERT INTO LargeObjects (oid, lo_oid) VALUES (%s, %s)"} + "create_large_object": "INSERT INTO LargeObjects (oid, lo_oid) VALUES (%s, %s)" + } # Lists to track which run-related items have already been added metrics_already_added = [] @@ -123,34 +127,37 @@ class PostgresqlResultProcessor(OutputProcessor): # N.B. Typecasters are for postgres->python and adapters the opposite self.connect_to_database() self.cursor = self.conn.cursor() + self.verify_schema_versions() + # Register the adapters and typecasters for enum types self.cursor.execute("SELECT NULL::status_enum") status_oid = self.cursor.description[0][1] self.cursor.execute("SELECT NULL::param_enum") param_oid = self.cursor.description[0][1] LEVEL = psycopg2.extensions.new_type( - (status_oid,), "LEVEL", postgres_convert.cast_level) + (status_oid,), "LEVEL", cast_level) psycopg2.extensions.register_type(LEVEL) PARAM = psycopg2.extensions.new_type( - (param_oid,), "PARAM", postgres_convert.cast_vanilla) + (param_oid,), "PARAM", cast_vanilla) psycopg2.extensions.register_type(PARAM) - psycopg2.extensions.register_adapter(level, postgres_convert.return_as_is(postgres_convert.adapt_level)) + psycopg2.extensions.register_adapter(level, return_as_is(adapt_level)) psycopg2.extensions.register_adapter( - postgres_convert.ListOfLevel, postgres_convert.adapt_ListOfX(postgres_convert.adapt_level)) - psycopg2.extensions.register_adapter(KernelVersion, postgres_convert.adapt_vanilla) + ListOfLevel, adapt_ListOfX(adapt_level)) + psycopg2.extensions.register_adapter(KernelVersion, adapt_vanilla) psycopg2.extensions.register_adapter( - CpuInfo, postgres_convert.adapt_vanilla) + CpuInfo, adapt_vanilla) psycopg2.extensions.register_adapter( collections.OrderedDict, extras.Json) psycopg2.extensions.register_adapter(dict, extras.Json) psycopg2.extensions.register_adapter( - KernelConfig, postgres_convert.create_iterable_adapter(2, explicit_iterate=True)) + KernelConfig, create_iterable_adapter(2, explicit_iterate=True)) # Register ready-made UUID type adapter extras.register_uuid() + # Insert a run_uuid which will be globally accessible during the run self.run_uuid = uuid.UUID(str(uuid.uuid4())) run_output = context.run_output - retry_on_status = postgres_convert.ListOfLevel(run_output.run_config.retry_on_status) + retry_on_status = ListOfLevel(run_output.run_config.retry_on_status) self.cursor.execute( self.sql_command['create_run'], ( @@ -161,35 +168,52 @@ class PostgresqlResultProcessor(OutputProcessor): run_output.state.timestamp, run_output.info.run_name, run_output.info.project, + run_output.info.project_stage, retry_on_status, run_output.run_config.max_retries, run_output.run_config.bail_on_init_failure, run_output.run_config.allow_phone_home, run_output.info.uuid, run_output.info.start_time, - run_output.metadata)) + run_output.metadata, + json.dumps(run_output.state.to_pod()), + 
run_output.result._pod_version, # pylint: disable=protected-access + run_output.result._pod_serialization_version, # pylint: disable=protected-access + ) + ) self.target_uuid = uuid.uuid4() target_info = context.target_info + target_pod = target_info.to_pod() self.cursor.execute( self.sql_command['create_target'], ( self.target_uuid, self.run_uuid, - target_info.target, - target_info.cpus, - target_info.os, - target_info.os_version, - target_info.hostid, - target_info.hostname, - target_info.abi, - target_info.is_rooted, + target_pod['target'], + target_pod['cpus'], + target_pod['os'], + target_pod['os_version'], + target_pod['hostid'], + target_pod['hostname'], + target_pod['abi'], + target_pod['is_rooted'], # Important caveat: kernel_version is the name of the column in the Targets table # However, this refers to kernel_version.version, not to kernel_version as a whole - target_info.kernel_version.version, - target_info.kernel_version.release, + target_pod['kernel_version'], + target_pod['kernel_release'], target_info.kernel_version.sha1, target_info.kernel_config, - target_info.sched_features)) + target_pod['sched_features'], + target_pod['page_size_kb'], + # Android Specific + target_pod.get('screen_resolution'), + target_pod.get('prop'), + target_pod.get('android_id'), + target_pod.get('pod_version'), + target_pod.get('pod_serialization_version'), + ) + ) + # Commit cursor commands self.conn.commit() @@ -210,7 +234,26 @@ class PostgresqlResultProcessor(OutputProcessor): job_output.id, job_output.iteration, job_output.spec.workload_name, - job_output.metadata)) + job_output.metadata, + job_output.spec._pod_version, # pylint: disable=protected-access + job_output.spec._pod_serialization_version, # pylint: disable=protected-access + ) + ) + + for classifier in job_output.classifiers: + classifier_uuid = uuid.uuid4() + self.cursor.execute( + self.sql_command['create_classifier'], + ( + classifier_uuid, + None, + None, + job_uuid, + None, + classifier, + job_output.classifiers[classifier] + ) + ) # Update the run table and run-level parameters self.cursor.execute( self.sql_command['update_run'], @@ -219,7 +262,24 @@ class PostgresqlResultProcessor(OutputProcessor): run_output.status, run_output.state.timestamp, run_output.info.end_time, + None, + json.dumps(run_output.state.to_pod()), self.run_uuid)) + for classifier in run_output.classifiers: + classifier_uuid = uuid.uuid4() + self.cursor.execute( + self.sql_command['create_classifier'], + ( + classifier_uuid, + None, + None, + None, + None, + self.run_uuid, + classifier, + run_output.classifiers[classifier] + ) + ) self.sql_upload_artifacts(run_output, record_in_added=True) self.sql_upload_metrics(run_output, record_in_added=True) self.sql_upload_augmentations(run_output) @@ -253,19 +313,27 @@ class PostgresqlResultProcessor(OutputProcessor): ( job_status, job_id, - self.run_uuid)) + self.run_uuid + ) + ) run_uuid = self.run_uuid # Update the run entry after jobs have completed + run_info_pod = run_output.info.to_pod() + run_state_pod = run_output.state.to_pod() sql_command_update_run = self.sql_command['update_run'] self.cursor.execute( sql_command_update_run, ( run_output.event_summary, run_output.status, - run_output.state.timestamp, - run_output.info.end_time, - run_uuid)) + run_info_pod['start_time'], + run_info_pod['end_time'], + run_info_pod['duration'], + json.dumps(run_state_pod), + run_uuid, + ) + ) self.sql_upload_events(run_output) self.sql_upload_artifacts(run_output, check_uniqueness=True) 
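The job- and run-level classifier inserts above target the widened Classifiers table, whose columns are (oid, artifact_oid, metric_oid, job_oid, run_oid, key, value); each row is expected to populate exactly one of the four owner columns and leave the others NULL. As a rough illustration only (not part of this patch; the helper name is hypothetical), the tuples being executed have this shape:

    import uuid

    def classifier_row(key, value, artifact_oid=None, metric_oid=None,
                       job_oid=None, run_oid=None):
        # One of the four owner ids should be set; the rest stay NULL so the
        # classifier is attached to exactly one artifact, metric, job or run.
        return (uuid.uuid4(), artifact_oid, metric_oid, job_oid, run_oid,
                key, value)

    # e.g. a job-level classifier:
    #   cursor.execute(sql_command['create_classifier'],
    #                  classifier_row('workload', 'dhrystone', job_oid=job_uuid))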
self.sql_upload_metrics(run_output, check_uniqueness=True) @@ -282,11 +350,14 @@ class PostgresqlResultProcessor(OutputProcessor): ( resource_getter_uuid, self.run_uuid, - resource_getter)) + resource_getter, + ) + ) self.sql_upload_parameters( 'resource_getter', output_object.run_config.resource_getters[resource_getter], - owner_id=resource_getter_uuid) + owner_id=resource_getter_uuid, + ) def sql_upload_events(self, output_object, job_uuid=None): for event in output_object.events: @@ -298,7 +369,11 @@ class PostgresqlResultProcessor(OutputProcessor): self.run_uuid, job_uuid, event.timestamp, - event.message)) + event.message, + event._pod_version, # pylint: disable=protected-access + event._pod_serialization_version, # pylint: disable=protected-access + ) + ) def sql_upload_job_augmentations(self, output_object, job_uuid=None): ''' This is a table which links the uuids of augmentations to jobs. @@ -318,7 +393,9 @@ class PostgresqlResultProcessor(OutputProcessor): ( job_aug_uuid, job_uuid, - augmentation_uuid)) + augmentation_uuid, + ) + ) def sql_upload_augmentations(self, output_object): for augmentation in output_object.augmentations: @@ -330,11 +407,14 @@ class PostgresqlResultProcessor(OutputProcessor): ( augmentation_uuid, self.run_uuid, - augmentation)) + augmentation, + ) + ) self.sql_upload_parameters( 'augmentation', output_object.run_config.augmentations[augmentation], - owner_id=augmentation_uuid) + owner_id=augmentation_uuid, + ) self.augmentations_already_added[augmentation] = augmentation_uuid def sql_upload_metrics(self, output_object, record_in_added=False, check_uniqueness=False, job_uuid=None): @@ -351,7 +431,11 @@ class PostgresqlResultProcessor(OutputProcessor): metric.name, metric.value, metric.units, - metric.lower_is_better)) + metric.lower_is_better, + metric._pod_version, # pylint: disable=protected-access + metric._pod_serialization_version, # pylint: disable=protected-access + ) + ) for classifier in metric.classifiers: classifier_uuid = uuid.uuid4() self.cursor.execute( @@ -360,8 +444,12 @@ class PostgresqlResultProcessor(OutputProcessor): classifier_uuid, None, metric_uuid, + None, + None, classifier, - metric.classifiers[classifier])) + metric.classifiers[classifier], + ) + ) if record_in_added: self.metrics_already_added.append(metric) @@ -372,7 +460,7 @@ class PostgresqlResultProcessor(OutputProcessor): ''' for artifact in output_object.artifacts: if artifact in self.artifacts_already_added and check_uniqueness: - self.logger.debug('Skipping uploading {} as already added' .format(artifact)) + self.logger.debug('Skipping uploading {} as already added'.format(artifact)) continue if artifact in self.artifacts_already_added: @@ -407,9 +495,11 @@ class PostgresqlResultProcessor(OutputProcessor): augmentation_id, resource_getter_id, parameter, - str(parameter_dict[parameter]), + json.dumps(parameter_dict[parameter]), str(type(parameter_dict[parameter])), - parameter_type)) + parameter_type, + ) + ) def connect_to_database(self): dsn = "dbname={} user={} password={} host={} port={}".format( @@ -430,6 +520,18 @@ class PostgresqlResultProcessor(OutputProcessor): self.conn.commit() self.conn.reset() + def verify_schema_versions(self): + local_schema_version, db_schema_version = get_schema_versions(self.conn) + if local_schema_version != db_schema_version: + self.cursor.close() + self.cursor = None + self.conn.commit() + self.conn.reset() + msg = 'The current database schema is v{} however the local ' \ + 'schema version is v{}. 
Please update your database ' \ + 'with the create command' + raise OutputProcessorError(msg.format(db_schema_version, local_schema_version)) + def _sql_write_lobject(self, source, lobject): with open(source) as lobj_file: lobj_data = lobj_file.read() @@ -456,7 +558,9 @@ class PostgresqlResultProcessor(OutputProcessor): self.sql_command['create_large_object'], ( large_object_uuid, - loid)) + loid, + ) + ) self.cursor.execute( self.sql_command['create_artifact'], ( @@ -466,7 +570,11 @@ class PostgresqlResultProcessor(OutputProcessor): artifact.name, large_object_uuid, artifact.description, - artifact.kind)) + str(artifact.kind), + artifact._pod_version, # pylint: disable=protected-access + artifact._pod_serialization_version, # pylint: disable=protected-access + ) + ) for classifier in artifact.classifiers: classifier_uuid = uuid.uuid4() self.cursor.execute( @@ -475,7 +583,11 @@ class PostgresqlResultProcessor(OutputProcessor): classifier_uuid, artifact_uuid, None, + None, + None, classifier, - artifact.classifiers[classifier])) + artifact.classifiers[classifier], + ) + ) if record_in_added: self.artifacts_already_added[artifact] = loid diff --git a/external/workload-automation/wa/utils/misc.py b/external/workload-automation/wa/utils/misc.py index 6129a4c59e7cbcc5600accf01f67b33ea7e67e47..eed4792b4e22695abed4cf5608e42c8d44f68f65 100644 --- a/external/workload-automation/wa/utils/misc.py +++ b/external/workload-automation/wa/utils/misc.py @@ -626,3 +626,13 @@ def resolve_unique_domain_cpus(name, target): if domain_cpus[0] not in unique_cpus: unique_cpus.append(domain_cpus[0]) return unique_cpus + + +def format_ordered_dict(od): + """ + Provide a string representation of ordered dict that is similar to the + regular dict representation, as that is more concise and easier to read + than the default __str__ for OrderedDict. 
+ """ + return '{{{}}}'.format(', '.join('{}={}'.format(k, v) + for k, v in od.items())) diff --git a/external/workload-automation/wa/utils/postgres_convert.py b/external/workload-automation/wa/utils/postgres.py similarity index 83% rename from external/workload-automation/wa/utils/postgres_convert.py rename to external/workload-automation/wa/utils/postgres.py index 3a983204f6feead7440608cde813f0eec4b421b4..1bedbbc6ac304ece6db5d9ab92e0e27db706b7f0 100644 --- a/external/workload-automation/wa/utils/postgres_convert.py +++ b/external/workload-automation/wa/utils/postgres.py @@ -28,6 +28,7 @@ http://initd.org/psycopg/docs/extensions.html#sql-adaptation-protocol-objects """ import re +import os try: from psycopg2 import InterfaceError @@ -39,6 +40,12 @@ except ImportError: from wa.utils.types import level +POSTGRES_SCHEMA_DIR = os.path.join(os.path.dirname(__file__), + '..', + 'commands', + 'postgres_schemas') + + def cast_level(value, cur): # pylint: disable=unused-argument """Generic Level caster for psycopg2""" if not InterfaceError: @@ -217,3 +224,37 @@ def adapt_list(param): final_string = final_string + str(item) + "," final_string = "{" + final_string + "}" return AsIs("'{}'".format(final_string)) + + +def get_schema(schemafilepath): + with open(schemafilepath, 'r') as sqlfile: + sql_commands = sqlfile.read() + + schema_major = None + schema_minor = None + # Extract schema version if present + if sql_commands.startswith('--!VERSION'): + splitcommands = sql_commands.split('!ENDVERSION!\n') + schema_major, schema_minor = splitcommands[0].strip('--!VERSION!').split('.') + schema_major = int(schema_major) + schema_minor = int(schema_minor) + sql_commands = splitcommands[1] + return schema_major, schema_minor, sql_commands + + +def get_database_schema_version(conn): + with conn.cursor() as cursor: + cursor.execute('''SELECT + DatabaseMeta.schema_major, + DatabaseMeta.schema_minor + FROM + DatabaseMeta;''') + schema_major, schema_minor = cursor.fetchone() + return (schema_major, schema_minor) + + +def get_schema_versions(conn): + schemafilepath = os.path.join(POSTGRES_SCHEMA_DIR, 'postgres_schema.sql') + cur_major_version, cur_minor_version, _ = get_schema(schemafilepath) + db_schema_version = get_database_schema_version(conn) + return (cur_major_version, cur_minor_version), db_schema_version diff --git a/external/workload-automation/wa/utils/serializer.py b/external/workload-automation/wa/utils/serializer.py index e3b3b548ed9c884e279263388e334d326cde22ff..4e0a79b5082b30508473c23f0eeb9f4edeae2b7e 100644 --- a/external/workload-automation/wa/utils/serializer.py +++ b/external/workload-automation/wa/utils/serializer.py @@ -139,6 +139,8 @@ class WAJSONDecoder(_json.JSONDecoder): return v def load_objects(d): + if not hasattr(d, 'items'): + return d pairs = [] for k, v in d.items(): if hasattr(v, 'items'): @@ -167,14 +169,14 @@ class json(object): try: return _json.load(fh, cls=WAJSONDecoder, object_pairs_hook=OrderedDict, *args, **kwargs) except ValueError as e: - raise SerializerSyntaxError(e.message) + raise SerializerSyntaxError(e.args[0]) @staticmethod def loads(s, *args, **kwargs): try: return _json.loads(s, cls=WAJSONDecoder, object_pairs_hook=OrderedDict, *args, **kwargs) except ValueError as e: - raise SerializerSyntaxError(e.message) + raise SerializerSyntaxError(e.args[0]) _mapping_tag = _yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG @@ -361,3 +363,33 @@ def is_pod(obj): if not is_pod(v): return False return True + + +class Podable(object): + + _pod_serialization_version = 0 + + 
@classmethod + def from_pod(cls, pod): + pod = cls._upgrade_pod(pod) + instance = cls() + instance._pod_version = pod.pop('_pod_version') # pylint: disable=protected-access + return instance + + @classmethod + def _upgrade_pod(cls, pod): + _pod_serialization_version = pod.pop('_pod_serialization_version', None) or 0 + while _pod_serialization_version < cls._pod_serialization_version: + _pod_serialization_version += 1 + upgrade = getattr(cls, '_pod_upgrade_v{}'.format(_pod_serialization_version)) + pod = upgrade(pod) + return pod + + def __init__(self): + self._pod_version = self._pod_serialization_version + + def to_pod(self): + pod = {} + pod['_pod_version'] = self._pod_version + pod['_pod_serialization_version'] = self._pod_serialization_version + return pod diff --git a/external/workload-automation/wa/utils/types.py b/external/workload-automation/wa/utils/types.py index a498778af8fc7436c94e8bd22d34fb8f5d426936..751e0bbf093aa6f7d8865e47d489e99c4199c068 100644 --- a/external/workload-automation/wa/utils/types.py +++ b/external/workload-automation/wa/utils/types.py @@ -635,7 +635,10 @@ def enum(args, start=0, step=1): if name == attr: return attr - raise ValueError('Invalid enum value: {}'.format(repr(name))) + try: + return Enum.from_pod(name) + except ValueError: + raise ValueError('Invalid enum value: {}'.format(repr(name))) reserved = ['values', 'levels', 'names'] diff --git a/external/workload-automation/wa/workloads/androbench/com.arm.wa.uiauto.androbench.apk b/external/workload-automation/wa/workloads/androbench/com.arm.wa.uiauto.androbench.apk index 3c9abd3a6089f134130e67bf834cfd44868c2197..0b5801837d124806039c3843a907952588b28614 100644 Binary files a/external/workload-automation/wa/workloads/androbench/com.arm.wa.uiauto.androbench.apk and b/external/workload-automation/wa/workloads/androbench/com.arm.wa.uiauto.androbench.apk differ diff --git a/external/workload-automation/wa/workloads/androbench/uiauto/app/src/main/java/com/arm/wa/uiauto/androbench/UiAutomation.java b/external/workload-automation/wa/workloads/androbench/uiauto/app/src/main/java/com/arm/wa/uiauto/androbench/UiAutomation.java index ae482c32f13f7982dd97bc9e1dfd11b659353857..225d67bb7a5ba192a0e5c61fdff25291f5b34c35 100755 --- a/external/workload-automation/wa/workloads/androbench/uiauto/app/src/main/java/com/arm/wa/uiauto/androbench/UiAutomation.java +++ b/external/workload-automation/wa/workloads/androbench/uiauto/app/src/main/java/com/arm/wa/uiauto/androbench/UiAutomation.java @@ -98,17 +98,23 @@ public class UiAutomation extends BaseUiAutomation { UiScrollable scrollView = new UiScrollable(new UiSelector().scrollable(true)); Log.d(TAG, "Sequential Read Score " + seqRead.getText()); - scrollView.scrollIntoView(seqWrite); + + if (scrollView.exists()){scrollView.scrollIntoView(seqWrite); } Log.d(TAG, "Sequential Write Score " + seqWrite.getText()); - scrollView.scrollIntoView(ranRead); + + if (scrollView.exists()){scrollView.scrollIntoView(ranRead);} Log.d(TAG, "Random Read Score " + ranRead.getText()); - scrollView.scrollIntoView(ranWrite); + + if (scrollView.exists()){scrollView.scrollIntoView(ranWrite);} Log.d(TAG, "Random Write Score " + ranWrite.getText()); - scrollView.scrollIntoView(sqlInsert); + + if (scrollView.exists()){scrollView.scrollIntoView(sqlInsert);} Log.d(TAG, "SQL Insert Score " + sqlInsert.getText()); - scrollView.scrollIntoView(sqlUpdate); + + if (scrollView.exists()){scrollView.scrollIntoView(sqlUpdate);} Log.d(TAG, "SQL Update Score " + sqlUpdate.getText()); - 
scrollView.scrollIntoView(sqlDelete); + + if (scrollView.exists()){scrollView.scrollIntoView(sqlDelete);} Log.d(TAG, "SQL Delete Score " + sqlDelete.getText()); } } diff --git a/external/workload-automation/wa/workloads/googlemaps/com.arm.wa.uiauto.googlemaps.apk b/external/workload-automation/wa/workloads/googlemaps/com.arm.wa.uiauto.googlemaps.apk index ccf52ef1e3c0e43dde5a9b602cb11c455e263810..16f92f3b34842e12d71e7ceb1e33e440e67a928a 100644 Binary files a/external/workload-automation/wa/workloads/googlemaps/com.arm.wa.uiauto.googlemaps.apk and b/external/workload-automation/wa/workloads/googlemaps/com.arm.wa.uiauto.googlemaps.apk differ diff --git a/external/workload-automation/wa/workloads/googlemaps/uiauto/app/src/main/java/com/arm/wa/uiauto/UiAutomation.java b/external/workload-automation/wa/workloads/googlemaps/uiauto/app/src/main/java/com/arm/wa/uiauto/UiAutomation.java index 2397775e2135667a7678194e97ec0660fc59db39..c1cc5aa747078f88946b95818707037c65b2c91c 100644 --- a/external/workload-automation/wa/workloads/googlemaps/uiauto/app/src/main/java/com/arm/wa/uiauto/UiAutomation.java +++ b/external/workload-automation/wa/workloads/googlemaps/uiauto/app/src/main/java/com/arm/wa/uiauto/UiAutomation.java @@ -126,6 +126,9 @@ public class UiAutomation extends BaseUiAutomation { public void getDirectionsFromLocation() throws Exception { UiObject directions = mDevice.findObject(new UiSelector().resourceId(packageID + "placepage_directions_button")); + if (!directions.exists()){ + directions = mDevice.findObject(new UiSelector().textContains("DIRECTIONS")); + } directions.clickAndWaitForNewWindow(uiAutoTimeout); } @@ -140,7 +143,9 @@ public class UiAutomation extends BaseUiAutomation { public void viewRouteSteps() throws Exception { UiObject steps = mDevice.findObject(new UiSelector().textContains("STEPS & MORE") .className("android.widget.TextView")); - steps.clickAndWaitForNewWindow(uiAutoTimeout); + if (steps.exists()){ + steps.clickAndWaitForNewWindow(uiAutoTimeout); + } } public void previewRoute() throws Exception { diff --git a/external/workload-automation/wa/workloads/googleslides/com.arm.wa.uiauto.googleslides.apk b/external/workload-automation/wa/workloads/googleslides/com.arm.wa.uiauto.googleslides.apk index 347f52a00875ccd68987e30451c366370f6dc22d..eaeca1c6e4e2d8a49428974c1e889b44cf7f2d53 100644 Binary files a/external/workload-automation/wa/workloads/googleslides/com.arm.wa.uiauto.googleslides.apk and b/external/workload-automation/wa/workloads/googleslides/com.arm.wa.uiauto.googleslides.apk differ diff --git a/external/workload-automation/wa/workloads/googleslides/uiauto/app/src/main/java/com/arm/wa/uiauto/googleslides/UiAutomation.java b/external/workload-automation/wa/workloads/googleslides/uiauto/app/src/main/java/com/arm/wa/uiauto/googleslides/UiAutomation.java index a62ee643da611a2b077938538bff9f1529564992..29125dcabca99da3ec6105170d8b19dd4c5a8506 100755 --- a/external/workload-automation/wa/workloads/googleslides/uiauto/app/src/main/java/com/arm/wa/uiauto/googleslides/UiAutomation.java +++ b/external/workload-automation/wa/workloads/googleslides/uiauto/app/src/main/java/com/arm/wa/uiauto/googleslides/UiAutomation.java @@ -81,11 +81,11 @@ public class UiAutomation extends BaseUiAutomation { sleep(1); enablePowerpointCompat(); sleep(1); - testEditNewSlidesDocument(newDocumentName, workingDirectoryName, doTextEntry); } @Test public void runWorkload() throws Exception { + testEditNewSlidesDocument(newDocumentName, workingDirectoryName, doTextEntry); 
openDocument(pushedDocumentName, workingDirectoryName); waitForProgress(WAIT_TIMEOUT_1SEC*30); testSlideshowFromStorage(slideCount); @@ -130,8 +130,22 @@ public class UiAutomation extends BaseUiAutomation { } public void insertSlide(String slideLayout) throws Exception { - clickUiObject(BY_DESC, "Add slide", true); - clickUiObject(BY_TEXT, slideLayout, true); + UiObject add_slide = + mDevice.findObject(new UiSelector().descriptionContains("Add slide")); + add_slide.click(); + + UiObject slide_layout = mDevice.findObject(new UiSelector().textContains(slideLayout)); + + if (!slide_layout.exists()){ + tapOpenArea(); + UiObject done_button = mDevice.findObject(new UiSelector().resourceId("android:id/action_mode_close_button")); + if (done_button.exists()){ + done_button.click(); + } + add_slide.click(); + } + slide_layout.click(); + } public void insertImage(String workingDirectoryName) throws Exception { @@ -165,7 +179,7 @@ public class UiAutomation extends BaseUiAutomation { mDevice.pressBack(); showRoots(); } - if (localDevice.exists()){ + else if (localDevice.exists()){ localDevice.click(); } @@ -185,6 +199,10 @@ public class UiAutomation extends BaseUiAutomation { } else { picture.click(); } + UiObject done_button = mDevice.findObject(new UiSelector().resourceId("android:id/action_mode_close_button")); + if (done_button.exists()){ + done_button.click(); + } } public void insertShape(String shapeName) throws Exception { @@ -238,20 +256,29 @@ public class UiAutomation extends BaseUiAutomation { clickUiObject(BY_TEXT, "Device storage", true); UiObject workingDirectory = mDevice.findObject(new UiSelector().text(workingDirectoryName)); - if (!workingDirectory.exists()) { + UiObject nav_button = mDevice.findObject(new UiSelector().resourceId(packageID + "file_picker_nav_up_btn")); + UiObject folderEntry = mDevice.findObject(new UiSelector().textContains(workingDirectoryName)); + if (workingDirectory.exists()) { + folderEntry.clickAndWaitForNewWindow(); + } + else if (nav_button.exists()) { + while (nav_button.exists()) { + nav_button.click(); + } + clickUiObject(BY_TEXT, "Internal Storage", true); + } + else { showRoots(); UiObject localDevice = mDevice.findObject(new UiSelector().textMatches(".*[GM]B free")); localDevice.click(); - UiObject folderEntry = mDevice.findObject(new UiSelector().textContains(workingDirectoryName)); UiScrollable list = new UiScrollable(new UiSelector().scrollable(true)); if (!folderEntry.exists() && list.waitForExists(WAIT_TIMEOUT_1SEC)) { list.scrollIntoView(folderEntry); } else { folderEntry.waitForExists(WAIT_TIMEOUT_1SEC*10); } - folderEntry.clickAndWaitForNewWindow(); } - + UiScrollable list = new UiScrollable(new UiSelector().className("android.widget.ListView")); if (list.exists()){ @@ -266,7 +293,7 @@ public class UiAutomation extends BaseUiAutomation { logger.start(); clickUiObject(BY_TEXT, docName); - UiObject open = + UiObject open = mDevice.findObject(new UiSelector().text("Open")); if (open.exists()) { open.click(); @@ -369,7 +396,7 @@ public class UiAutomation extends BaseUiAutomation { protected void skipWelcomeScreen() throws Exception { UiObject skip = - mDevice.findObject(new UiSelector().textMatches("Skip")); + mDevice.findObject(new UiSelector().textMatches("Skip|SKIP")); if (skip.exists()) { skip.click(); } @@ -421,7 +448,7 @@ public class UiAutomation extends BaseUiAutomation { insertShape(shapeName); modifyShape(shapeName); mDevice.pressBack(); - UiObject today = + UiObject today = mDevice.findObject(new UiSelector().text("Today")); if 
(!today.exists()){ mDevice.pressBack(); @@ -542,7 +569,7 @@ public class UiAutomation extends BaseUiAutomation { window.click(); } } - + private void showRoots() throws Exception { UiObject rootMenu = mDevice.findObject(new UiSelector().descriptionContains("Show root")); diff --git a/external/workload-automation/wa/workloads/speedometer/com.arm.wa.uiauto.speedometer.apk b/external/workload-automation/wa/workloads/speedometer/com.arm.wa.uiauto.speedometer.apk index 959f16ac1c2ae9fcf137ea007c31df775d2dd8e0..926ca759fbf31c17cce5df72aac19ee9b57f6ea1 100755 Binary files a/external/workload-automation/wa/workloads/speedometer/com.arm.wa.uiauto.speedometer.apk and b/external/workload-automation/wa/workloads/speedometer/com.arm.wa.uiauto.speedometer.apk differ diff --git a/external/workload-automation/wa/workloads/speedometer/uiauto/app/src/main/java/com/arm/wa/uiauto/speedometer/UiAutomation.java b/external/workload-automation/wa/workloads/speedometer/uiauto/app/src/main/java/com/arm/wa/uiauto/speedometer/UiAutomation.java index 19f492b78aa70ef9423ab6e030a96ff3f1b6a5d1..5b787caae170aa17bf2b358a3316398acf5f7420 100755 --- a/external/workload-automation/wa/workloads/speedometer/uiauto/app/src/main/java/com/arm/wa/uiauto/speedometer/UiAutomation.java +++ b/external/workload-automation/wa/workloads/speedometer/uiauto/app/src/main/java/com/arm/wa/uiauto/speedometer/UiAutomation.java @@ -62,12 +62,12 @@ public class UiAutomation extends BaseUiAutomation { } public void clearFirstRun() throws Exception { - UiObject accept = + UiObject accept = mDevice.findObject(new UiSelector().resourceId("com.android.chrome:id/terms_accept") .className("android.widget.Button")); if (accept.exists()){ accept.click(); - UiObject negative = + UiObject negative = mDevice.findObject(new UiSelector().resourceId("com.android.chrome:id/negative_button") .className("android.widget.Button")); negative.waitForExists(100000); @@ -76,18 +76,18 @@ public class UiAutomation extends BaseUiAutomation { } public void runBenchmark() throws Exception { - UiObject start = + UiObject start = mDevice.findObject(new UiSelector().description("Start Test") .className("android.widget.Button")); - if (start.exists()){ + if (start.waitForExists(2000)){ start.click(); } else { - UiObject starttext = + UiObject starttext = mDevice.findObject(new UiSelector().text("Start Test") .className("android.widget.Button")); starttext.click(); } - UiObject scores = + UiObject scores = mDevice.findObject(new UiSelector().resourceId("result-number") .className("android.view.View")); scores.waitForExists(2100000); @@ -97,17 +97,19 @@ public class UiAutomation extends BaseUiAutomation { } public void clearTabs() throws Exception { - UiObject tabselector = + UiObject tabselector = mDevice.findObject(new UiSelector().resourceId("com.android.chrome:id/tab_switcher_button") .className("android.widget.ImageButton")); + if (!tabselector.exists()){ + return; + } tabselector.click(); - UiObject menu = + UiObject menu = mDevice.findObject(new UiSelector().resourceId("com.android.chrome:id/menu_button") .className("android.widget.ImageButton")); menu.click(); - UiObject closetabs = + UiObject closetabs = mDevice.findObject(new UiSelector().textContains("Close all tabs")); closetabs.click(); } } - diff --git a/lisa/energy_model.py b/lisa/energy_model.py index aece2ca6545b6be30b3390af3ff98c25be8e6a4e..29da91fefdf4f5d1a9bc56cd08c62c98f7d7e22c 100644 --- a/lisa/energy_model.py +++ b/lisa/energy_model.py @@ -15,10 +15,11 @@ # limitations under the License. 
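The energy_model.py changes below add a debugfs-based Energy Model loader. For orientation, the layout it expects (as implied by the parsing code further down) is one directory per performance domain under /sys/kernel/debug/energy_model, each exposing a cpus mask plus per-OPP cs:N/frequency and cs:N/power entries; OPP capacities are then derived by scaling a CPU's cpu_capacity by frequency over maximum frequency. A small sketch of that derivation, reusing the example numbers quoted in the code comments below (illustrative only, not part of the patch):

    # Shape of the data returned by target.read_tree_values() on
    # /sys/kernel/debug/energy_model (two OPPs shown for brevity):
    debugfs_em = {
        'pd0': {
            'cpus': '0-3',
            'cs:0': {'frequency': '450000', 'power': '42'},
            'cs:1': {'frequency': '850000', 'power': '119'},
        },
    }

    # Capacity of each OPP is inferred from the cpu_capacity of the first CPU
    # in the domain, scaled by frequency relative to the maximum frequency.
    cpu_capacity = 446   # e.g. /sys/devices/system/cpu/cpu0/cpu_capacity
    freqs = sorted(int(debugfs_em['pd0'][cs]['frequency'])
                   for cs in debugfs_em['pd0'] if cs.startswith('cs:'))
    caps = [f * cpu_capacity / max(freqs) for f in freqs]
    # caps is now roughly [236, 446] for the example values above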
# -from collections import namedtuple, OrderedDict +from collections import namedtuple, OrderedDict, defaultdict from itertools import product import logging import operator +import warnings import re from lisa.utils import Loggable, Serializable, memoized, groupby @@ -793,30 +794,136 @@ class EnergyModel(Serializable, Loggable): # Again, not strictly necessary, just more pleasant. return sorted(ret, key=lambda x: x[0]) + def _simple_em_root(target, pd_attr, cpu_to_pd): + # pd_attr is a dict tree like this + # { + # "pd0": { + # "capacity": [236, 301, 367, 406, 446 ], + # "frequency": [ 450000, 575000, 700000, 775000, 850000 ], + # "power": [ 42, 58, 79, 97, 119 ] + # }, + # "pd1": { + # "capacity": [ 418, 581, 744, 884, 1024 ], + # "frequency": [ 450000, 625000, 800000, 950000, 1100000 ], + # "power": [ 160, 239, 343, 454, 583 ] + # } + # } + def simple_read_idle_states(cpu, target): + # idle states are not supported in the simple model + # record 0 power for them all, but name them according to target + names = [s.name for s in target.cpuidle.get_states(cpu)] + return OrderedDict((name, 0) for name in names) + + def simple_read_active_states(pd): + cstates = list(zip(pd['capacity'], pd['power'])) + active_states = [ActiveState(c, p) for c, p in cstates] + return OrderedDict(zip(pd['frequency'], active_states)) + + cpu_nodes = [] + for cpu in range(target.number_of_cpus): + pd = pd_attr[cpu_to_pd[cpu]] + node = EnergyModelNode( + cpu=cpu, + active_states=simple_read_active_states(pd), + idle_states=simple_read_idle_states(cpu, target)) + cpu_nodes.append(node) + + return EnergyModelRoot(children=cpu_nodes) + + def _simple_pd_root(target): + # We don't have a way to read the idle power domains from sysfs (the + # kernel isn't even aware of them) so we'll just have to assume each CPU + # is its own power domain and all idle states are independent of each + # other. + cpu_pds = [] + for cpu in range(target.number_of_cpus): + names = [s.name for s in target.cpuidle.get_states(cpu)] + cpu_pds.append(PowerDomain(cpu=cpu, idle_states=names)) + return PowerDomain(children=cpu_pds, idle_states=[]) + @classmethod - def from_simplifiedEM_target(cls, target, - directory='/sys/devices/system/cpu/energy_model'): + def from_debugfsEM_target(cls, target, + directory='/sys/kernel/debug/energy_model'): """ Create an EnergyModel by reading a target filesystem on a device with - the new Simplified Energy Model present. + the new Simplified Energy Model present in debugfs. - This uses the energy_model sysctl added by EAS patches to exposes - the frequency domains, together with a tuple of capacity, frequency - and active power for each CPU. This feature is not upstream in mainline - Linux (as of v4.17), and only exists in Android kernels later than - android-4.14. + This uses the energy_model debugfs used usptream to expose the + performance domains, their frequencies and power costs. This feature is + upstream as of Linux 5.1. It is also available on Android 4.19 and + later. Wrt. idle states - the EnergyModel constructed won't be aware of any power data or topological dependencies for entering "cluster" idle states since the simplified model has no such concept. - Initialises only Active States for CPUs and clears all other levels. + Initialises only class:`ActiveStates` for CPUs and clears all other + levels. + + :param target: :class:`devlib.target.Target` object to read filesystem + from. Must have cpufreq and cpuidle modules enabled. 
+ :returns: Constructed EnergyModel object based on the parameters + reported by the target. + """ + + if 'cpuidle' not in target.modules: + raise TargetStableError('Requires cpuidle devlib module. Please ensure "cpuidle" is listed in your target/test modules') + + sysfs = '/sys/devices/system/cpu/cpu{}/cpu_capacity' + pd_attr = defaultdict(dict) + cpu_to_pd = {} + + debugfs_em = target.read_tree_values(directory, depth=3) + if not debugfs_em: + raise TargetStableError('Energy Model not exposed at {} in sysfs.'.format(directory)) + + for pd in debugfs_em: + # Read the CPUMask + pd_attr[pd]['cpus'] = ranges_to_list(debugfs_em[pd]['cpus']) + for cpu in pd_attr[pd]['cpus']: + cpu_to_pd[cpu] = pd + + # Read the frequency and power costs + pd_attr[pd]['frequency'] = [] + pd_attr[pd]['power'] = [] + cstates = [k for k in debugfs_em[pd].keys() if 'cs:' in k] + cstates = sorted(cstates, key=lambda cs: int(cs.replace('cs:',''))) + for cs in cstates: + pd_attr[pd]['frequency'].append(int(debugfs_em[pd][cs]['frequency'])) + pd_attr[pd]['power'].append(int(debugfs_em[pd][cs]['power'])) + + # Compute the intermediate capacities + cap = target.read_value(sysfs.format(pd_attr[pd]['cpus'][0]), int) + max_freq = max(pd_attr[pd]['frequency']) + caps = [f * cap / max_freq for f in pd_attr[pd]['frequency']] + pd_attr[pd]['capacity'] = caps + + root_em = cls._simple_em_root(target, pd_attr, cpu_to_pd) + root_pd = cls._simple_pd_root(target) + perf_domains = [pd_attr[pd]['cpus'] for pd in pd_attr] + + return cls(root_node=root_em, + root_power_domain=root_pd, + freq_domains=perf_domains) + + @classmethod + def from_sysfsEM_target(cls, target, + directory='/sys/devices/system/cpu/energy_model'): + """ + Create an EnergyModel by reading a target filesystem on a device with + the new Simplified Energy Model present in sysfs. + + The patches exposing the Energy Model in sysfs have been abandonned + and this way of loading it is now deprecated. :param target: Devlib target object to read filesystem from. Must have cpufreq and cpuidle modules enabled. :returns: Constructed EnergyModel object based on the parameters reported by the target. """ + warnings.warn('The Energy Model in sysfs is DEPRECATED. Please use debugfs instead.', + DeprecationWarning) + if 'cpuidle' not in target.modules: raise TargetStableError('Requires cpuidle devlib module. 
Please ensure ' '"cpuidle" is listed in your target/test modules') @@ -859,41 +966,11 @@ class EnergyModel(Serializable, Loggable): caps = [f * cap / max_freq for f in sysfs_em[fd]['frequency']] sysfs_em[fd]['capacity'] = caps - def read_active_states(cpu): - fd = sysfs_em[cpu_to_fdom[cpu]] - cstates = list(zip(fd['capacity'], fd['power'])) - active_states = [ActiveState(c, p) for c, p in cstates] - return OrderedDict(list(zip(fd['frequency'], active_states))) - - def read_idle_states(cpu): - # idle states are not supported in the new model - # record 0 power for them all, but name them according to target - names = [s.name for s in target.cpuidle.get_states(cpu)] - return OrderedDict((name, 0) for name in names) - - # Read the CPU-level data - cpus = list(range(target.number_of_cpus)) - cpu_nodes = [] - for cpu in cpus: - node = EnergyModelNode( - cpu=cpu, - active_states=read_active_states(cpu), - idle_states=read_idle_states(cpu)) - cpu_nodes.append(node) - - root = EnergyModelRoot(children=cpu_nodes) + root_em = cls._simple_em_root(target, sysfs_em, cpu_to_fdom) + root_pd = cls._simple_pd_root(target) freq_domains = [sysfs_em[fdom]['cpus'] for fdom in sysfs_em] - # We don't have a way to read the idle power domains from sysfs (the kernel - # isn't even aware of them) so we'll just have to assume each CPU is its - # own power domain and all idle states are independent of each other. - cpu_pds = [] - for cpu in cpus: - names = [s.name for s in target.cpuidle.get_states(cpu)] - cpu_pds.append(PowerDomain(cpu=cpu, idle_states=names)) - - root_pd = PowerDomain(children=cpu_pds, idle_states=[]) - return cls(root_node=root, + return cls(root_node=root_em, root_power_domain=root_pd, freq_domains=freq_domains) @@ -1066,9 +1143,18 @@ class EnergyModel(Serializable, Loggable): directory = '/sys/devices/system/cpu/energy_model' return target.directory_exists(directory) - load = cls.from_simplifiedEM_target + load = cls.from_sysfsEM_target + + class DebugfsEMLoader: + @staticmethod + def check(target): + directory = '/sys/kernel/debug/energy_model' + + return target.file_exists(directory) + + load = cls.from_debugfsEM_target - for loader_cls in (SDEMLoader, SysfsEMLoader): + for loader_cls in (SDEMLoader, SysfsEMLoader, DebugfsEMLoader): try: em_present = loader_cls.check(target) except Exception: