diff --git a/external/devlib.manifest.yaml b/external/devlib.manifest.yaml index 249405c3b1837c30353bc655d7060dda2410d662..42c72d7db0331dd14df66de9e072bd1b9523cb4c 100644 --- a/external/devlib.manifest.yaml +++ b/external/devlib.manifest.yaml @@ -14,12 +14,37 @@ rebase-conf: - remote: douglas base: master - tip: fix_nest_asyncio + tip: fix_signals - remote: douglas base: master - tip: fix_signals + tip: fix_adb_unroot + + - + remote: douglas + base: master + tip: fix_cpufreq_event + + - + remote: douglas + base: master + tip: fix_kprobe_events2 + + - + remote: douglas + base: master + tip: fix_disconnect + + - + remote: douglas + base: master + tip: android_dmesg + + - + remote: douglas + base: master + tip: fix_ssh_pull remotes: upstream: diff --git a/external/devlib/devlib/collector/dmesg.py b/external/devlib/devlib/collector/dmesg.py index 617258c405748fe9a2ca15824f1e1975bc459ef3..06676aaa6b8c137b4b742aa82f55b89c61312847 100644 --- a/external/devlib/devlib/collector/dmesg.py +++ b/external/devlib/devlib/collector/dmesg.py @@ -16,6 +16,7 @@ import re from itertools import takewhile from datetime import timedelta +import logging from devlib.collector import (CollectorBase, CollectorOutput, CollectorOutputEntry) @@ -23,6 +24,9 @@ from devlib.exception import TargetStableError from devlib.utils.misc import memoized +_LOGGER = logging.getLogger('dmesg') + + class KernelLogEntry(object): """ Entry of the kernel ring buffer. @@ -112,17 +116,35 @@ class KernelLogEntry(object): ) @classmethod - def from_dmesg_output(cls, dmesg_out): + def from_dmesg_output(cls, dmesg_out, error=None): """ Return a generator of :class:`KernelLogEntry` for each line of the output of dmesg command. + :param error: If ``"raise"`` or ``None``, an exception will be raised + if a parsing error occurs. If ``"warn"``, it will be logged at + WARNING level. If ``"ignore"``, it will be ignored. If a callable + is passed, the exception will be passed to it. 
+ :type error: str or None or typing.Callable[[BaseException], None] + .. note:: The same restrictions on the dmesg output format as for :meth:`from_str` apply. """ for i, line in enumerate(dmesg_out.splitlines()): if line.strip(): - yield cls.from_str(line, line_nr=i) + try: + yield cls.from_str(line, line_nr=i) + except Exception as e: + if error in (None, 'raise'): + raise e + elif error == 'warn': + _LOGGER.warn(f'error while parsing line "{line!r}": {e}') + elif error == 'ignore': + pass + elif callable(error): + error(e) + else: + raise ValueError(f'Unknown error handling strategy: {error}') def __str__(self): facility = self.facility + ': ' if self.facility else '' @@ -167,7 +189,7 @@ class DmesgCollector(CollectorBase): "debug", # debug-level messages ] - def __init__(self, target, level=LOG_LEVELS[-1], facility='kern', empty_buffer=False): + def __init__(self, target, level=LOG_LEVELS[-1], facility='kern', empty_buffer=False, parse_error=None): super(DmesgCollector, self).__init__(target) if not target.is_rooted: @@ -181,12 +203,16 @@ class DmesgCollector(CollectorBase): )) self.level = level - # Check if dmesg is the BusyBox one, or the one from util-linux in a - # recent version. - # Note: BusyBox dmesg does not support -h, but will still print the - # help with an exit code of 1 - self.basic_dmesg = '--force-prefix' not in \ - self.target.execute('dmesg -h', check_exit_code=False) + # Check if we have a dmesg from a recent util-linux build, rather than + # e.g. busybox's dmesg or the one shipped on some Android versions + # (toybox). 
Note: BusyBox dmesg does not support -h, but will still + # print the help with an exit code of 1 + help_ = self.target.execute('dmesg -h', check_exit_code=False) + self.basic_dmesg = not all( + opt in help_ + for opt in ('--facility', '--force-prefix', '--decode', '--level') + ) + self.facility = facility try: needs_root = target.read_sysctl('kernel.dmesg_restrict') @@ -199,6 +225,7 @@ class DmesgCollector(CollectorBase): self._begin_timestamp = None self.empty_buffer = empty_buffer self._dmesg_out = None + self._parse_error = parse_error @property def dmesg_out(self): @@ -216,11 +243,15 @@ class DmesgCollector(CollectorBase): @property def entries(self): - return self._get_entries(self._dmesg_out, self._begin_timestamp) + return self._get_entries( + self._dmesg_out, + self._begin_timestamp, + error=self._parse_error, + ) @memoized - def _get_entries(self, dmesg_out, timestamp): - entries = KernelLogEntry.from_dmesg_output(dmesg_out) + def _get_entries(self, dmesg_out, timestamp, error): + entries = KernelLogEntry.from_dmesg_output(dmesg_out, error=error) entries = list(entries) if timestamp is None: return entries diff --git a/external/devlib/devlib/collector/ftrace.py b/external/devlib/devlib/collector/ftrace.py index df8ec216245e410e99c52697e2cc56586eccdbd7..0aea8eeac0ae592d2b59ff8690b401c2899fe5b4 100644 --- a/external/devlib/devlib/collector/ftrace.py +++ b/external/devlib/devlib/collector/ftrace.py @@ -95,7 +95,6 @@ class FtraceCollector(CollectorBase): self.host_binary = None self.start_time = None self.stop_time = None - self.event_string = None self.function_string = None self.trace_clock = trace_clock self.saved_cmdlines_nr = saved_cmdlines_nr @@ -111,7 +110,8 @@ class FtraceCollector(CollectorBase): self.function_profile_file = self.target.path.join(self.tracing_path, 'function_profile_enabled') self.marker_file = self.target.path.join(self.tracing_path, 'trace_marker') self.ftrace_filter_file = self.target.path.join(self.tracing_path, 
'set_ftrace_filter') - self.available_tracers_file = self.target.path.join(self.tracing_path, 'available_tracers') + self.available_tracers_file = self.target.path.join(self.tracing_path, 'available_tracers') + self.kprobe_events_file = self.target.path.join(self.tracing_path, 'kprobe_events') self.host_binary = which('trace-cmd') self.kernelshark = which('kernelshark') @@ -198,7 +198,11 @@ class FtraceCollector(CollectorBase): elif self.tracer == 'function_graph': self.function_string = _build_graph_functions(selected_functions, trace_children_functions) - self.event_string = _build_trace_events(selected_events) + self._selected_events = selected_events + + @property + def event_string(self): + return _build_trace_events(self._selected_events) @classmethod def _resolve_tracing_path(cls, target, path): @@ -244,6 +248,12 @@ class FtraceCollector(CollectorBase): return self.target.read_value(self.available_functions_file).splitlines() def reset(self): + # Save kprobe events + try: + kprobe_events = self.target.read_value(self.kprobe_events_file) + except TargetStableError: + kprobe_events = None + self.target.execute('{} reset -B devlib'.format(self.target_binary), as_root=True, timeout=TIMEOUT) @@ -260,8 +270,33 @@ class FtraceCollector(CollectorBase): if self.functions: self.target.write_value(self.function_profile_file, 0, verify=False) + + # Restore kprobe events + if kprobe_events: + self.target.write_value(self.kprobe_events_file, kprobe_events) + self._reset_needed = False + def _trace_frequencies(self): + if 'cpu_frequency' in self._selected_events: + self.logger.debug('Trace CPUFreq frequencies') + try: + mod = self.target.cpufreq + except TargetStableError as e: + self.logger.error(f'Could not trace CPUFreq frequencies as the cpufreq module cannot be loaded: {e}') + else: + mod.trace_frequencies() + + def _trace_idle(self): + if 'cpu_idle' in self._selected_events: + self.logger.debug('Trace CPUIdle states') + try: + mod = self.target.cpuidle + except 
TargetStableError as e: + self.logger.error(f'Could not trace CPUIdle states as the cpuidle module cannot be loaded: {e}') + else: + mod.perturb_cpus() + @asyncf async def start(self): self.start_time = time.time() @@ -309,12 +344,10 @@ class FtraceCollector(CollectorBase): if self.automark: self.mark_start() - if 'cpufreq' in self.target.modules: - self.logger.debug('Trace CPUFreq frequencies') - self.target.cpufreq.trace_frequencies() - if 'cpuidle' in self.target.modules: - self.logger.debug('Trace CPUIdle states') - self.target.cpuidle.perturb_cpus() + + self._trace_frequencies() + self._trace_idle() + # Enable kernel function profiling if self.functions and self.tracer is None: target = self.target @@ -335,9 +368,6 @@ class FtraceCollector(CollectorBase): if self.functions and self.tracer is None: self.target.execute('echo 0 > {}'.format(self.function_profile_file), as_root=True) - if 'cpufreq' in self.target.modules: - self.logger.debug('Trace CPUFreq frequencies') - self.target.cpufreq.trace_frequencies() self.stop_time = time.time() if self.automark: self.mark_stop() diff --git a/external/devlib/devlib/host.py b/external/devlib/devlib/host.py index 70d7943aa0dcd4ff6121fce8a270c4a93f6f6ac3..a65b00f9ff060a2793140c96b3f38c962ab9c14b 100644 --- a/external/devlib/devlib/host.py +++ b/external/devlib/devlib/host.py @@ -112,7 +112,9 @@ class LocalConnection(ConnectionBase): if self.unrooted: raise TargetStableError('unrooted') password = self._get_password() - command = "echo {} | sudo -k -p ' ' -S -- sh -c {}".format(quote(password), quote(command)) + # Empty prompt with -p '' to avoid adding a leading space to the + # output. 
+ command = "echo {} | sudo -k -p '' -S -- sh -c {}".format(quote(password), quote(command)) ignore = None if check_exit_code else 'all' try: stdout, stderr = check_output(command, shell=True, timeout=timeout, ignore=ignore) @@ -136,9 +138,9 @@ class LocalConnection(ConnectionBase): if self.unrooted: raise TargetStableError('unrooted') password = self._get_password() - # The sudo prompt will add a space on stderr, but we cannot filter - # it out here - command = "echo {} | sudo -k -p ' ' -S -- sh -c {}".format(quote(password), quote(command)) + # Empty prompt with -p '' to avoid adding a leading space to the + # output. + command = "echo {} | sudo -k -p '' -S -- sh -c {}".format(quote(password), quote(command)) # Make sure to get a new PGID so PopenBackgroundCommand() can kill # all sub processes that could be started without troubles. diff --git a/external/devlib/devlib/target.py b/external/devlib/devlib/target.py index d686049794d1ed1a0fe36ce14674f7f3dfd89dc3..5c6799fc5af584a7f2d89cd03150ccbfdd58e757 100644 --- a/external/devlib/devlib/target.py +++ b/external/devlib/devlib/target.py @@ -312,7 +312,9 @@ class Target(object): connection_settings=None, platform=None, working_directory=None, + *, executables_directory=None, + tmp_directory=None, connect=True, modules=None, load_default_modules=True, @@ -347,6 +349,7 @@ class Target(object): self.connection_settings['platform'] = self.platform self.working_directory = working_directory self.executables_directory = executables_directory + self.tmp_directory = tmp_directory self.load_default_modules = load_default_modules self.shell_prompt = bytes_regex(shell_prompt) self.conn_cls = conn_cls @@ -356,7 +359,6 @@ class Target(object): self._installed_modules = {} self._cache = {} self._shutils = None - self._file_transfer_cache = None self._max_async = max_async self.busybox = None @@ -477,10 +479,35 @@ class Target(object): self.wait_boot_complete(timeout) self.check_connection() self._resolve_paths() - 
self.execute('mkdir -p {}'.format(quote(self.working_directory))) - self.execute('mkdir -p {}'.format(quote(self.executables_directory))) + assert self.working_directory + if self.executables_directory is None: + self.executables_directory = self.path.join( + self.working_directory, + 'bin' + ) + + for path in (self.working_directory, self.executables_directory): + self.makedirs(path) + self.busybox = self.install(os.path.join(PACKAGE_BIN_DIRECTORY, self.abi, 'busybox'), timeout=30) self.conn.busybox = self.busybox + + # If neither the mktemp call nor _resolve_paths() managed to get a + # temporary directory, we just make one in the working directory. + if self.tmp_directory is None: + assert self.busybox + try: + tmp = await self.execute.asyn(f'{quote(self.busybox)} mktemp -d') + except Exception: + # Some Android platforms don't have a working mktemp unless + # TMPDIR is set, so we let AndroidTarget._resolve_paths() deal + # with finding a suitable location. + tmp = self.path.join(self.working_directory, 'tmp') + else: + tmp = tmp.strip() + self.tmp_directory = tmp + self.makedirs(self.tmp_directory) + self._detect_max_async(max_async or self._max_async) self.platform.update_from_target(self) self._update_modules('connected') @@ -535,33 +562,36 @@ class Target(object): """ Check that the connection works without obvious issues. """ - out = await self.execute.asyn('true', as_root=False) - if out.strip(): - raise TargetStableError('The shell seems to not be functional and adds content to stderr: {}'.format(out)) + async def check(**kwargs): + out = await self.execute.asyn('true', **kwargs) + if out: + raise TargetStableError('The shell seems to not be functional and adds content to stderr: {!r}'.format(out)) + + await check(as_root=False) + # If we are rooted, we usually run with sudo. Unfortunately, PAM + # modules can write random text to stdout such as: + # Your password will expire in XXX days. 
+ if self.is_rooted: + await check(as_root=True) def disconnect(self): - connections = self._conn.get_all_values() - # Now that we have all the connection objects, we simply reset the TLS - # property so that the connections we got will not be reused anywhere. - del self._conn - - unused_conns = self._unused_conns - self._unused_conns.clear() - - for conn in itertools.chain(connections, self._unused_conns): - conn.close() + with self._lock: + thread_conns = self._conn.get_all_values() + # Now that we have all the connection objects, we simply reset the + # TLS property so that the connections we obtained will not be + # reused anywhere. + del self._conn - pool = self._async_pool - self._async_pool = None - if pool is not None: - pool.__exit__(None, None, None) + unused_conns = list(self._unused_conns) + self._unused_conns.clear() - with self._lock: - connections = self._conn.get_all_values() - for conn in itertools.chain(connections, self._unused_conns): + for conn in itertools.chain(thread_conns, unused_conns): conn.close() - if self._async_pool is not None: - self._async_pool.__exit__(None, None, None) + + pool = self._async_pool + self._async_pool = None + if pool is not None: + pool.__exit__(None, None, None) def __enter__(self): return self @@ -599,8 +629,6 @@ class Target(object): # Initialize modules which requires Busybox (e.g. shutil dependent tasks) self._update_modules('setup') - await self.execute.asyn('mkdir -p {}'.format(quote(self._file_transfer_cache))) - def reboot(self, hard=False, connect=True, timeout=180): if hard: if not self.has('hard_reset'): @@ -634,24 +662,11 @@ class Target(object): Context manager to provide a unique path in the transfer cache with the basename of the given name. 
""" - # Use a UUID to avoid race conditions on the target side - xfer_uuid = uuid.uuid4().hex - folder = self.path.join(self._file_transfer_cache, xfer_uuid) # Make sure basename will work on folders too name = os.path.normpath(name) - # Ensure the name is relative so that os.path.join() will actually - # join the paths rather than ignoring the first one. - name = './{}'.format(os.path.basename(name)) - - check_rm = False - try: - await self.makedirs.asyn(folder) - # Don't check the exit code as the folder might not even exist - # before this point, if creating it failed - check_rm = True - yield self.path.join(folder, name) - finally: - await self.execute.asyn('rm -rf -- {}'.format(quote(folder)), check_exit_code=check_rm) + name = os.path.basename(name) + async with self.make_temp() as tmp: + yield self.path.join(tmp, name) @asyn.asyncf async def _prepare_xfer(self, action, sources, dest, pattern=None, as_root=False): @@ -660,10 +675,22 @@ class Target(object): transfering multiple sources. 
""" - once = functools.lru_cache(maxsize=None) + def once(f): + cache = dict() + + @functools.wraps(f) + async def wrapper(path): + try: + return cache[path] + except KeyError: + x = await f(path) + cache[path] = x + return x + + return wrapper _target_cache = {} - def target_paths_kind(paths, as_root=False): + async def target_paths_kind(paths, as_root=False): def process(x): x = x.strip() if x == 'notexist': @@ -683,7 +710,7 @@ class Target(object): ) for path in _paths ) - res = self.execute(cmd, as_root=as_root) + res = await self.execute.asyn(cmd, as_root=as_root) _target_cache.update(zip(_paths, map(process, res.split()))) return [ @@ -692,7 +719,7 @@ class Target(object): ] _host_cache = {} - def host_paths_kind(paths, as_root=False): + async def host_paths_kind(paths, as_root=False): def path_kind(path): if os.path.isdir(path): return 'dir' @@ -719,47 +746,55 @@ class Target(object): src_excep = HostError src_path_kind = host_paths_kind - _dst_mkdir = once(self.makedirs) + _dst_mkdir = once(self.makedirs.asyn) dst_path_join = self.path.join dst_paths_kind = target_paths_kind - dst_remove_file = once(functools.partial(self.remove, as_root=as_root)) + + @once + async def dst_remove_file(path): + return await self.remove.asyn(path, as_root=as_root) elif action == 'pull': src_excep = TargetStableError src_path_kind = target_paths_kind - _dst_mkdir = once(functools.partial(os.makedirs, exist_ok=True)) + @once + async def _dst_mkdir(path): + return os.makedirs(path, exist_ok=True) dst_path_join = os.path.join dst_paths_kind = host_paths_kind - dst_remove_file = once(os.remove) + + @once + async def dst_remove_file(path): + return os.remove(path) else: raise ValueError('Unknown action "{}"'.format(action)) # Handle the case where path is None - def dst_mkdir(path): + async def dst_mkdir(path): if path: - _dst_mkdir(path) + await _dst_mkdir(path) - def rewrite_dst(src, dst): + async def rewrite_dst(src, dst): new_dst = dst_path_join(dst, os.path.basename(src)) - 
src_kind, = src_path_kind([src], as_root) + src_kind, = await src_path_kind([src], as_root) # Batch both checks to avoid a costly extra execute() - dst_kind, new_dst_kind = dst_paths_kind([dst, new_dst], as_root) + dst_kind, new_dst_kind = await dst_paths_kind([dst, new_dst], as_root) if src_kind == 'file': if dst_kind == 'dir': if new_dst_kind == 'dir': raise IsADirectoryError(new_dst) if new_dst_kind == 'file': - dst_remove_file(new_dst) + await dst_remove_file(new_dst) return new_dst else: return new_dst elif dst_kind == 'file': - dst_remove_file(dst) + await dst_remove_file(dst) return dst else: - dst_mkdir(os.path.dirname(dst)) + await dst_mkdir(os.path.dirname(dst)) return dst elif src_kind == 'dir': if dst_kind == 'dir': @@ -773,7 +808,7 @@ class Target(object): elif dst_kind == 'file': raise FileExistsError(dst_kind) else: - dst_mkdir(os.path.dirname(dst)) + await dst_mkdir(os.path.dirname(dst)) return dst else: raise FileNotFoundError(src) @@ -782,18 +817,19 @@ class Target(object): if not sources: raise src_excep('No file matching source pattern: {}'.format(pattern)) - if dst_paths_kind([dest]) != ['dir']: + if (await dst_paths_kind([dest])) != ['dir']: raise NotADirectoryError('A folder dest is required for multiple matches but destination is a file: {}'.format(dest)) + async def f(src): + return await rewrite_dst(src, dest) + mapping = await self.async_manager.map_concurrently(f, sources) + # TODO: since rewrite_dst() will currently return a different path for # each source, it will not bring anything. In order to be useful, # connections need to be able to understand that if the destination is # an empty folder, the source is supposed to be transfered into it with # the same basename. 
- return groupby_value({ - src: rewrite_dst(src, dest) - for src in sources - }) + return groupby_value(mapping) @asyn.asyncf @call_conn @@ -816,10 +852,11 @@ class Target(object): if as_root: for sources, dest in mapping.items(): - for source in sources: + async def f(source): async with self._xfer_cache_path(source) as device_tempfile: do_push([source], device_tempfile) await self.execute.asyn("mv -f -- {} {}".format(quote(device_tempfile), quote(dest)), as_root=True) + await self.async_manager.map_concurrently(f, sources) else: for sources, dest in mapping.items(): do_push(sources, dest) @@ -894,11 +931,13 @@ class Target(object): if via_temp: for sources, dest in mapping.items(): - for source in sources: + async def f(source): async with self._xfer_cache_path(source) as device_tempfile: - await self.execute.asyn("cp -r -- {} {}".format(quote(source), quote(device_tempfile)), as_root=as_root) - await self.execute.asyn("{} chmod 0644 -- {}".format(self.busybox, quote(device_tempfile)), as_root=as_root) + cp_cmd = f"{quote(self.busybox)} cp -rL -- {quote(source)} {quote(device_tempfile)}" + chmod_cmd = f"{quote(self.busybox)} chmod 0644 -- {quote(device_tempfile)}" + await self.execute.asyn(f"{cp_cmd} && {chmod_cmd}", as_root=as_root) do_pull([device_tempfile], dest) + await self.async_manager.map_concurrently(f, sources) else: for sources, dest in mapping.items(): do_pull(sources, dest) @@ -941,15 +980,17 @@ class Target(object): # execution def _prepare_cmd(self, command, force_locale): + tmpdir = f'TMPDIR={quote(self.tmp_directory)}' if self.tmp_directory else '' + # Force the locale if necessary for more predictable output if force_locale: # Use an explicit export so that the command is allowed to be any # shell statement, rather than just a command invocation - command = 'export LC_ALL={} && {}'.format(quote(force_locale), command) + command = f'export LC_ALL={quote(force_locale)} {tmpdir} && {command}' # Ensure to use deployed command when availables if 
self.executables_directory: - command = "export PATH={}:$PATH && {}".format(quote(self.executables_directory), command) + command = f"export PATH={quote(self.executables_directory)}:$PATH && {command}" return command @@ -1175,7 +1216,7 @@ fi raise @asyn.asynccontextmanager - async def make_temp(self, is_directory=True, directory='', prefix='devlib-test'): + async def make_temp(self, is_directory=True, directory=None, prefix=None): """ Creates temporary file/folder on target and deletes it once it's done. @@ -1193,10 +1234,11 @@ fi :rtype: str """ - directory = directory or self.working_directory + directory = directory or self.tmp_directory + prefix = f'{prefix}-' if prefix else '' temp_obj = None try: - cmd = f'mktemp -p {quote(directory)} {quote(prefix)}-XXXXXX' + cmd = f'mktemp -p {quote(directory)} {quote(prefix)}XXXXXX' if is_directory: cmd += ' -d' @@ -1293,13 +1335,15 @@ fi return self.path.join(self.working_directory, name) @asyn.asyncf - async def tempfile(self, prefix='', suffix=''): - name = '{prefix}_{uuid}_{suffix}'.format( + async def tempfile(self, prefix=None, suffix=None): + prefix = f'{prefix}-' if prefix else '' + sufix = f'-{suffix}' if suffix else '' + name = '{prefix}{uuid}{suffix}'.format( prefix=prefix, uuid=uuid.uuid4().hex, suffix=suffix, ) - path = self.get_workpath(name) + self.path.join(self.tmp_directory, name) if (await self.file_exists.asyn(path)): raise FileExistsError('Path already exists on the target: {}'.format(path)) else: @@ -1767,7 +1811,9 @@ class LinuxTarget(Target): connection_settings=None, platform=None, working_directory=None, + *, executables_directory=None, + tmp_directory=None, connect=True, modules=None, load_default_modules=True, @@ -1780,6 +1826,7 @@ class LinuxTarget(Target): platform=platform, working_directory=working_directory, executables_directory=executables_directory, + tmp_directory=tmp_directory, connect=connect, modules=modules, load_default_modules=load_default_modules, @@ -1868,10 +1915,8 @@ class 
LinuxTarget(Target): def _resolve_paths(self): if self.working_directory is None: + # This usually lands in the home directory self.working_directory = self.path.join(self.execute("pwd").strip(), 'devlib-target') - self._file_transfer_cache = self.path.join(self.working_directory, '.file-cache') - if self.executables_directory is None: - self.executables_directory = self.path.join(self.working_directory, 'bin') class AndroidTarget(Target): @@ -1977,7 +2022,9 @@ class AndroidTarget(Target): connection_settings=None, platform=None, working_directory=None, + *, executables_directory=None, + tmp_directory=None, connect=True, modules=None, load_default_modules=True, @@ -1991,6 +2038,7 @@ class AndroidTarget(Target): platform=platform, working_directory=working_directory, executables_directory=executables_directory, + tmp_directory=tmp_directory, connect=connect, modules=modules, load_default_modules=load_default_modules, @@ -2587,9 +2635,16 @@ class AndroidTarget(Target): def _resolve_paths(self): if self.working_directory is None: self.working_directory = self.path.join(self.external_storage, 'devlib-target') - self._file_transfer_cache = self.path.join(self.working_directory, '.file-cache') + if self.tmp_directory is None: + # Do not rely on the generic default here, as we need to provide an + # android-specific default in case it fails. 
+ try: + tmp = self.execute(f'{quote(self.busybox)} mktemp -d') + except Exception: + tmp = '/data/local/tmp' + self.tmp_directory = tmp if self.executables_directory is None: - self.executables_directory = '/data/local/tmp/bin' + self.executables_directory = self.path.join(self.tmp_directory, 'bin') @asyn.asyncf async def _ensure_executables_directory_is_writable(self): @@ -3056,7 +3111,9 @@ class LocalLinuxTarget(LinuxTarget): connection_settings=None, platform=None, working_directory=None, + *, executables_directory=None, + tmp_directory=None, connect=True, modules=None, load_default_modules=True, @@ -3069,6 +3126,7 @@ class LocalLinuxTarget(LinuxTarget): platform=platform, working_directory=working_directory, executables_directory=executables_directory, + tmp_directory=tmp_directory, connect=connect, modules=modules, load_default_modules=load_default_modules, @@ -3080,9 +3138,6 @@ class LocalLinuxTarget(LinuxTarget): def _resolve_paths(self): if self.working_directory is None: self.working_directory = '/tmp/devlib-target' - self._file_transfer_cache = self.path.join(self.working_directory, '.file-cache') - if self.executables_directory is None: - self.executables_directory = '/tmp/devlib-target/bin' def _get_model_name(section): @@ -3143,6 +3198,7 @@ class ChromeOsTarget(LinuxTarget): connection_settings=None, platform=None, working_directory=None, + *, executables_directory=None, android_working_directory=None, android_executables_directory=None, @@ -3175,6 +3231,7 @@ class ChromeOsTarget(LinuxTarget): platform=platform, working_directory=working_directory, executables_directory=executables_directory, + tmp_directory=tmp_directory, connect=False, modules=modules, load_default_modules=load_default_modules, @@ -3243,6 +3300,3 @@ class ChromeOsTarget(LinuxTarget): def _resolve_paths(self): if self.working_directory is None: self.working_directory = '/mnt/stateful_partition/devlib-target' - self._file_transfer_cache = self.path.join(self.working_directory, 
'.file-cache') - if self.executables_directory is None: - self.executables_directory = self.path.join(self.working_directory, 'bin') diff --git a/external/devlib/devlib/utils/android.py b/external/devlib/devlib/utils/android.py index bfc88928485ec47dbd5fdafcfe8e29a90f34f075..001cb93be373e3df799f58dc5d7788b1743ddaf9 100755 --- a/external/devlib/devlib/utils/android.py +++ b/external/devlib/devlib/utils/android.py @@ -305,13 +305,14 @@ class AdbConnection(ConnectionBase): self.adb_server = adb_server self.adb_port = adb_port self.adb_as_root = adb_as_root + self._restore_to_adb_root = False lock, nr_active = AdbConnection.active_connections with lock: nr_active[self.device] += 1 if self.adb_as_root: try: - self.adb_root(enable=True) + self._restore_to_adb_root = self._adb_root(enable=True) # Exception will be raised if we are not the only connection # active. adb_root() requires restarting the server, which is not # acceptable if other connections are active and can apparently @@ -411,7 +412,7 @@ class AdbConnection(ConnectionBase): if disconnect: if self.adb_as_root: - self.adb_root(enable=False) + self.adb_root(enable=self._restore_to_adb_root) adb_disconnect(self.device, self.adb_server, self.adb_port) def cancel_running_command(self): @@ -421,6 +422,9 @@ class AdbConnection(ConnectionBase): pass def adb_root(self, enable=True): + self._adb_root(enable=enable) + + def _adb_root(self, enable): lock, nr_active = AdbConnection.active_connections with lock: can_root = nr_active[self.device] <= 1 @@ -428,20 +432,24 @@ class AdbConnection(ConnectionBase): if not can_root: raise AdbRootError('Can only restart adb server if no other connection is active') + def is_rooted(out): + return 'adbd is already running as root' in out + cmd = 'root' if enable else 'unroot' try: output = adb_command(self.device, cmd, timeout=30, adb_server=self.adb_server, adb_port=self.adb_port) except subprocess.CalledProcessError as e: + was_rooted = is_rooted(e.output) # Ignore if we're already 
root - if 'adbd is already running as root' in e.output: - pass - else: + if not was_rooted: raise AdbRootError(str(e)) from e else: + was_rooted = is_rooted(output) # Check separately as this does not cause a error exit code. if 'cannot run as root in production builds' in output: raise AdbRootError(output) AdbConnection._connected_as_root[self.device] = enable + return was_rooted def wait_for_device(self, timeout=30): adb_command(self.device, 'wait-for-device', timeout, self.adb_server, self.adb_port) diff --git a/external/devlib/devlib/utils/asyn.py b/external/devlib/devlib/utils/asyn.py index fd518d90580a0ee1e9b34580afc0032f57b009f3..dd6d42d598df00d689621b1719a456e12474bccd 100644 --- a/external/devlib/devlib/utils/asyn.py +++ b/external/devlib/devlib/utils/asyn.py @@ -209,6 +209,7 @@ class _AsyncPolymorphicFunction: def __init__(self, asyn, blocking): self.asyn = asyn self.blocking = blocking + functools.update_wrapper(self, asyn) def __get__(self, *args, **kwargs): return self.__class__( @@ -216,6 +217,12 @@ class _AsyncPolymorphicFunction: blocking=self.blocking.__get__(*args, **kwargs), ) + # Ensure inspect.iscoroutinefunction() does not detect us as being async, + # since __call__ is not. + @property + def __code__(self): + return self.__call__.__code__ + def __call__(self, *args, **kwargs): return self.blocking(*args, **kwargs) diff --git a/external/devlib/devlib/utils/ssh.py b/external/devlib/devlib/utils/ssh.py index b499f64b96b446a9e9164819b1939a6db8026dbf..caa0f44251d50782821ec4027c60187f0245fee4 100644 --- a/external/devlib/devlib/utils/ssh.py +++ b/external/devlib/devlib/utils/ssh.py @@ -61,7 +61,8 @@ from devlib.utils.types import boolean from devlib.connection import ConnectionBase, ParamikoBackgroundCommand, SSHTransferHandle -DEFAULT_SSH_SUDO_COMMAND = "sudo -k -p ' ' -S -- sh -c {}" +# Empty prompt with -p '' to avoid adding a leading space to the output. 
+DEFAULT_SSH_SUDO_COMMAND = "sudo -k -p '' -S -- sh -c {}" class _SSHEnv: @@ -498,7 +499,18 @@ class SshConnection(SshConnectionBase): push(sftp, src, dst, callback) def _pull_file(self, sftp, src, dst, callback): - sftp.get(src, dst, callback=callback) + try: + sftp.get(src, dst, callback=callback) + except Exception as e: + # A file may have been created by Paramiko, but we want to clean + # that up, particularly if we tried to pull a folder and failed, + # otherwise this will make subsequent attempts at pulling the + # folder fail since the destination will exist. + try: + os.remove(dst) + except Exception: + pass + raise e def _pull_folder(self, sftp, src, dst, callback): os.makedirs(dst) diff --git a/external/devlib/tools/android/setup_host.sh b/external/devlib/tools/android/setup_host.sh index 60b283cc2b04fb147abaca64b7eaab3afb04fed7..ff0340158aa8937a4f768e8da8fc935676556543 100755 --- a/external/devlib/tools/android/setup_host.sh +++ b/external/devlib/tools/android/setup_host.sh @@ -187,7 +187,6 @@ install_pacman() { # APT-based distributions like Ubuntu or Debian apt_packages=( - cpu-checker libarchive-tools qemu-user-static wget diff --git a/external/subtrees.conf b/external/subtrees.conf index 69006f922cf1accb65ced105248e68fcd9759215..76d1f2dff1637bba259e2e73b87e534f21e7445d 100644 --- a/external/subtrees.conf +++ b/external/subtrees.conf @@ -15,5 +15,10 @@ ref = lisa [workload-automation] path = external/workload-automation -url = https://github.com/ARM-Software/workload-automation.git -ref = master +# url = https://github.com/ARM-Software/workload-automation.git +# ref = master + +# See external/workload-automation.manifest.yaml for instructions on how to +# build this branch: +url = https://github.com/douglas-raillard-arm/workload-automation.git +ref = lisa diff --git a/external/workload-automation.manifest.yaml b/external/workload-automation.manifest.yaml new file mode 100644 index 
0000000000000000000000000000000000000000..9ec236d82d02044892d201a99add100fed768543 --- /dev/null +++ b/external/workload-automation.manifest.yaml @@ -0,0 +1,32 @@ + +# batch-rebase manifest used to create the WA branch for LISA + +# The "lisa" branch in the WA repo can be constructed with: +# batch-rebase create . --manifest "$LISA_HOME/external/workload-automation.manifest.yaml" --create-branch lisa + +rebase-conf: + rr-cache: ./rr-cache + base: + remote: upstream + ref: master + + topics: + - + remote: metin + base: master + tip: tracing-mode + + # TODO: Add this one once Luis made a proper topic branch for it: + # https://github.com/ARM-software/workload-automation/pull/1268 + # - + # remote: luis + # base: master + # tip: + + remotes: + upstream: + url: https://github.com/ARM-Software/workload-automation.git + metin: + url: https://github.com/metin-arm/workload-automation.git + luis: + url: https://github.com/luis-machado-arm/workload-automation.git diff --git a/external/workload-automation/doc/source/user_information/user_guide.rst b/external/workload-automation/doc/source/user_information/user_guide.rst index 535eef5e6ad060fc0eb890a009c5a1356c231cf2..5ec20cb017652b78332112a91bd845d153d1bf99 100644 --- a/external/workload-automation/doc/source/user_information/user_guide.rst +++ b/external/workload-automation/doc/source/user_information/user_guide.rst @@ -400,6 +400,7 @@ below: no_install: false report: true report_on_target: false + mode: write-to-memory csv: extra_columns: null use_all_classifiers: false diff --git a/external/workload-automation/doc/source/user_information/user_reference/agenda.rst b/external/workload-automation/doc/source/user_information/user_reference/agenda.rst index 9b9d6984147505d4d518dbec2447e36ccd6ca092..9404e247967fe232af75f893f31b333afcdee6cb 100644 --- a/external/workload-automation/doc/source/user_information/user_reference/agenda.rst +++ b/external/workload-automation/doc/source/user_information/user_reference/agenda.rst @@ 
-45,6 +45,7 @@ An example agenda can be seen here: no_install: false report: true report_on_target: false + mode: write-to-disk csv: # Provide config for the csv augmentation use_all_classifiers: true diff --git a/external/workload-automation/requirements.txt b/external/workload-automation/requirements.txt index de0f3c5365a5f13aefc51e351bf0d7b3a900b0fb..6957866b39cd84214aea8212e9c0dabe8f88acb2 100644 --- a/external/workload-automation/requirements.txt +++ b/external/workload-automation/requirements.txt @@ -1,9 +1,9 @@ bcrypt==4.0.1 -certifi==2023.7.22 +certifi==2024.7.4 cffi==1.15.1 charset-normalizer==3.1.0 colorama==0.4.6 -cryptography==42.0.4 +cryptography==43.0.1 devlib==1.3.4 future==0.18.3 idna==3.7 @@ -25,6 +25,6 @@ requests==2.32.0 scp==0.14.5 six==1.16.0 tzdata==2023.3 -urllib3==1.26.18 +urllib3==1.26.19 wlauto==3.3.1 wrapt==1.15.0 diff --git a/external/workload-automation/wa/instruments/trace_cmd.py b/external/workload-automation/wa/instruments/trace_cmd.py index 23a03e313cb865548a69212c75a0f447e4c95e58..a24b9b45d6f2265aaf4b7829e5261851559f293e 100644 --- a/external/workload-automation/wa/instruments/trace_cmd.py +++ b/external/workload-automation/wa/instruments/trace_cmd.py @@ -162,6 +162,12 @@ class TraceCmdInstrument(Instrument): installed on the host (the one in your distribution's repos may be too old). """), + Parameter('mode', kind=str, default='write-to-memory', + description=""" + Specifies whether collected traces should be saved in memory or on disk. + Extensive workloads may hit out-of-memory issues. Hence, write-to-disk + mode can help in such cases. 
+ """), ] def __init__(self, target, **kwargs): @@ -183,6 +189,7 @@ class TraceCmdInstrument(Instrument): no_install=self.no_install, strict=False, report_on_target=False, + mode=self.mode, ) if self.report and self.report_on_target: collector_params['autoreport'] = True @@ -215,12 +222,14 @@ class TraceCmdInstrument(Instrument): if not self.collector: return self.logger.info('Extracting trace from target...') - outfile = os.path.join(context.output_directory, 'trace.dat') + outfile = os.path.join(context.output_directory, OUTPUT_TRACE_FILE) + self.collector.set_output(outfile) self.collector.get_data() context.add_artifact('trace-cmd-bin', outfile, 'data') if self.report: - textfile = os.path.join(context.output_directory, 'trace.txt') + textfile = os.path.join(context.output_directory, OUTPUT_TEXT_FILE) + if not self.report_on_target: self.collector.report(outfile, textfile) context.add_artifact('trace-cmd-txt', textfile, 'export')