diff --git a/.gitignore b/.gitignore
index 9d7e1ccc342e02b07c681866bb84277c66c3f6e5..e86bc51e47dce3dbc9b3342b62dfa5291c693ed3 100644
--- a/.gitignore
+++ b/.gitignore
@@ -8,3 +8,5 @@
 /ipynb/*.pid
 /ipynb/server.url
 /vagrant
+/tools/wa_venv
+/tools/wa_user_directory/dependencies
diff --git a/ipynb/wltests/WA3_Compare.ipynb b/ipynb/wltests/WA3_Compare.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..8325178c0c8a762bc28435ae3e6db3ebcfb4bee2
--- /dev/null
+++ b/ipynb/wltests/WA3_Compare.ipynb
@@ -0,0 +1,286 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Example use of `WaResultsCollector`\n",
+    "`WaResultsCollector` collects, analyses and visualises results from Workload Automation 3. Let's look at its docstring."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "import logging\n",
+    "from IPython.display import display\n",
+    "\n",
+    "from wa_results_collector import WaResultsCollector"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from conf import LisaLogging\n",
+    "LisaLogging.setup()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "print WaResultsCollector.__doc__"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# You can configure the logging level for the WaResultsCollector\n",
+    "# by tuning its loglevel setting in LISA_HOME/logging.conf"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Results collection\n",
+    "\n",
+    "If you have a LISA platform description for the platform the tests were run on, you can set it here to get extra metrics from trace analysis. If you set it to `None` you will still be able to analyse all the non-trace-derived metrics."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "from libs.utils.platforms import hikey960\n",
+    "# platform = hikey960\n",
+    "platform = None"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "scrolled": false
+   },
+   "outputs": [],
+   "source": [
+    "collector = WaResultsCollector(base_dir='../../results/wltests/', # Base path of your results folders\n",
+    "                               #wa_dirs='(substring_to_match)',   # Parse only folders matching this regexp\n",
+    "                               #parse_traces=False,               # Disable trace parsing if you don't care about trace metrics\n",
+    "                               platform=platform,\n",
+    "                               kernel_repo_path='~/sources/linux')"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Collected Metrics"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "df = collector.results_df\n",
+    "logging.info(\"Metrics available for plots and analysis:\")\n",
+    "for metric in df.metric.unique().tolist():\n",
+    "    logging.info(\"   %s\", metric)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Jankbench"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Total Frame Duration"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "for test in collector.tests(workload='jankbench'):\n",
+    "    logging.info(\"Results for: %s\", test)\n",
+    "    collector.report(workload='jankbench', metric='frame_total_duration',\n",
+    "                     test=test, sort_on='99%', ascending=True)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Energy"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "scrolled": false
+   },
+   "outputs": [],
+   "source": [
+    "for test in collector.tests(workload='jankbench'):\n",
+    "    logging.info(\"Results for: %s\", test)\n",
+    "    collector.report(workload='jankbench', metric='device_total_energy',\n",
+    "                     test=test, sort_on='99%', ascending=True)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Frame Duration CDF"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "scrolled": false
+   },
+   "outputs": [],
+   "source": [
+    "for test in collector.tests(workload='jankbench'):\n",
+    "    logging.info(\"Results for: %s\", test)\n",
+    "    collector.plot_cdf(workload='jankbench', metric='frame_total_duration', test=test, threshold=16)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Exoplayer"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Dropped Frames"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "for test in collector.tests(workload='exoplayer'):\n",
+    "    logging.info(\"Results for: %s\", test)\n",
+    "    collector.report(workload='exoplayer', metric='exoplayer_dropped_frames',\n",
+    "                     test=test, sort_on='99%', ascending=True)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Energy"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "for test in collector.tests(workload='exoplayer'):\n",
+    "    logging.info(\"Results for: %s\", test)\n",
+    "    collector.report(workload='exoplayer', metric='device_total_energy',\n",
+    "                     test=test, sort_on='99%', ascending=True)"
+   ]
+  },
+  {
"markdown", + "metadata": {}, + "source": [ + "# Generic comparison plots\n", + "`plot_comparisons` can be used to automatically discover metrics that changed between different kernel versions or tags. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": false + }, + "outputs": [], + "source": [ + "collector.plot_comparisons(base_id=df['kernel'].iloc[0], by='kernel')" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 2", + "language": "python", + "name": "python2" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 2 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython2", + "version": "2.7.12" + }, + "toc": { + "colors": { + "hover_highlight": "#DAA520", + "running_highlight": "#FF0000", + "selected_highlight": "#FFD700" + }, + "moveMenuLeft": true, + "nav_menu": { + "height": "100px", + "width": "252px" + }, + "navigate_menu": true, + "number_sections": true, + "sideBar": true, + "threshold": 4, + "toc_cell": false, + "toc_section_display": "block", + "toc_window_display": false + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/libs/devlib b/libs/devlib index dc453ad8916cfb914c9dafaad8b0b440d3a4b443..af0ed2ab4806fa15edec3f16c83c651da82a5757 160000 --- a/libs/devlib +++ b/libs/devlib @@ -1 +1 @@ -Subproject commit dc453ad8916cfb914c9dafaad8b0b440d3a4b443 +Subproject commit af0ed2ab4806fa15edec3f16c83c651da82a5757 diff --git a/libs/utils/__init__.py b/libs/utils/__init__.py index 294c6a063c041e8e2322bb0e67e2e9c2d785421b..7b34afc9cab7bc331c6d85e0fc54a50f909c5ad9 100644 --- a/libs/utils/__init__.py +++ b/libs/utils/__init__.py @@ -31,4 +31,6 @@ from report import Report from analysis_register import AnalysisRegister from analysis_module import AnalysisModule +from git import Git + import android diff --git a/libs/utils/analysis/cpus_analysis.py b/libs/utils/analysis/cpus_analysis.py index d28f5b20cc7a13a5ebe7dd5a478e88d22b71f198..416be843ca151b342837d4eda39c4ff9e908e261 100644 --- a/libs/utils/analysis/cpus_analysis.py +++ b/libs/utils/analysis/cpus_analysis.py @@ -61,6 +61,32 @@ class CpusAnalysis(AnalysisModule): ctx_sw_df.index.name = 'cpu' return ctx_sw_df + def _dfg_cpu_wakeups(self, cpus=None): + """" + Get a DataFrame showing when a CPU was woken from idle + + :param cpus: List of CPUs to find wakeups for. If None, all CPUs. + :type cpus: list(int) or None + + :returns: :mod:`pandas.DataFrame` with one column ``cpu``, where each + row shows a time when the given ``cpu`` was woken up from + idle. + """ + if not self._trace.hasEvents('cpu_idle'): + self._log.warning('Events [cpu_idle] not found, cannot ' + 'get CPU wakeup events.') + return None + + cpus = cpus or range(self._trace.platform['cpus_count']) + + sr = pd.Series() + for cpu in cpus: + cpu_sr = self._trace.getCPUActiveSignal(cpu) + cpu_sr = cpu_sr[cpu_sr == 1] + cpu_sr = cpu_sr.replace(1, cpu) + sr = sr.append(cpu_sr) + + return pd.DataFrame({'cpu': sr}).sort_index() ############################################################################### # Plotting Methods diff --git a/libs/utils/git.py b/libs/utils/git.py new file mode 100644 index 0000000000000000000000000000000000000000..6a9e07c33054025ca85a446f1cb771bf8deac365 --- /dev/null +++ b/libs/utils/git.py @@ -0,0 +1,58 @@ +# SPDX-License-Identifier: Apache-2.0 +# +# Copyright (C) 2015, ARM Limited and contributors. 
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import logging
+import os
+import subprocess
+
+class Git(object):
+
+    @staticmethod
+    def find_shortest_symref(repo_path, sha1):
+        """
+        Find the shortest symbolic reference (branch/tag) to a Git SHA1
+
+        :param repo_path: the path of a valid git repository
+        :type repo_path: str
+
+        :param sha1: the SHA1 of a commit to look up the reference for
+        :type sha1: str
+
+        Returns None if nothing points to the requested SHA1
+        """
+        repo_path = os.path.expanduser(repo_path)
+        possibles = []
+        # Can't use git for-each-ref --points-at because it was only
+        # introduced in Git 2.7, which is not in Ubuntu 14.04 - check by
+        # hand instead.
+        branches = subprocess.check_output(
+            "git for-each-ref --sort=-committerdate "
+            "--format='%(objectname:short) %(refname:short)' "
+            "refs/heads/ refs/remotes/ refs/tags",
+            cwd=repo_path, shell=True)
+        for line in branches.splitlines():
+            try:
+                sha1_out, name = line.strip().split()
+            except ValueError:
+                continue
+            if sha1_out[:7] == sha1[:7]:
+                possibles.append(name)
+        if not possibles:
+            return None
+
+        return min(possibles, key=len)
+
+# vim :set tabstop=4 shiftwidth=4 expandtab
diff --git a/libs/utils/trace.py b/libs/utils/trace.py
index 93a379e8821c0ad5da06ef7b70a7bfcff96b9362..c7adabdb3ab5108adc25d25d5e68de1da2adcfa7 100644
--- a/libs/utils/trace.py
+++ b/libs/utils/trace.py
@@ -847,6 +847,10 @@ class Trace(object):
         )
         active.fillna(method='ffill', inplace=True)
+        # There might be NaNs in the signal where we got data from some CPUs
+        # before others. That will break the .astype(int) below, so drop rows
+        # with NaN in them.
+        active.dropna(inplace=True)
 
         # Cluster active is the OR between the actives on each CPU
         # belonging to that specific cluster
diff --git a/libs/utils/wa_results_collector.py b/libs/utils/wa_results_collector.py
new file mode 100644
index 0000000000000000000000000000000000000000..d07df7236cd31b5c9f6f9c3568200d04e52e76be
--- /dev/null
+++ b/libs/utils/wa_results_collector.py
@@ -0,0 +1,1092 @@
+# Copyright 2017 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from collections import namedtuple, defaultdict
+import csv
+import json
+import numpy as np
+import re
+import os
+import pandas as pd
+import subprocess
+import logging
+import warnings
+
+from scipy.stats import ttest_ind
+import matplotlib.cm as cm
+import matplotlib.pyplot as plt
+from matplotlib.colors import to_hex
+
+from conf import LisaLogging
+
+from bart.common.Utils import area_under_curve
+from devlib.target import KernelVersion
+from trappy.utils import handle_duplicate_index
+
+from IPython.display import display
+
+from trace import Trace
+from git import Git
+
+class WaResultsCollector(object):
+    """
+    Collects, analyses and visualises results from multiple WA3 directories
+
+    Takes a list of output directories from Workload Automation 3 and parses
+    them. Finds metrics reported by WA itself, and extends those metrics with
+    extra detail extracted from ftrace files, energy instrumentation output,
+    and workload-specific artifacts that are found in the output.
+
+    Results can be grouped according to the following terms:
+
+    - 'metric' is a specific measurable quantity such as a single frame's
+      rendering time or the average energy consumed during a workload run.
+
+    - 'workload' is the general name of a workload such as 'jankbench' or
+      'youtube'.
+
+    - 'test' is a more specific identification for a workload - for example
+      this might identify one of Jankbench's sub-benchmarks, or specifically
+      playing a certain video on YouTube for 30s.
+
+      WaResultsCollector ultimately derives 'test' names from the
+      'classifiers'::'test' field of the WA3 agenda file's 'workloads'
+      entries.
+
+    - 'tag' is an identifier for a set of run-time target configurations that
+      the target was run under. For example there might exist one 'tag'
+      identifying running under the schedutil governor and another for the
+      performance governor.
+
+      WaResultsCollector ultimately derives 'tag' names from the
+      'classifiers' field of the WA3 agenda file's 'sections' entries.
+
+    - 'kernel' identifies the kernel that was running when the metric was
+      collected. This may be a SHA1 or a symbolic ref (branch/tag) derived
+      from a provided Git repository. To try to keep identifiers readable,
+      common prefixes of refs are removed: if the raw refs are 'test/foo/bar'
+      and 'test/foo/baz', they will be referred to just as 'bar' and 'baz'.
+
+    Aside from the provided helper attributes, all metrics are exposed in a
+    DataFrame as the ``results_df`` attribute.
+
+    :param wa_dirs: List of paths to WA3 output directories, or a regexp of
+                    WA3 output directory names to consider starting from the
+                    specified base_path
+    :type wa_dirs: str
+
+    :param base_dir: The path of a directory containing a collection of WA3
+                     output directories
+    :type base_dir: str
+
+    :param platform: Optional LISA platform description. If provided, used to
+                     enrich extra metrics gleaned from trace analysis.
+
+    :param kernel_repo_path: Optional path to kernel repository. WA3 reports
+                    the SHA1 of the kernel that workloads were run against.
+                    If this param is provided, the repository is searched for
+                    symbolic references to replace SHA1s in data
+                    representation. This is purely to make the output more
+                    manageable for humans.
+
+    :param parse_traces: This class uses LISA to parse and analyse ftrace
+                         files for extra metrics. With multiple/large traces
+                         this can take some time. Set this param to False to
+                         disable trace parsing.
+
+    :param use_cached_trace_metrics: This class uses LISA to parse and
+                         analyse ftrace files for extra metrics.
+                         With multiple/large traces this can take some time,
+                         so the extracted metrics are cached in the provided
+                         output directories. Set this param to False to
+                         disable this caching.
+    """
+    def __init__(self, base_dir=None, wa_dirs=".*", platform=None,
+                 kernel_repo_path=None, parse_traces=True,
+                 use_cached_trace_metrics=True):
+
+        self._log = logging.getLogger('WaResultsCollector')
+
+        if base_dir:
+            base_dir = os.path.expanduser(base_dir)
+            if not isinstance(wa_dirs, basestring):
+                raise ValueError(
+                    'If base_dir is provided, wa_dirs should be a regexp')
+            regex = wa_dirs
+            wa_dirs = self._list_wa_dirs(base_dir, regex)
+            if not wa_dirs:
+                raise ValueError("Couldn't find any WA results matching '{}' in {}"
+                                 .format(regex, base_dir))
+        else:
+            if not hasattr(wa_dirs, '__iter__'):
+                raise ValueError(
+                    'if base_dir is not provided, wa_dirs should be a list of paths')
+
+        wa_dirs = [os.path.expanduser(p) for p in wa_dirs]
+
+        self.platform = platform
+        self.parse_traces = parse_traces
+        if not self.parse_traces:
+            self._log.warning("Trace parsing disabled")
+        self.use_cached_trace_metrics = use_cached_trace_metrics
+
+        df = pd.DataFrame()
+        for wa_dir in wa_dirs:
+            df = df.append(self._read_wa_dir(wa_dir))
+
+        kernel_refs = {}
+        if kernel_repo_path:
+            for sha1 in df['kernel_sha1'].unique():
+                ref = Git.find_shortest_symref(kernel_repo_path, sha1)
+                if ref:
+                    kernel_refs[sha1] = ref
+
+            common_prefix = os.path.commonprefix(kernel_refs.values())
+            for sha1, ref in kernel_refs.iteritems():
+                kernel_refs[sha1] = ref[len(common_prefix):]
+
+        df['kernel'] = df['kernel_sha1'].replace(kernel_refs)
+
+        self.results_df = df
+
+    def _list_wa_dirs(self, base_dir, wa_dirs_re):
+        dirs = []
+        self._log.info("Processing WA3 dirs matching [%s], rooted at %s",
+                       wa_dirs_re, base_dir)
+        wa_dirs_re = re.compile(wa_dirs_re)
+
+        for subdir in os.listdir(base_dir):
+            dir = os.path.join(base_dir, subdir)
+            if not os.path.isdir(dir) or not wa_dirs_re.search(subdir):
+                continue
+
+            # WA3 results dirs contain a __meta directory at the top level.
+            if '__meta' not in os.listdir(dir):
+                self._log.warning('Ignoring %s, does not contain __meta directory',
+                                  dir)
+                continue
+
+            dirs.append(dir)
+
+        return dirs
+
+    def _read_wa_dir(self, wa_dir):
+        """
+        Get a DataFrame of metrics from a single WA3 output directory.
+
+        Includes the extra metrics derived from workload-specific artifacts
+        and ftrace files.
+
+        Columns returned:
+
+        kernel_sha1,kernel,id,workload,tag,test,iteration,metric,value,units
+        """
+        # A WA output directory looks something like:
+        #
+        # wa_output/
+        # |- __meta/
+        # |  | - jobs.json
+        # |  | (some other bits)
+        # |- results.csv
+        # |- pelt-wk1-jankbench-1/
+        # |  | - result.json
+        # |  | (other results from iteration 1 of pelt-wk1, which is a
+        # |  |  jankbench job)
+        # |- pelt-wk1-jankbench-2/
+        #    [etc]
+
+        # results.csv contains all the metrics reported by WA for all jobs.
+        df = pd.read_csv(os.path.join(wa_dir, 'results.csv'))
+
+        # __meta/jobs.json describes the jobs that were run - we can use this
+        # to find extra artifacts (like traces and detailed energy
+        # measurement data) from the jobs, which we'll use to add additional
+        # metrics that WA didn't report itself.
+        with open(os.path.join(wa_dir, '__meta', 'jobs.json')) as f:
+            jobs = json.load(f)['jobs']
+
+        subdirs_done = []
+
+        # Keep track of how many times we've seen each job id so we know
+        # which iteration to look at (If we use the proper WA3 API this
+        # awkwardness isn't necessary).
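+        # (e.g. for the directories 'wk1-jankbench-1' and 'wk1-jankbench-2',
+        # the job id 'wk1' - a hypothetical name - is seen twice, giving
+        # iterations 1 and 2)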
+        next_iteration = defaultdict(lambda: 1)
+
+        # Keep track of which jobs we skipped for each iteration
+        skipped_jobs = defaultdict(lambda: [])
+
+        # Dicts mapping job IDs to things determined about the job - this
+        # will be used to add extra columns to the DataFrame (that aren't
+        # reported directly in WA's results.csv)
+        tag_map = {}
+        test_map = {}
+        job_dir_map = {}
+
+        for job in jobs:
+            workload = job['workload_name']
+
+            job_id = job['id']
+
+            # If there's a 'tag' in the 'classifiers' object, use that to
+            # identify the runtime configuration. If not, use a
+            # representation of the full key=value pairs.
+            classifiers = job['classifiers'] or {}
+
+            if 'test' in classifiers:
+                # If the workload spec has a 'test' classifier, use that to
+                # identify it.
+                test = classifiers.pop('test')
+            elif 'test' in job['workload_parameters']:
+                # If not, some workloads have a 'test' workload_parameter,
+                # try using that
+                test = job['workload_parameters']['test']
+            else:
+                # Otherwise just use the workload name.
+                # This isn't ideal because it means the results from jobs
+                # with different workload parameters will be amalgamated.
+                test = workload
+
+            rich_tag = ';'.join('{}={}'.format(k, v)
+                                for k, v in classifiers.iteritems())
+            tag = classifiers.get('tag', rich_tag)
+
+            if job_id in tag_map:
+                # Sanity check: a job ID should only map to a single tag
+                if tag_map[job_id] != tag:
+                    raise RuntimeError('Multiple tags ({}, {}) found for job ID {}'
+                                       .format(tag, tag_map[job_id], job_id))
+            tag_map[job_id] = tag
+
+            if job_id in test_map:
+                # Sanity check: a job ID should only map to a single test
+                if test_map[job_id] != test:
+                    raise RuntimeError('Multiple tests ({}, {}) found for job ID {}'
+                                       .format(test, test_map[job_id], job_id))
+            test_map[job_id] = test
+
+            iteration = next_iteration[job_id]
+            next_iteration[job_id] += 1
+
+            job_dir = os.path.join(wa_dir,
+                                   '-'.join([job_id, workload, str(iteration)]))
+
+            job_dir_map[job_id] = job_dir
+
+            # Jobs can fail due to target misconfiguration or other problems,
+            # without preventing us from collecting the results for the jobs
+            # that ran OK.
+            with open(os.path.join(job_dir, 'result.json')) as f:
+                job_result = json.load(f)
+            if job_result['status'] == 'FAILED':
+                skipped_jobs[iteration].append(job_id)
+                continue
+
+            extra_df = self._get_extra_job_metrics(job_dir, workload)
+            if extra_df.empty:
+                continue
+
+            extra_df.loc[:, 'workload'] = workload
+            extra_df.loc[:, 'iteration'] = iteration
+            extra_df.loc[:, 'id'] = job_id
+            extra_df.loc[:, 'tag'] = tag
+            extra_df.loc[:, 'test'] = test
+
+            df = df.append(extra_df)
+
+        for iteration, job_ids in skipped_jobs.iteritems():
+            self._log.warning("Skipped failed iteration %d for jobs:", iteration)
+            self._log.warning("   %s", ', '.join(job_ids))
+
+        df['tag'] = df['id'].replace(tag_map)
+        df['test'] = df['id'].replace(test_map)
+        # TODO: This is a bit lazy: we're storing the directory that every
+        # single metric came from in a DataFrame column. That's redundant
+        # really - instead, to get from a row in results_df to a job output
+        # directory, we should just store a mapping from kernel identifiers
+        # to wa_output directories, then derive the job dir from that mapping
+        # plus the job_id+workload+iteration in the results_df row. This
+        # works fine for now, though - that refactoring would probably belong
+        # alongside a refactoring to use WA's own API for reading output
+        # directories.
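+        # (e.g. with the hypothetical job id above, every row produced for
+        # 'wk1' ends up pointing at a directory like
+        # 'wa_output/wk1-jankbench-2')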
+        df['_job_dir'] = df['id'].replace(job_dir_map)
+        df.loc[:, 'kernel_sha1'] = self._wa_get_kernel_sha1(wa_dir)
+
+        return df
+
+    def _get_trace_metrics(self, trace_path):
+        """
+        Parse a trace (or use cached results) and extract extra metrics from
+        it
+
+        Returns a DataFrame with columns:
+
+        metric,value,units
+        """
+        cache_path = os.path.join(os.path.dirname(trace_path),
+                                  'lisa_trace_metrics.csv')
+        if self.use_cached_trace_metrics and os.path.exists(cache_path):
+            return pd.read_csv(cache_path)
+
+        # I wonder if this should go in LISA itself? Probably.
+
+        metrics = []
+        events = ['irq_handler_entry', 'cpu_frequency', 'nohz_kick',
+                  'sched_switch', 'sched_load_cfs_rq', 'sched_load_avg_task',
+                  'thermal_temperature']
+        trace = Trace(self.platform, trace_path, events)
+
+        if hasattr(trace.data_frame, 'cpu_wakeups'): # Not merged in LISA yet
+            metrics.append(('cpu_wakeup_count',
+                            len(trace.data_frame.cpu_wakeups()), None))
+
+        # Helper to get area under curve of multiple CPU active signals
+        def get_cpu_time(trace, cpus):
+            df = pd.DataFrame([trace.getCPUActiveSignal(cpu) for cpu in cpus])
+            return df.sum(axis=1).sum(axis=0)
+
+        clusters = trace.platform.get('clusters')
+        if clusters:
+            for cluster in clusters.values():
+                name = '-'.join(str(c) for c in cluster)
+
+                df = trace.data_frame.cluster_frequency_residency(cluster)
+                if df is None or df.empty:
+                    self._log.warning("Can't get cluster freq residency from %s",
+                                      trace.data_dir)
+                else:
+                    df = df.reset_index()
+                    avg_freq = (df.frequency * df.time).sum() / df.time.sum()
+                    metric = 'avg_freq_cluster_{}'.format(name)
+                    metrics.append((metric, avg_freq, 'MHz'))
+
+                df = trace.data_frame.trace_event('cpu_frequency')
+                df = df[df.cpu == cluster[0]]
+                metrics.append(('freq_transition_count_{}'.format(name),
+                                len(df), None))
+
+                active_time = area_under_curve(
+                    trace.getClusterActiveSignal(cluster))
+                metrics.append(('active_time_cluster_{}'.format(name),
+                                active_time, 'seconds'))
+
+                metrics.append(('cpu_time_cluster_{}'.format(name),
+                                get_cpu_time(trace, cluster), 'cpu-seconds'))
+
+        metrics.append(('cpu_time_total',
+                        get_cpu_time(trace, range(trace.platform['cpus_count'])),
+                        'cpu-seconds'))
+
+        event = None
+        if trace.hasEvents('sched_load_cfs_rq'):
+            event = 'sched_load_cfs_rq'
+            row_filter = lambda r: r.path == '/'
+            column = 'util'
+        elif trace.hasEvents('sched_load_avg_cpu'):
+            event = 'sched_load_avg_cpu'
+            row_filter = lambda r: True
+            column = 'util_avg'
+        if event:
+            df = trace.data_frame.trace_event(event)
+            util_sum = (handle_duplicate_index(df)[row_filter]
+                        .pivot(columns='cpu')[column].ffill().sum(axis=1))
+            avg_util_sum = area_under_curve(util_sum) \
+                           / (util_sum.index[-1] - util_sum.index[0])
+            metrics.append(('avg_util_sum', avg_util_sum, None))
+
+        if trace.hasEvents('thermal_temperature'):
+            df = trace.data_frame.trace_event('thermal_temperature')
+            for zone, zone_df in df.groupby('thermal_zone'):
+                metrics.append(('tz_{}_start_temp'.format(zone),
+                                zone_df.iloc[0]['temp_prev'],
+                                'milliCelsius'))
+
+                if len(zone_df) == 1: # Avoid division by 0
+                    avg_tmp = zone_df['temp'].iloc[0]
+                else:
+                    avg_tmp = (area_under_curve(zone_df['temp'])
+                               / (zone_df.index[-1] - zone_df.index[0]))
+
+                metrics.append(('tz_{}_avg_temp'.format(zone),
+                                avg_tmp,
+                                'milliCelsius'))
+
+        ret = pd.DataFrame(metrics, columns=['metric', 'value', 'units'])
+        ret.to_csv(cache_path, index=False)
+
+        return ret
+
+    def _get_extra_job_metrics(self, job_dir, workload):
+        """
+        Get extra metrics (not reported directly by WA) from a WA job output
+        dir
+
+        Returns a DataFrame with columns:
+
+        metric,value,units
+        """
+        metrics_df = pd.DataFrame()
+
+        artifacts = self._read_artifacts(job_dir)
+        if self.parse_traces and 'trace-cmd-bin' in artifacts:
+            metrics_df = metrics_df.append(
+                self._get_trace_metrics(artifacts['trace-cmd-bin']))
+
+        if 'jankbench_results_csv' in artifacts:
+            df = pd.read_csv(artifacts['jankbench_results_csv'])
+            df = pd.DataFrame({'value': df['total_duration']})
+            df.loc[:, 'metric'] = 'frame_total_duration'
+            df.loc[:, 'units'] = 'ms'
+
+            metrics_df = metrics_df.append(df)
+
+        # WA's metrics model just exports overall energy metrics, not
+        # individual samples. We're going to extend that with individual
+        # samples so if you want to you can see how much variation there was
+        # in energy usage.
+        # So we'll look for the actual CSV files and parse them by hand.
+        # The parsing necessary is specific to the energy measurement backend
+        # that was used, which WA doesn't currently report directly.
+        # TODO: once WA's reporting of this data has been cleaned up a bit I
+        # think we can simplify this.
+        for artifact_name, path in artifacts.iteritems():
+            if artifact_name.startswith('energy_instrument_output'):
+                df = pd.read_csv(path)
+
+                if 'device_power' in df.columns:
+                    # Looks like this is from an ACME
+
+                    df = pd.DataFrame({'value': df['device_power']})
+
+                    # Figure out what to call the sample metrics. If the
+                    # artifact name has something extra, that will be the
+                    # channel (IIO device) name. Use that to differentiate
+                    # where the samples came from. If not, just call it
+                    # 'device_power_sample'.
+                    device_name = artifact_name[len('energy_instrument_output') + 1:]
+                    name_extra = device_name or 'device'
+                    df.loc[:, 'metric'] = '{}_power_sample'.format(name_extra)
+
+                    df.loc[:, 'units'] = 'watts'
+
+                    metrics_df = metrics_df.append(df)
+                elif 'output_power' in df.columns and 'USB_power' in df.columns:
+                    # Looks like this is from a Monsoon
+                    # For monsoon the USB and device power are collected
+                    # together with the same timestamps, so we can just add
+                    # them up.
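+                    # (e.g. a 2.1W output_power sample and a 0.3W USB_power
+                    # sample taken at the same timestamp become one 2.4W
+                    # value - illustrative numbers)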
+                    power_samples = df['output_power'] + df['USB_power']
+                    df = pd.DataFrame({'value': power_samples})
+                    df.loc[:, 'metric'] = 'device_power_sample'
+                    df.loc[:, 'units'] = 'watts'
+
+                    metrics_df = metrics_df.append(df)
+
+        return metrics_df
+
+    def _wa_get_kernel_sha1(self, wa_dir):
+        """
+        Find the SHA1 of the kernel that a WA3 run was run against
+        """
+        with open(os.path.join(wa_dir, '__meta', 'target_info.json')) as f:
+            target_info = json.load(f)
+        return KernelVersion(target_info['kernel_release']).sha1
+
+    def _select(self, tag='.*', kernel='.*', test='.*'):
+        _df = self.results_df
+        _df = _df[_df.tag.str.contains(tag)]
+        _df = _df[_df.kernel.str.contains(kernel)]
+        _df = _df[_df.test.str.contains(test)]
+        return _df
+
+    @property
+    def kernels(self):
+        return self.results_df['kernel'].unique()
+
+    @property
+    def workloads(self):
+        return self.results_df['workload'].unique()
+
+    @property
+    def tags(self):
+        return self.results_df['tag'].unique()
+
+    def tests(self, workload=None):
+        df = self.results_df
+        if workload:
+            df = df[df['workload'] == workload]
+        return df['test'].unique()
+
+    def workload_available_metrics(self, workload):
+        return (self.results_df
+                .groupby('workload').get_group(workload)
+                ['metric'].unique())
+
+    def _get_metric_df(self, workload, metric, tag, kernel, test):
+        """
+        Common helper for getting results to plot for a given metric
+        """
+        df = self._select(tag, kernel, test)
+        if df.empty:
+            self._log.warning("No data to plot for (tag: %s, kernel: %s, test: %s)",
+                              tag, kernel, test)
+            return None
+
+        valid_workloads = df.workload.unique()
+        if workload not in valid_workloads:
+            self._log.warning("No data for [%s] workload", workload)
+            self._log.info("Workloads with data, for the specified filters, are:")
+            self._log.info("  %s", ','.join(valid_workloads))
+            return None
+        df = df[df['workload'] == workload]
+
+        valid_metrics = df.metric.unique()
+        if metric not in valid_metrics:
+            self._log.warning("No metric [%s] collected for workload [%s]",
+                              metric, workload)
+            self._log.info("Metrics with data, for the specified filters, are:")
+            self._log.info("  %s", ', '.join(valid_metrics))
+            return None
+        df = df[df['metric'] == metric]
+
+        units = df['units'].unique()
+        if len(units) > 1:
+            raise RuntimeError('Found different units for workload "{}" metric "{}": {}'
+                               .format(workload, metric, units))
+
+        return df
+
+    SortBy = namedtuple('SortBy', ['key', 'params', 'column'])
+
+    def _get_sort_params(self, sort_on):
+        """
+        Validate a sort criterion and return the parameters required by the
+        boxplot and report methods.
+        """
+        valid_sort = ['count', 'mean', 'std', 'min', 'max']
+
+        # Check whether a valid percentile string was requested
+        match = re.match('^(?P<quantile>\d{1,3})\%$', sort_on)
+        if match:
+            quantile = int(match.group('quantile'))
+            if quantile < 1 or quantile > 100:
+                raise ValueError("Error sorting data: Quantile value out of range [1..100]")
+            return self.SortBy('quantile', {'q': quantile/100.}, sort_on)
+
+        # Otherwise, check whether it's a valid Pandas::describe() column name
+        if sort_on in valid_sort:
+            return self.SortBy(sort_on, {}, sort_on)
+
+        raise ValueError(
+            "sort_on={} not supported, allowed values are percentile or {}"
+            .format(sort_on, valid_sort))
+
+    def boxplot(self, workload, metric,
+                tag='.*', kernel='.*', test='.*',
+                by=['test', 'tag', 'kernel'],
+                sort_on='mean', ascending=False,
+                xlim=None):
+        """
+        Display boxplots of a certain metric
+
+        Creates horizontal boxplots of metrics in the results.
+        Check ``workloads`` and ``workload_available_metrics`` to find the
+        available workloads and metrics. Check ``tags``, ``tests`` and
+        ``kernels`` to find the names that results can be filtered against.
+
+        By default, the box with the lowest mean value is plotted at the top
+        of the graph; this can be customized with ``sort_on`` and
+        ``ascending``.
+
+        :param workload: Name of workload to display metrics for
+        :param metric: Name of metric to display
+
+        :param tag: regular expression to filter tags that should be plotted
+        :param kernel: regular expression to filter kernels that should be
+                       plotted
+        :param test: regular expression to filter tests that should be
+                     plotted
+
+        :param by: List of identifiers to group output as in
+                   DataFrame.groupby.
+
+        :param sort_on: Name of the statistic to order data for.
+                        Supported values are: count, mean, std, min, max.
+                        You may alternatively specify a percentile to sort
+                        on; this should be an integer in the range [1..100]
+                        formatted as a percentage, e.g. 95% is the 95th
+                        percentile.
+        :param ascending: When True, boxplots are plotted by increasing
+                          values (lowest-valued boxplot at the top of the
+                          graph) of the specified `sort_on` statistic.
+        """
+        sp = self._get_sort_params(sort_on)
+        df = self._get_metric_df(workload, metric, tag, kernel, test)
+        if df is None:
+            return
+        gb = df.groupby(by)
+
+        # Convert the groupby into a DataFrame with a column for each group
+        max_group_size = max(len(group) for group in gb.groups.itervalues())
+        _df = pd.DataFrame()
+        for group_name, group in gb:
+            # Need to pad the group's column so that they all have the same
+            # length
+            padding_length = max_group_size - len(group)
+            padding = pd.Series(np.nan, index=np.arange(padding_length))
+            col = group['value'].append(padding)
+            col.index = np.arange(max_group_size)
+            _df[group_name] = col
+
+        # Sort the columns.
+        # With default params this puts the box with the lowest mean at the
+        # bottom.
+        # NOTE: the not(ascending) condition is required to keep these plots
+        # aligned with the way describe() reports the stats corresponding to
+        # each boxplot
+        sorted_df = getattr(_df, sp.key)(**sp.params)
+        sorted_df = sorted_df.sort_values(ascending=not(ascending))
+        _df = _df[sorted_df.index]
+
+        # Plot boxes sorted by mean
+        fig, axes = plt.subplots(figsize=(16,8))
+        _df.boxplot(ax=axes, vert=False, showmeans=True)
+        fig.suptitle('')
+        if xlim:
+            axes.set_xlim(xlim)
+        [units] = df['units'].unique()
+        axes.set_xlabel('{} [{}]'.format(metric, units))
+        axes.set_title('{}:{}'.format(workload, metric))
+        plt.show()
+
+        return axes
+
+    def describe(self, workload, metric,
+                 tag='.*', kernel='.*', test='.*',
+                 by=['test', 'tag', 'kernel'],
+                 sort_on='mean', ascending=False):
+        """
+        Return a DataFrame of statistics for a certain metric
+
+        Compute mean, std, min, max and [50, 75, 95, 99] percentiles for
+        the values collected on each iteration of the specified metric.
+
+        Check ``workloads`` and ``workload_available_metrics`` to find the
+        available workloads and metrics.
+        Check ``tags``, ``tests`` and ``kernels`` to find the names that
+        results can be filtered against.
+
+        :param workload: Name of workload to display metrics for
+        :param metric: Name of metric to display
+
+        :param tag: regular expression to filter tags that should be plotted
+        :param kernel: regular expression to filter kernels that should be
+                       plotted
+        :param test: regular expression to filter tests that should be
+                     plotted
+
+        :param by: List of identifiers to group output as in
+                   DataFrame.groupby.
+
+        :param sort_on: Name of the statistic to order data for.
+                        Supported values are: count, mean, std, min, max.
+                        A percentile may also be specified; this must be an
+                        integer in the range [1..100], formatted as a
+                        percentage, e.g. 95% is the 95th percentile.
+        :param ascending: When True, the statistics are reported by
+                          increasing values of the specified `sort_on` column
+        """
+        sp = self._get_sort_params(sort_on)
+        df = self._get_metric_df(workload, metric, tag, kernel, test)
+        if df is None:
+            return
+
+        # Add the requested percentile, if it's not already in the list
+        percentiles = [0.75, 0.95, 0.99]
+        if sp.params and 'q' in sp.params:
+            percentiles.append(sp.params['q'])
+        percentiles = sorted(list(set(percentiles)))
+
+        grouped = df.groupby(by)['value']
+        stats_df = pd.DataFrame(
+            grouped.describe(percentiles=percentiles))
+
+        # Use a consistent format independently of the pandas version
+        if 'value' in stats_df.columns:
+            # We must be running on a pre-0.20.0 version of pandas.
+            # unstack will convert the old output format to the new.
+            # http://pandas.pydata.org/pandas-docs/version/0.20/whatsnew.html#groupby-describe-formatting
+            # Main difference is that here we have a top-level column
+            # named 'value'
+            stats_df = stats_df.unstack()
+        else:
+            # Let's add a top-level column named 'value' which will be
+            # replaced by the actual metric name by the following code
+            stats_df.columns = pd.MultiIndex.from_product(
+                [['value'], stats_df.columns])
+
+        # Sort entries by the required metric and order value
+        stats_df.sort_values(by=[('value', sp.column)],
+                             ascending=ascending, inplace=True)
+        stats_df.rename(columns={'value': metric}, inplace=True)
+
+        return stats_df
+
+    def report(self, workload, metric,
+               tag='.*', kernel='.*', test='.*',
+               by=['test', 'tag', 'kernel'],
+               sort_on='mean', ascending=False,
+               xlim=None):
+        """
+        Report a boxplot and a set of statistics for a certain metric
+
+        This is a convenience method to call both ``boxplot`` and
+        ``describe`` at the same time to get a consistent graphical and
+        numerical representation of the values for the specified metric.
+
+        Check ``workloads`` and ``workload_available_metrics`` to find the
+        available workloads and metrics.
+        Check ``tags``, ``tests`` and ``kernels`` to find the names that
+        results can be filtered against.
+
+        :param workload: Name of workload to display metrics for
+        :param metric: Name of metric to display
+
+        :param tag: regular expression to filter tags that should be plotted
+        :param kernel: regular expression to filter kernels that should be
+                       plotted
+        :param test: regular expression to filter tests that should be
+                     plotted
+
+        :param by: List of identifiers to group output as in
+                   DataFrame.groupby.
+        """
+        axes = self.boxplot(workload, metric, tag, kernel, test,
+                            by, sort_on, ascending, xlim)
+        stats_df = self.describe(workload, metric, tag, kernel, test,
+                                 by, sort_on, ascending)
+        display(stats_df)
+
+        return (axes, stats_df)
+
+    CDF = namedtuple('CDF', ['df', 'threshold', 'above', 'below'])
+
+    def _get_cdf(self, data, threshold):
+        """
+        Build the "Cumulative Distribution Function" (CDF) for the given data
+        """
+        # Build the series of sorted values
+        ser = data.sort_values()
+        if len(ser) < 1000:
+            # Append again the last (and largest) value.
+            # This step is important especially for small sample sizes
+            # in order to get an unbiased CDF
+            ser = ser.append(pd.Series(ser.iloc[-1]))
+        df = pd.Series(np.linspace(0., 1., len(ser)), index=ser)
+
+        # Compute percentage of samples above/below the specified threshold
+        below = float(max(df[:threshold]))
+        above = 1 - below
+        return self.CDF(df, threshold, above, below)
+
+    def plot_cdf(self, workload='jankbench', metric='frame_total_duration',
+                 threshold=16, tag='.*', kernel='.*', test='.*'):
+        """
+        Display cumulative distribution functions of a certain metric
+
+        Draws CDFs of metrics in the results. Check ``workloads`` and
+        ``workload_available_metrics`` to find the available workloads and
+        metrics. Check ``tags``, ``tests`` and ``kernels`` to find the
+        names that results can be filtered against.
+
+        The most likely use-case for this is plotting frame rendering times
+        under Jankbench, so default parameters are provided to make this
+        easy.
+
+        :param workload: Name of workload to display metrics for
+        :param metric: Name of metric to display
+
+        :param threshold: Value to highlight in the plot - the likely use
+                          for this is highlighting the maximum acceptable
+                          frame-rendering time in order to see at a glance
+                          the rough proportion of frames that were rendered
+                          in time.
+
+        :param tag: regular expression to filter tags that should be plotted
+        :param kernel: regular expression to filter kernels that should be
+                       plotted
+        :param test: regular expression to filter tests that should be
+                     plotted
+        """
+        df = self._get_metric_df(workload, metric, tag, kernel, test)
+        if df is None:
+            return
+
+        test_cnt = len(df.groupby(['test', 'tag', 'kernel']))
+        colors = iter(cm.rainbow(np.linspace(0, 1, test_cnt+1)))
+
+        fig, axes = plt.subplots()
+        axes.axvspan(0, threshold, facecolor='g', alpha=0.1)
+
+        labels = []
+        lines = []
+        for keys, df in df.groupby(['test', 'tag', 'kernel']):
+            labels.append("{:16s}: {:32s}".format(keys[2], keys[1]))
+            color = next(colors)
+            cdf = self._get_cdf(df['value'], threshold)
+            [units] = df['units'].unique()
+            ax = cdf.df.plot(ax=axes, legend=False, xlim=(0,None),
+                             figsize=(16, 6),
+                             title='Total duration CDF ({:.1f}% within {} [{}] threshold)'\
+                             .format(100. * cdf.below, threshold, units),
+                             label=labels[-1],
+                             color=to_hex(color))
+            lines.append(ax.lines[-1])
+            axes.axhline(y=cdf.below, linewidth=1,
+                         linestyle='--', color=to_hex(color))
+            self._log.debug("%-32s: %-32s: %.1f",
+                            keys[2], keys[1], 100.*cdf.below)
+
+        axes.grid(True)
+        axes.legend(lines, labels)
+        plt.show()
+
+    def find_comparisons(self, base_id=None, by='kernel'):
+        """
+        Find metrics that changed between a baseline and variants
+
+        The notion of 'variant' and 'baseline' is defined by the `by` param.
+        If by='kernel', then `base_id` should be a kernel SHA (or whatever
+        key the 'kernel' column in the results_df uses). If by='tag' then
+        `base_id` should be a WA 'tag id' (as named in the WA agenda).
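+
+        Example (hypothetical kernel names)::
+
+            df = collector.find_comparisons(base_id='base', by='kernel')
+            # Keep only changes that look statistically significant
+            df = df[df.pvalue < 0.05]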
+ """ + comparisons = [] + + # I dunno why I wrote this with a namedtuple instead of just a dict or + # whatever, but it works fine + Comparison = namedtuple('Comparison', ['metric', 'test', 'inv_id', + 'base_id', 'base_mean', 'base_std', + 'new_id', 'new_mean', 'new_std', + 'diff', 'diff_pct', 'pvalue']) + + # If comparing by kernel, only check comparisons where the 'tag' is the same + # If comparing by tag, only check where kernel is same + if by == 'kernel': + invariant = 'tag' + elif by == 'tag': + invariant = 'kernel' + else: + raise ValueError('`by` must be "kernel" or "tag"') + + available_baselines = self.results_df[by].unique() + if base_id is None: + base_id = available_baselines[0] + if base_id not in available_baselines: + raise ValueError('base_id "{}" not a valid "{}" (available: {}). ' + 'Did you mean to set by="{}"?'.format( + base_id, by, available_baselines, invariant)) + + for metric, metric_results in self.results_df.groupby('metric'): + # inv_id will either be the id of the kernel or of the tag, + # depending on the `by` param. + # So wl_inv_results will be the results entries for that workload on + # that kernel/tag + for (test, inv_id), wl_inv_results in metric_results.groupby(['test', invariant]): + gb = wl_inv_results.groupby(by)['value'] + + if base_id not in gb.groups: + self._log.warning('Skipping - No baseline results for test ' + '[%s] %s [%s] metric [%s]', + test, invariant, inv_id, metric) + continue + + base_results = gb.get_group(base_id) + base_mean = base_results.mean() + + for group_id, group_results in gb: + if group_id == base_id: + continue + + # group_id is now a kernel id or a tag (depending on + # `by`). group_results is a slice of all the rows of self.results_df + # for a given metric, test, tag/test tuple. We + # create comparison object to show how that metric changed + # wrt. to the base tag/test. + + group_mean = group_results.mean() + mean_diff = group_mean - base_mean + # Calculate percentage difference in mean metric value + if base_mean != 0: + mean_diff_pct = mean_diff * 100. / base_mean + else: + # base mean is 0, can't divide by that. + if group_mean == 0: + # Both are 0 so diff_pct is 0 + mean_diff_pct =0 + else: + # Tricky one - base value was 0, new value isn't. + # Let's just call it a 100% difference. + mean_diff_pct = 100 + + if len(group_results) <= 1 or len(base_results) <= 1: + # Can't do ttest_ind if we only have one sample. There + # are proper t-tests for this, but let's just assume the + # worst. + pvalue = 1.0 + elif mean_diff == 0: + # ttest_ind also gives a warning if the two data sets + # are the same and have no variance. I don't know why + # that is to be honest, but anyway if there's no + # difference in the mean, we don't care about the + # p-value. + pvalue = 1.0 + else: + # Find a p-value which hopefully represents the + # (complement of the) certainty that any difference in + # the mean represents something real. + pvalue = ttest_ind(group_results, base_results, equal_var=False).pvalue + + comparisons.append(Comparison( + metric, test, inv_id, + base_id, base_mean, base_results.std(), + group_id, group_mean, group_results.std(), + mean_diff, mean_diff_pct, pvalue)) + + return pd.DataFrame(comparisons) + + def plot_comparisons(self, base_id=None, by='kernel'): + """ + Visualise metrics that changed between a baseline and variants + + The notion of 'variant' and 'baseline' is defined by the `by` param. 
+        If by='kernel', then `base_id` should be a kernel SHA (or whatever
+        key the 'kernel' column in the results_df uses). If by='tag' then
+        `base_id` should be a WA 'tag id' (as named in the WA agenda).
+        """
+        df = self.find_comparisons(base_id=base_id, by=by)
+
+        if df.empty:
+            self._log.error('No comparisons by %s found', by)
+            if len(self.results_df[by].unique()) == 1:
+                self._log.warning('There is only one %s in the results', by)
+            return
+
+        # Separate plot for each test (e.g. one plot for Jankbench list_view)
+        for (test, inv_id), test_comparisons in df.groupby(['test', 'inv_id']):
+            # Vertical size of plot depends on how many metrics we're
+            # comparing and how many things (kernels/tags) we're comparing
+            # metrics for, i.e. the total length of the comparisons df.
+            fig, ax = plt.subplots(figsize=(15, len(test_comparisons) / 2.))
+
+            # pos is used as the Y-axis. The y-axis is a discrete axis with a
+            # point for each of the metrics we're comparing. matplotlib needs
+            # that in numerical form.
+            # We also have one more tick on the Y-axis than we actually need -
+            # this is a terrible hack which is necessary because when we set
+            # the opacity of the first bar, it sets the opacity of the
+            # legend. So we introduce a dummy bar with a value of 0 and an
+            # opacity of 1.
+            all_metrics = test_comparisons['metric'].unique()
+            pos = np.arange(-1, len(all_metrics))
+
+            # At each point on the discrete y-axis we'll have one bar for
+            # each comparison: one per kernel/tag (depending on the `by`
+            # param), minus one for the baseline.
+            # If there are more bars we'll need to make them thinner so they
+            # fit. The sum of the bars' thicknesses should be 60% of a tick
+            # on the y-axis.
+            thickness = 0.6 / len(test_comparisons.groupby('new_id'))
+
+            # TODO: something is up with the calculations above, because
+            # there's always a bit of empty space at the bottom of the axes.
+
+            gb = test_comparisons.groupby('new_id')
+            colors = cm.rainbow(np.linspace(0, 1, len(gb)))
+            for i, (group, gdf) in enumerate(gb):
+                def get_dummy_row(metric):
+                    return pd.DataFrame({col: 0 for col in gdf.columns},
+                                        index=[metric])
+
+                missing_metrics = set(all_metrics) - set(gdf['metric'].unique())
+                gdf = gdf.set_index('metric')
+                for missing_metric in missing_metrics:
+                    self._log.warning(
+                        "Data missing, can't compare metric [{}] for {} [{}]"
+                        .format(missing_metric, by, group))
+                    gdf = gdf.append(get_dummy_row(missing_metric))
+
+                # Ensure the comparisons are in the same order for each group
+                gdf = gdf.reindex(all_metrics)
+
+                # Append the dummy row we're using to fix the legend opacity
+                gdf = get_dummy_row('').append(gdf)
+
+                # For each of the things we're comparing we'll plot a bar
+                # chart but slightly shifted. That's how we get multiple bars
+                # on each y-axis point.
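+                # (e.g. when comparing three non-baseline kernels,
+                # thickness = 0.6 / 3 = 0.2, so group i's bars are drawn at
+                # pos, pos + 0.2 and pos + 0.4)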
+                bars = ax.barh(bottom=pos + (i * thickness),
+                               width=gdf['diff_pct'],
+                               height=thickness, label=group,
+                               color=colors[i % len(colors)], align='center')
+                # Decrease the opacity for comparisons with a high p-value
+                for bar, pvalue in zip(bars, gdf['pvalue']):
+                    bar.set_alpha(1 - (min(pvalue * 10, 0.95)))
+
+            # Add some text for labels, title and axes ticks
+            ax.set_xlabel('Percent difference')
+            [baseline] = test_comparisons['base_id'].unique()
+            ax.set_title('{} ({}): Percent difference compared to {}\n'
+                         'opacity depicts p-value'
+                         .format(test, inv_id, baseline))
+            ax.set_yticklabels(gdf.index.tolist())
+            ax.set_yticks(pos + thickness / 2)
+            # ax.set_xlim((-50, 50))
+            ax.legend(loc='best')
+
+            ax.grid(True)
+
+        plt.show()
+
+    def _read_artifacts(self, job_dir):
+        with open(os.path.join(job_dir, 'result.json')) as f:
+            ret = {a['name']: os.path.join(job_dir, a['path'])
+                   for a in json.load(f)['artifacts']}
+        return ret
+
+    def _find_job_dir(self, workload='.*', tag='.*', kernel='.*', test='.*',
+                      iteration=1):
+        df = self._select(tag, kernel, test)
+        df = df[df['workload'].str.match(workload)]
+
+        job_dirs = df['_job_dir'].unique()
+
+        if len(job_dirs) > 1:
+            raise ValueError("Params for get_artifacts don't uniquely identify a job. "
+                             "for workload='{}' tag='{}' kernel='{}' test='{}' iteration={}, "
+                             "found:\n{}".format(
+                                 workload, tag, kernel, test, iteration, '\n'.join(job_dirs)))
+        if not job_dirs:
+            raise ValueError(
+                "No job found for "
+                "workload='{}' tag='{}' kernel='{}' test='{}' iteration={}"
+                .format(workload, tag, kernel, test, iteration))
+
+        [job_dir] = job_dirs
+        return job_dir
+
+    def get_artifacts(self, workload='.*', tag='.*', kernel='.*', test='.*',
+                      iteration=1):
+        """
+        Get a dict mapping artifact names to file paths for a specific job.
+
+        The parameters should be used to uniquely identify a run of a job.
+        """
+        job_dir = self._find_job_dir(workload, tag, kernel, test, iteration)
+        return self._read_artifacts(job_dir)
+
+    def get_artifact(self, artifact_name, workload='.*',
+                     tag='.*', kernel='.*', test='.*',
+                     iteration=1):
+        """
+        Get the path of an artifact attached to a job output.
+
+        artifact_name specifies the name of an artifact, e.g. 'trace-cmd-bin'
+        to find the ftrace file from the specific job run. The other
+        parameters should be used to uniquely identify a run of a job.
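+
+        Example (hypothetical tag and kernel names)::
+
+            path = collector.get_artifact('trace-cmd-bin',
+                                          workload='jankbench',
+                                          tag='pelt', kernel='eas_test')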
+ """ + job_dir = self._find_job_dir(workload, tag, kernel, test, iteration) + artifacts = self._read_artifacts(job_dir) + + if not artifact_name in artifacts: + raise ValueError("No '{}' artifact found in {} (have {})".format( + artifact_name, job_dir, artifacts.keys())) + + return artifacts[artifact_name] diff --git a/logging.conf b/logging.conf index f0bca7426fdcbc4b8ddd67efe69db22402616ae4..4a1a85e77ea126e5410d6a70cf49b728c14b49a7 100644 --- a/logging.conf +++ b/logging.conf @@ -20,7 +20,7 @@ propagate=0 # For example, to enable debugging just for the TestEnv module, you need to # uncomment the lovver_TestEnv section and set: [loggers] -keys=root,Target,AndroidTarget,android,LinuxTarget,ssh,TestEnv,LisaTest,Executor,Workload,RTApp,Benchmark,local_connection,EnergyModel +keys=root,Target,AndroidTarget,android,LinuxTarget,ssh,TestEnv,LisaTest,Executor,Workload,RTApp,Benchmark,local_connection,EnergyModel,WaResultsCollector [logger_Target] qualname=Target @@ -100,6 +100,12 @@ level=INFO handlers=consoleHandler,fileHandler propagate=0 +[logger_WaResultsCollector] +qualname=WaResultsCollector +level=INFO +handlers=consoleHandler,fileHandler +propagate=0 + ################################################################################ ### Handlers diff --git a/src/shell/lisa_shell b/src/shell/lisa_shell index af6e81ed6fa843be7e1f14436fa677dd51696bc1..b96f2434a426ee7aa7d2631457100c662624528e 100755 --- a/src/shell/lisa_shell +++ b/src/shell/lisa_shell @@ -376,6 +376,103 @@ echo } +################################################################################ +# LISA Workloads utility functions +################################################################################ + +export WLTEST_VENV="$LISA_HOME/tools/wa_venv" + +function lisa-wltests-cleanup { +[ ! -d $WLTEST_VENV ] || return 0 +rm -rf $WLTEST_VENV +} + +function lisa-wltest-init { +export WLTEST_HOME="$LISA_HOME/tools/wltests" +export WLTEST_DL="$LISA_HOME/libs/devlib" +export WLTEST_WA="$LISA_HOME/tools/workload-automation" +export WA_USER_DIRECTORY="$LISA_HOME/tools/wa_user_directory" + +# If the python virtual env existis: +# let's assume everithing has been already setup and we are ready to go +if [ -d $WLTEST_VENV ]; then + source $WLTEST_VENV/bin/activate + return 0 +fi + +# Check for require dependencies +which virtualenv &>/dev/null +if [ $? -ne 0 ]; then +cat < pip install virtualenv + +EOF + return -1 +fi + +# Create and activate a python's virtual environment to be used for the +# installation of the required version of external libraries and tools +virtualenv $WLTEST_VENV +source $WLTEST_VENV/bin/activate + +# Clone Workload Automation and install it in the virtual environment +_lisa-update-submodules +pip install -e $WLTEST_WA +pip install -e $WLTEST_DL + +# Leave the virtualenv once setup completed +deactivate +} + +function lisa-wltest-series { + +# Check that the environment is properly configured +if [ -z $ANDROID_HOME ]; then +cat </dev/null +if [ $? -ne 0 ]; then + cat < sudo apt-get install coreutils + +EOF + return -1 +fi + +# Ensure the wltest environment has been configured, and get the relative +# patch loaded in the environment +lisa-wltest-init +[ $? -eq 0 ] || exit -1 + +# Setup Python virutal env, only if not already done +echo $PATH | grep wa_venv &>/dev/null +[ $? 
+
+# Run the test_series script provided by wltests
+$WLTEST_HOME/test_series "$@"
+
+# Leave the virtualenv once tests completed
+deactivate
+}
+
 ################################################################################
 # LISA Shell MAIN
 ################################################################################
diff --git a/tests/lisa/test_trace.py b/tests/lisa/test_trace.py
index 25ccba24dbd9916cfb616d9fb448d063ac4359db..400f0f24330712b39aad19a207399d90fa113fb4 100644
--- a/tests/lisa/test_trace.py
+++ b/tests/lisa/test_trace.py
@@ -162,6 +162,35 @@ class TestTrace(TestCase):
 
         self.assertEqual(trace.platform['cpus_count'], 3)
 
+    def test_dfg_cpu_wakeups(self):
+        """
+        Test the cpu_wakeups DataFrame getter
+        """
+        trace = self.make_trace("""
+          <idle>-0 [004] 519.021928: cpu_idle: state=4294967295 cpu_id=4
+          <idle>-0 [004] 519.022147: cpu_idle: state=0 cpu_id=4
+          <idle>-0 [004] 519.022641: cpu_idle: state=4294967295 cpu_id=4
+          <idle>-0 [001] 519.022642: cpu_idle: state=4294967295 cpu_id=1
+          <idle>-0 [002] 519.022643: cpu_idle: state=4294967295 cpu_id=2
+          <idle>-0 [001] 519.022788: cpu_idle: state=0 cpu_id=1
+          <idle>-0 [002] 519.022831: cpu_idle: state=2 cpu_id=2
+          <idle>-0 [003] 519.022867: cpu_idle: state=4294967295 cpu_id=3
+          <idle>-0 [003] 519.023045: cpu_idle: state=2 cpu_id=3
+          <idle>-0 [004] 519.023080: cpu_idle: state=1 cpu_id=4
+        """)
+
+        df = trace.data_frame.cpu_wakeups()
+
+        exp_index = [519.021928, 519.022641, 519.022642, 519.022643, 519.022867]
+        exp_cpus  = [         4,          4,          1,          2,          3]
+        self.assertListEqual(df.index.tolist(), exp_index)
+        self.assertListEqual(df.cpu.tolist(), exp_cpus)
+
+        df = trace.data_frame.cpu_wakeups([2])
+
+        self.assertListEqual(df.index.tolist(), [519.022643])
+        self.assertListEqual(df.cpu.tolist(), [2])
+
 class TestTraceNoClusterData(TestTrace):
     """
     Test Trace without cluster data
diff --git a/tools/wa_user_directory/config.yaml b/tools/wa_user_directory/config.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..0fb4aa003a2cf061e65d4dda6b2ea37ccdbe633f
--- /dev/null
+++ b/tools/wa_user_directory/config.yaml
@@ -0,0 +1,19 @@
+# Skeleton global config.yaml for WA3
+device: generic_android
+
+# Prevents Geekbench and other such workloads from being run by accident. For
+# devices where such workloads are safe (i.e. publicly-available devices),
+# override this in a per-device config file and pass it to wa with the
+# --config/-c option.
+allow_phone_home: false
+
+trace_cmd:
+    buffer_size: 102400
+    report: false
+
+# Disable re-trying things that go wrong
+max_retries: 0
+
+# If any of the workloads fail during the initialization phase, don't bail
+# out on the rest of the run
+bail_on_init_failure: false
diff --git a/tools/wa_user_directory/plugins/exoplayer/__init__.py b/tools/wa_user_directory/plugins/exoplayer/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..658c297035cae9542895162726df94b8df3bc580
--- /dev/null
+++ b/tools/wa_user_directory/plugins/exoplayer/__init__.py
@@ -0,0 +1,209 @@
+# SPDX-License-Identifier: Apache-2.0
+#
+# Copyright (C) 2017, Arm Limited and contributors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from collections import defaultdict
+import re
+import os
+import time
+import urllib
+
+from wa import ApkWorkload, Parameter, ConfigError, WorkloadError
+from wa.framework.configuration.core import settings
+from wa.utils.types import boolean
+from wa.utils.misc import ensure_directory_exists
+from devlib.utils.android import grant_app_permissions
+
+# Regexps for benchmark synchronization
+# (named capture groups restored after formatting damage; only 'duration' and
+# 'count' are referenced below, the name of the first 'dropped_frames' group
+# is a guess)
+REGEXPS = {
+    'start'         : '.*Displayed com.google.android.exoplayer2.demo/.PlayerActivity',
+    'duration'      : '.*period \[(?P<duration>[0-9]+.*)\]',
+    'end'           : '.*state \[.+, .+, E\]',
+    'dropped_frames': '.*droppedFrames \[(?P<ts>[0-9]+\.[0-9]+), (?P<count>[0-9]+)\]'
+}
+
+
+DOWNLOAD_URLS = {
+    'mp4_1080p': 'http://distribution.bbb3d.renderfarming.net/video/mp4/bbb_sunflower_1080p_30fps_normal.mp4',
+    'mov_720p': 'http://download.blender.org/peach/bigbuckbunny_movies/big_buck_bunny_720p_h264.mov',
+    'mov_480p': 'http://download.blender.org/peach/bigbuckbunny_movies/big_buck_bunny_480p_h264.mov',
+    'ogg_18:45': 'http://upload.wikimedia.org/wikipedia/commons/c/ca/Tchaikovsky_-_Romeo_and_Juliet_Ouverture_-_Antal_Dorati_(1959).ogg',
+}
+
+
+class ExoPlayer(ApkWorkload):
+    """
+    Android ExoPlayer
+
+    ExoPlayer is the basic video player library that is used by the YouTube
+    android app. The aim of this workload is to use it as a proxy for YouTube
+    performance on targets where running the real YouTube app is not possible
+    due to its dependencies.
+
+    ExoPlayer sources: https://github.com/google/ExoPlayer
+
+    The 'demo' application is used by this workload. It can easily be built by
+    loading the ExoPlayer sources into Android Studio.
+
+    Version r2.4.0 built from commit d979469 is known to work.
+
+    Produces a metric 'exoplayer_dropped_frames' - this is the count of frames
+    that ExoPlayer itself reports as dropped. This is not the same thing as the
+    dropped frames reported by gfxinfo.
+    """
+
+    name = 'exoplayer'
+
+    video_directory = os.path.join(settings.dependencies_directory, name)
+
+    package_names = ['com.google.android.exoplayer2.demo']
+    versions = ['2.4.0']
+    action = 'com.google.android.exoplayer.demo.action.VIEW'
+    default_format = 'mov_720p'
+
+    parameters = [
+        Parameter('version', allowed_values=versions, default=versions[-1], override=True),
+        Parameter('duration', kind=int, default=20,
+                  description="""
+                  Playback duration of the video file. This becomes the duration of the workload.
+                  If provided, it must be shorter than the length of the media.
+                  """),
+        Parameter('format', allowed_values=DOWNLOAD_URLS.keys(),
+                  description="""
+                  Specifies which format video file to play. Default is {}
+                  """.format(default_format)),
+        Parameter('filename',
+                  description="""
+                  The name of the video file to play. This can be either a path
+                  to the file anywhere on your file system, or it could be just a
+                  name, in which case, the workload will look for it in
+                  ``{}``
+                  *Note*: either format or filename should be specified, but not both!
+                  """.format(video_directory)),
+        Parameter('force_dependency_push', kind=boolean, default=False,
+                  description="""
+                  If true, video will always be pushed to device, regardless
+                  of whether the file is already on the device. Default is ``False``.
+                  """),
+    ]
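+
+    # Illustrative logcat lines the REGEXPS above are meant to match
+    # (reconstructed examples, not captured device output):
+    #   ...Displayed com.google.android.exoplayer2.demo/.PlayerActivity
+    #   ...period [20.016]
+    #   ...droppedFrames [12.34, 5]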
+ """), + ] + + def validate(self): + if self.format and self.filename: + raise ConfigError('Ether format *or* filename must be specified; but not both.') + + if not self.format and not self.filename: + self.format = self.default_format + + def _find_host_video_file(self): + """Pick the video file we're going to use, download it if necessary""" + if self.filename: + if self.filename[0] in './' or len(self.filename) > 1 and self.filename[1] == ':': + filepath = os.path.abspath(self.filename) + else: + filepath = os.path.join(self.video_directory, self.filename) + if not os.path.isfile(filepath): + raise WorkloadError('{} does not exist.'.format(filepath)) + return filepath + else: + # Search for files we've already downloaded + files = [] + for filename in os.listdir(self.video_directory): + format_ext, format_resolution = self.format.split('_') + _, file_ext = os.path.splitext(filename) + if file_ext == '.' + format_ext and format_resolution in filename: + files.append(os.path.join(self.video_directory, filename)) + + if not files: + # Download a file with the requested format + url = DOWNLOAD_URLS[self.format] + filepath = os.path.join(self.video_directory, os.path.basename(url)) + self.logger.info('Downloading {} to {}...'.format(url, filepath)) + urllib.urlretrieve(url, filepath) + return filepath + else: + if len(files) > 1: + self.logger.warn('Multiple files found for {} format. Using {}.' + .format(self.format, files[0])) + self.logger.warn('Use "filename"parameter instead of ' + '"format" to specify a different file.') + return files[0] + + def init_resources(self, context): + # Needs to happen first, as it sets self.format, which is required by + # _find_host_video_file + self.validate() + + ensure_directory_exists(self.video_directory) + self.host_video_file = self._find_host_video_file() + + def setup(self, context): + super(ExoPlayer, self).setup(context) + + grant_app_permissions(self.target, self.package) + + self.device_video_file = self.target.path.join(self.target.working_directory, + os.path.basename(self.host_video_file)) + if self.force_dependency_push or not self.target.file_exists(self.device_video_file): + self.logger.info('Copying {} to device.'.format(self.host_video_file)) + self.target.push(self.host_video_file, self.device_video_file, timeout=120) + + self.play_cmd = 'am start -a {} -d "file://{}"'.format(self.action, + self.device_video_file) + + self.monitor = self.target.get_logcat_monitor(REGEXPS.values()) + self.monitor.start() + + def run(self, context): + self.target.execute(self.play_cmd) + + self.monitor.wait_for(REGEXPS['start']) + self.logger.info('Playing media file') + + line = self.monitor.wait_for(REGEXPS['duration'])[0] + media_duration_s = int(round(float(re.search(REGEXPS['duration'], line) + .group('duration')))) + + self.logger.info('Media duration is {} seconds'.format(media_duration_s)) + + if self.duration > media_duration_s: + raise ConfigError( + "'duration' param ({}) longer than media duration ({})".format( + self.duration, media_duration_s)) + + if self.duration: + self.logger.info('Waiting {} seconds before ending playback' + .format(self.duration)) + time.sleep(self.duration) + else: + self.logger.info('Waiting for playback completion ({} seconds)' + .format(media_duration_s)) + self.monitor.wait_for(REGEXPS['end'], timeout = media_duration_s + 30) + + def update_output(self, context): + regex = re.compile(REGEXPS['dropped_frames']) + + dropped_frames = 0 + for line in self.monitor.get_log(): + match = regex.match(line) + if match: + 
+                dropped_frames += int(match.group('count'))
+
+        context.add_metric('exoplayer_dropped_frames', dropped_frames,
+                           lower_is_better=True)
+
+    def teardown(self, context):
+        super(ExoPlayer, self).teardown(context)
+        self.monitor.stop()
diff --git a/tools/wa_user_directory/plugins/jankbench/__init__.py b/tools/wa_user_directory/plugins/jankbench/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..6f3bc198c7c0c7238afdf176d018178e14797e00
--- /dev/null
+++ b/tools/wa_user_directory/plugins/jankbench/__init__.py
@@ -0,0 +1,153 @@
+# Copyright 2017 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import csv
+import os
+import re
+import subprocess
+import threading
+import select
+import sqlite3
+
+from wa import Parameter, ApkWorkload
+from wa.framework.exception import WorkloadError
+
+# (named capture groups restored after formatting damage; the names are
+# best-effort reconstructions and are not referenced elsewhere)
+REGEXPS = {
+    'start': (r'.*START.*'
+              'cmp=com.android.benchmark/.app.RunLocalBenchmarksActivity.*'),
+    'count': '.*iteration: (?P<iteration>[0-9]+).*',
+    'metrics': (r'.*Mean: (?P<mean>[0-9\.]+)\s+JankP: (?P<jank_p>[0-9\.]+)\s+'
+                'StdDev: (?P<std_dev>[0-9\.]+)\s+Count Bad: (?P<count_bad>[0-9]+)\s+'
+                'Count Jank: (?P<count_jank>[0-9]+).*'),
+    'done': r'.*BenchmarkDone!.*',
+}
+
+class Jankbench(ApkWorkload):
+
+    name = 'jankbench'
+    description = """
+    Google's Jankbench benchmark.
+
+    Jankbench simulates user interaction with Android UI components and records
+    frame rendering times and 'jank' (rendering discontinuity) in an SQLite
+    database. This is believed to be a good proxy for the smoothness of user
+    experience.
+
+    Dumps a BenchmarkResults.sqlite file in the output directory. This database
+    contains a table 'ui_results' with a row for each frame, showing its
+    rendering time in ms in the 'total_duration' column, and whether or not it
+    was a jank frame in the 'jank_frame' column.
+
+    This information is also extracted from the SQLite file and dumped as
+    jankbench_frames.csv. This is _not_ necessarily the same information as
+    provided by gfxinfo (fps instrument).
+    """
+
+    versions = ['1.0']
+    activity = '.app.RunLocalBenchmarksActivity'
+    package = 'com.android.benchmark'
+    package_names = [package]
+
+    target_db_path = '/data/data/{}/databases/BenchmarkResults'.format(package)
+
+    test_ids = {
+        'list_view'         : 0,
+        'image_list_view'   : 1,
+        'shadow_grid'       : 2,
+        'low_hitrate_text'  : 3,
+        'high_hitrate_text' : 4,
+        'edit_text'         : 5,
+    }
+
+    parameters = [
+        # Deterministic default (test_ids.keys()[0] is arbitrary in Python 2)
+        Parameter('test',
+                  default='list_view', allowed_values=test_ids.keys(),
+                  description='Which Jankbench sub-benchmark to run'),
+        Parameter('run_timeout', kind=int, default=10 * 60,
+                  description="""
+                  Timeout for workload execution. The workload will be killed if it hasn't completed
+                  within this period. In seconds.
+                  """),
+        Parameter('times', kind=int, default=1, constraint=lambda x: x > 0,
+                  description=('Specifies the number of times the benchmark will be run in a "tight '
+                               'loop", i.e. without performing setup/teardown in between.')),
+    ]
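+
+    # Illustrative logcat line matched by REGEXPS['metrics'] (reconstructed
+    # example, not captured device output):
+    #   ...Mean: 14.36 JankP: 2.08 StdDev: 5.67 Count Bad: 12 Count Jank: 4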
+
+    def initialize(self, context):
+        super(Jankbench, self).initialize(context)
+
+        # Need root to get results database
+        if not self.target.is_rooted:
+            raise WorkloadError('Jankbench workload requires device to be rooted')
+
+    def setup(self, context):
+        super(Jankbench, self).setup(context)
+        self.monitor = self.target.get_logcat_monitor(REGEXPS.values())
+        self.monitor.start()
+
+        self.command = (
+            'am start -n com.android.benchmark/.app.RunLocalBenchmarksActivity '
+            '--eia com.android.benchmark.EXTRA_ENABLED_BENCHMARK_IDS {0} '
+            '--ei com.android.benchmark.EXTRA_RUN_COUNT {1}'
+        ).format(self.test_ids[self.test], self.times)
+
+    def run(self, context):
+        # All we need to do is
+        # - start the activity,
+        # - then use the JbRunMonitor to wait until the benchmark reports on
+        #   logcat that it is finished,
+        # - pull the result database file.
+
+        result = self.target.execute(self.command)
+        if 'FAILURE' in result:
+            raise WorkloadError(result)
+        else:
+            self.logger.debug(result)
+
+        self.monitor.wait_for(REGEXPS['start'], timeout=30)
+        self.logger.info('Detected Jankbench start')
+
+        self.monitor.wait_for(REGEXPS['done'], timeout=300 * self.times)
+
+    def extract_results(self, context):
+        # TODO make these artifacts where they should be
+        super(Jankbench, self).extract_results(context)
+        host_db_path = os.path.join(context.output_directory,
+                                    'BenchmarkResults.sqlite')
+        self.target.pull(self.target_db_path, host_db_path, as_root=True)
+        context.add_artifact('jankbench_results_db', host_db_path, 'data')
+
+        columns = ['_id', 'name', 'run_id', 'iteration', 'total_duration', 'jank_frame']
+        jank_frame_idx = columns.index('jank_frame')
+        query = 'SELECT {} FROM ui_results'.format(','.join(columns))
+        conn = sqlite3.connect(host_db_path)
+
+        csv_path = os.path.join(context.output_directory, 'jankbench_frames.csv')
+        jank_frames = 0
+        with open(csv_path, 'wb') as f:
+            writer = csv.writer(f)
+            writer.writerow(columns)
+            for db_row in conn.execute(query):
+                writer.writerow(db_row)
+                if int(db_row[jank_frame_idx]):
+                    jank_frames += 1
+        context.add_artifact('jankbench_results_csv', csv_path, 'data')
+
+        context.add_metric('jankbench_jank_frames', jank_frames,
+                           lower_is_better=True)
+
+    def teardown(self, context):
+        self.monitor.stop()
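+
+# Illustrative offline inspection of the pulled database (a sketch; assumes a
+# BenchmarkResults.sqlite artifact from a previous run in the current dir):
+#   import sqlite3
+#   conn = sqlite3.connect('BenchmarkResults.sqlite')
+#   for total_duration, jank_frame in conn.execute(
+#           'SELECT total_duration, jank_frame FROM ui_results'):
+#       print total_duration, jank_frame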
diff --git a/tools/wa_user_directory/plugins/pcmark/__init__.py b/tools/wa_user_directory/plugins/pcmark/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..36f46a90cd311af920021127a52b9fc67242eb27
--- /dev/null
+++ b/tools/wa_user_directory/plugins/pcmark/__init__.py
@@ -0,0 +1,139 @@
+# SPDX-License-Identifier: Apache-2.0
+#
+# Copyright (C) 2017, Arm Limited and contributors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import os
+import re
+import time
+from zipfile import ZipFile
+
+from wa import Parameter, Workload
+from wa.framework.exception import WorkloadError
+
+INSTALL_INSTRUCTIONS = """
+This workload has incomplete automation support. Please download the APK from
+http://www.futuremark.com/downloads/pcmark-android.apk
+and install it on the device. Connect the device to the internet, then open the
+app on the device, and hit the 'install' button to set up the 'Work v2'
+benchmark.
+"""
+
+class PcMark(Workload):
+    """
+    Android PCMark workload
+
+    TODO: This isn't a proper WA workload! It requires that the app is already
+    installed and set up like so:
+
+    - Install the APK from http://www.futuremark.com/downloads/pcmark-android.apk
+    - Open the app and hit "install"
+
+    """
+    name = 'pcmark'
+
+    package = 'com.futuremark.pcmark.android.benchmark'
+    activity = 'com.futuremark.gypsum.activity.SplashPageActivity'
+
+    package_names = [package]
+
+    parameters = [
+        Parameter('test', default='work', allowed_values=['work'],
+                  description='PCMark sub-benchmark to run'),
+    ]
+
+    regexps = {
+        'start' : '.*START.*com.futuremark.pcmark.android.benchmark',
+        'result': '.*received result for correct code, result file in (?P<path>.*\.zip)'
+    }
+
+    def initialize(self, context):
+        super(PcMark, self).initialize(context)
+
+        # Need root to get results
+        if not self.target.is_rooted:
+            raise WorkloadError('PCMark workload requires device to be rooted')
+
+        if not self.target.is_installed(self.package):
+            raise WorkloadError('Package not installed. ' + INSTALL_INSTRUCTIONS)
+
+        path = ('/storage/emulated/0/Android/data/{}/files/dlc/pcma-workv2-data'
+                .format(self.package))
+        if not self.target.file_exists(path):
+            raise WorkloadError('"Work v2" benchmark not installed through app. '
+                                + INSTALL_INSTRUCTIONS)
+
+    def setup(self, context):
+        super(PcMark, self).setup(context)
+
+        self.target.execute('am kill-all')  # kill all *background* activities
+        self.target.execute('am start -n {}/{}'.format(self.package, self.activity))
+        time.sleep(5)
+
+        # TODO: we clobber the old auto-rotation setting here.
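+        # (the saved rotation is re-applied in teardown() below, but the
+        # original auto-rotation setting itself is not restored)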
+        self.target.set_auto_rotation(False)
+        self._saved_screen_rotation = self.target.get_rotation()
+        # Move to benchmark run page
+        self.target.set_left_rotation()  # Needed to make TAB work
+        self.target.execute('input keyevent KEYCODE_TAB')
+        self.target.execute('input keyevent KEYCODE_TAB')
+
+        self.monitor = self.target.get_logcat_monitor(self.regexps.values())
+        self.monitor.start()
+
+    def run(self, context):
+        self.target.execute('input keyevent KEYCODE_ENTER')
+
+        self.monitor.wait_for(self.regexps['start'], timeout=20)
+        self.logger.info('Detected PCMark start')
+
+        [self.output] = self.monitor.wait_for(self.regexps['result'], timeout=600)
+
+    def extract_results(self, context):
+        remote_zip_path = re.match(self.regexps['result'], self.output).group('path')
+        local_zip_path = os.path.join(context.output_directory,
+                                      self.target.path.basename(remote_zip_path))
+        self.logger.info('pulling {} -> {}'.format(remote_zip_path, local_zip_path))
+        self.target.pull(remote_zip_path, local_zip_path, as_root=True)
+
+        with ZipFile(local_zip_path, 'r') as archive:
+            archive.extractall(context.output_directory)
+
+        xml_path = os.path.join(context.output_directory, 'Result.xml')
+        if not os.path.exists(xml_path):
+            raise WorkloadError("PCMark results .zip didn't contain Result.xml")
+        context.add_artifact('pcmark_result_xml', xml_path, 'data')
+
+        # Fetch workload names and scores
+        score_regex = re.compile(r'\s*<(?P<name>.*)Score>(?P<score>[0-9]*)<')
+        with open(xml_path) as f:
+            for line in f:
+                match = score_regex.match(line)
+                if match:
+                    metric_name = 'pcmark_{}'.format(match.group('name'))
+                    context.add_metric(metric_name, int(match.group('score')))
+
+    def teardown(self, context):
+        super(PcMark, self).teardown(context)
+
+        self.target.execute('am force-stop {}'.format(self.package))
+
+        self.monitor.stop()
+        self.target.set_rotation(int(self._saved_screen_rotation))
diff --git a/tools/wa_user_directory/plugins/ubsystemuijanktests/__init__.py b/tools/wa_user_directory/plugins/ubsystemuijanktests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..76bf5a815a247e30ca7a403b437e56ec34522c86
--- /dev/null
+++ b/tools/wa_user_directory/plugins/ubsystemuijanktests/__init__.py
@@ -0,0 +1,114 @@
+# SPDX-License-Identifier: Apache-2.0
+#
+# Copyright (C) 2017, Arm Limited and contributors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import re
+
+from devlib.utils.android import grant_app_permissions
+
+from wa import ApkWorkload, Parameter, WorkloadError
+
+class UbSystemUiJankTests(ApkWorkload):
+    """
+    AOSP UbSystemUiJankTests tests
+
+    Performs actions on the System UI (launcher, settings, etc) so that UI
+    responsiveness can be evaluated.
+
+    The .apk can be built with `make UbSystemUiJankTests` in the AOSP tree.
+
+    Reports the metrics collected by the instrumentation system - these will
+    likely overlap with those reported by the 'fps' instrument, but should be
+    recorded more accurately.
+ """ + + name = 'ubsystemuijanktests' + + package_names = ['android.platform.systemui.tests.jank'] + + tests = [ + 'LauncherJankTests#testOpenAllAppsContainer', + 'LauncherJankTests#testAllAppsContainerSwipe', + 'LauncherJankTests#testHomeScreenSwipe', + 'LauncherJankTests#testWidgetsContainerFling', + 'SettingsJankTests#testSettingsFling', + 'SystemUiJankTests#testRecentAppsFling', + 'SystemUiJankTests#testRecentAppsDismiss', + 'SystemUiJankTests#testNotificationListPull', + 'SystemUiJankTests#testNotificationListPull_manyNotifications', + 'SystemUiJankTests#testQuickSettingsPull', + 'SystemUiJankTests#testUnlock', + 'SystemUiJankTests#testExpandGroup', + 'SystemUiJankTests#testClearAll', + 'SystemUiJankTests#testChangeBrightness', + 'SystemUiJankTests#testNotificationAppear', + 'SystemUiJankTests#testCameraFromLockscreen', + 'SystemUiJankTests#testAmbientWakeUp', + 'SystemUiJankTests#testGoToFullShade', + 'SystemUiJankTests#testInlineReply', + 'SystemUiJankTests#testPinAppearance', + 'SystemUiJankTests#testLaunchSettings', + ] + + parameters = [ + Parameter('test', default=tests[0], allowed_values=tests, + description='Which of the System UI jank tests to run') + ] + + def setup(self, context): + # Override the default setup method, as it calls + # self.apk.start_activity. We dont want to do that. + + self.apk.initialize_package(context) + self.target.execute('am kill-all') # kill all *background* activities + grant_app_permissions(self.target, self.package) + + self.target.clear_logcat() + + jclass = '{}.{}'.format(self.package, self.test) + self.command = 'am instrument -e iterations 1 -e class {} -w {}'.format( + jclass, self.package) + + def run(self, context): + self.output = self.target.execute(self.command) + + # You see 'FAILURES' if an exception is thrown. + # You see 'Process crashed' if it doesn't recognise the class for some + # reason. + # But neither reports an error in the exit code, so check explicitly. + if 'FAILURES' in self.output or 'Process crashed' in self.output: + raise WorkloadError('Failed to run workload: {}'.format(self.output)) + + def update_output(self, context): + # The 'am instrument' command dumps the instrumentation results into + # stdout. It also gets written by the autotester to a storage file - on + # my devices that is /storage/emulated/0/results.log, but I dont know if + # that's the same for every device. + # + # AOSP probably provides standard tooling for parsing this, but I don't + # know how to use it. Anyway, for this use-case just parsing stdout + # works fine. + + regex = re.compile('INSTRUMENTATION_STATUS: (?P[\w-]+)=(?P[0-9\.]+)') + + for line in self.output.splitlines(): + match = regex.match(line) + if match: + key = match.group('key') + value = float(match.group('value')) + + name = 'instrumentation_{}'.format(key) + context.add_metric(name, value, lower_is_better=True) diff --git a/tools/wltests/README.md b/tools/wltests/README.md new file mode 100644 index 0000000000000000000000000000000000000000..200bb0eadcd049896f154a52d98d4fe37456708a --- /dev/null +++ b/tools/wltests/README.md @@ -0,0 +1,167 @@ + +# WLTests - Workloads Tests on a Series of Commits + +The `lisa-wltest-series` takes a Linux kernel tree, a file containing a list of +commits, and a test command. It then compiles each of those kernels, boots them +on a remote Android target, and runs the test command for each of them. + +An IPython notebook is provided for analysing the results. 
+
+## Initialization
+
+```bash
+# Enter your LISA main folder
+$> cd /path/to/your/LISA_HOME
+
+# Initialize a LISAShell
+$> source init_env
+
+# Export your ANDROID_HOME
+[LISAShell lisa] \> export ANDROID_HOME=/path/to/your/android-sdk-linux
+
+# Ensure your cross-compiler is in your PATH
+[LISAShell lisa] \> export PATH=/path/to/your/cross-compiler/bin:$PATH
+```
+
+## Prepare the target device
+
+In general your device should be pre-configured and flashed with an updated and
+stable user-space. The userspace usually comes with a boot image (`boot.img`)
+which also provides a ramdisk image. In order to be able to test different
+kernels, you are required to deploy the ramdisk image which matches your
+`boot.img` under the corresponding platform folder.
+
+For example, if you are targeting a hikey960 board running android-4.4, the
+ramdisk image should be deployed under:
+```
+  tools/wltests/platforms/hikey960_android-4.4/ramdisk.gz
+```
+Please note that the name of the ramdisk image, in this example `ramdisk.gz`,
+has to match the value of the `RAMDISK_IMAGE` variable defined by the platform
+definition file, in this example:
+```
+  tools/wltests/platforms/hikey960_android-4.4/definitions
+```
+
+### Hikey960
+By default, the firmware on that device reports a device ID when in FASTBOOT
+mode which is different from the device ID reported when in ADB mode.
+This is a major issue for the testing scripts, since they require a mandatory
+device ID which is expected to be the same in both ADB and FASTBOOT modes.
+
+To fix this, you can set a custom and unique device ID for your hikey960 board
+using the following command from FASTBOOT mode:
+
+```bash
+# Set a unique device ID for both FASTBOOT and ADB modes:
+[LISAShell lisa] \> DEVICE_ID="UniqueIdYouLike"
+[LISAShell lisa] \> fastboot getvar nve:SN@$DEVICE_ID
+```
+
+## Download workload dependencies
+
+We cannot distribute the APK files required for this tool to run the workloads -
+you will need to obtain them yourself. You can either install them directly on
+your device (from the Play Store, if necessary), or populate
+`$LISA_HOME/tools/wa_user_directory/dependencies` so that they can be
+automatically installed. There should be one directory for each of the named
+workloads, containing the required APK file, like so:
+
+```
+[LISAShell lisa] \> tree tools/wa_user_directory/dependencies/
+tools/wa_user_directory/dependencies/
+├── exoplayer
+│   └── exoplayer-demo.apk
+└── jankbench
+    └── jank-benchmark.apk
+```
+
+Note that the leaf filename of the .apk files is not important - the files'
+content will be inspected using Android's packaging tools.
+
+If the tool finds that an .apk file is installed on the device, but not present
+on the host, it will be pulled into your dependencies/ directory.
+
+#### Exoplayer
+
+Exoplayer is the underlying tech used by the YouTube Android app. The hope is
+that it can be used as a proxy for YouTube performance on devices where running
+YouTube itself is not practical.
+
+Exoplayer can be built from source code. Clone
+https://github.com/google/ExoPlayer, open the source tree in Android Studio,
+and compile. This should result in a file named 'demo-noExtensions-debug.apk'.
+
+#### Jankbench
+
+You'll need to get the Jankbench .apk from Google.
+
+#### YouTube
+
+By its nature, YouTube needs to be pre-installed on the device for the
+automation to work.
+Note that WA3 has two YouTube workloads: the "youtube" workload simulates UI
+interactions, while the "youtube_playback" workload simply plays a video from
+a URL. The former workload appears to be susceptible to reproducibility issues,
+as the content that is rendered (such as advertisements and video
+recommendations) can change between invocations.
+
+#### Geekbench
+
+The Geekbench automation should be pretty robust. The easiest way to get hold of
+it is probably just to install it from the Play Store. Note that as Geekbench
+poses a threat of 'phoning home', the tool marks it as dangerous. The WA3
+configuration file provided with this tool in
+$LISA_HOME/tools/wa_user_directory/config.yaml sets "allow_phone_home: false" -
+this is intended to prevent accidentally running Geekbench on a confidential
+device. Therefore you will need to override that setting. If you don't have any
+confidential devices you can simply edit that config file. Otherwise, it is best
+to create a separate per-device config file that overrides it, for example:
+
+```
+$ cat hikey960-config.yaml
+device_config:
+  device: 4669290103000000
+
+allow_phone_home: true
+```
+
+Adding `-c /path/to/hikey960-config.yaml` to the `wa` command will apply this
+configuration.
+
+#### PCMark
+
+The PCMark automation support in this tool is very limited. You'll need to
+manually install the .apk from
+http://www.futuremark.com/downloads/pcmark-android.apk, open it on the device
+and hit the 'install' button to install the 'Work' benchmark.
+Note that an Internet connection is required to complete the installation.
+Furthermore, the robustness of the UI automation is not up to the standards of
+the other workloads in WA, so there may be issues running it on untested
+devices.
+A proper solution would require writing UiAutomator code in the vein of WA's
+[Vellamo workload](https://github.com/ARM-software/workload-automation/blob/next/wa/workloads/vellamo/uiauto/app/src/main/java/com/arm/wa/uiauto/vellamo/UiAutomation.java).
+Part of the reason this hasn't been done is that PCMark displays its content in
+a WebView, which poses a challenge for automation with Android's API.
+
+## Using the tool
+
+You'll need to create a list of commits whose performance you want to compare.
+This should be a file in the format produced by running
+`git log --no-color --oneline` in your kernel tree; a sketch of such a file is
+shown after the command lines below.
+
+The test command is typically a Workload Automation command - you can use
+variable substitution to set the location of the output directory that will be
+produced - see the example below.
+
+```bash
+# Get a detailed description of the supported options
+[LISAShell lisa] \> lisa-wltest-series --help
+
+# Minimal command line to run a Workload Automation agenda
+[LISAShell lisa] \> lisa-wltest-series \
+    --platform hikey960_android-4.4 \
+    --kernel_path /path/to/your/kernel/hikey-linaro \
+    --series /path/to/your/series.sha1 \
+    --wa_agenda /path/to/your/agenda.yaml
+```
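+
+For reference, the `--series` file is plain `git log --no-color --oneline`
+output, e.g. (illustrative, with made-up SHAs):
+
+```
+f00dbee sched/fair: patch under test
+cafebab sched/fair: baseline
+```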
diff --git a/tools/wltests/agendas/example-exoplayer-simple.yaml b/tools/wltests/agendas/example-exoplayer-simple.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d2e73474e17d0feb9de9621d4401067d4935d0d3
--- /dev/null
+++ b/tools/wltests/agendas/example-exoplayer-simple.yaml
@@ -0,0 +1,9 @@
+# This is an example of a simple agenda: It simply runs the exoplayer workload 3
+# times. The workload parameters (such as video playback duration) are left at
+# the workload's defaults, and target and instrumentation configuration will
+# either be read from $LISA_HOME/tools/wa_user_directory/config.yaml or
+# the defaults will be used.
+
+workloads:
+  - name: exoplayer
+    iterations: 3
diff --git a/tools/wltests/agendas/example-jankbench.yaml b/tools/wltests/agendas/example-jankbench.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3a0332e2e30ba62a92fbc179036baf69cbe619dd
--- /dev/null
+++ b/tools/wltests/agendas/example-jankbench.yaml
@@ -0,0 +1,9 @@
+# This is an example of a simple agenda: It simply runs the jankbench workload 3
+# times. The workload parameters are left at the workload's defaults, and target
+# and instrumentation configuration will either be read from
+# $LISA_HOME/tools/wa_user_directory/config.yaml or the defaults will be
+# used.
+
+workloads:
+  - name: jankbench
+    iterations: 3
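+
+# An agenda like this is passed to the test command, e.g.:
+#   lisa-wltest-series ... --wa_agenda example-jankbench.yaml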
diff --git a/tools/wltests/agendas/example-rich.yaml b/tools/wltests/agendas/example-rich.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..21fcb8f2a07fc021b6dcec142b20c84af8eb4158
--- /dev/null
+++ b/tools/wltests/agendas/example-rich.yaml
@@ -0,0 +1,99 @@
+# This is an example agenda which shows a possible (rather large) set of
+# workloads and data collection.
+#
+# This will run 5 iterations each of a large set of workloads, under each of
+# PELT and WALT load tracking (it assumes that the target supports both
+# mechanisms).
+#
+
+global:
+  # Collect energy data, ftrace files, and dmesg
+  # You may want to edit your config.yaml to set up the energy_measurement
+  # instrument (an example configuration is provided in this repo).
+  instrumentation: [energy_measurement, trace-cmd, dmesg]
+  # Do everything 5 times
+  iterations: 5
+
+  # To aid reproducibility and try to reduce noise in power measurements,
+  # minimise the screen brightness and set airplane mode on.
+  # TODO: will this break workloads that require internet?
+  runtime_params:
+    brightness: 0
+    airplane_mode: true
+
+# "Sections" are groups of runtime configuration. In the results analysis they
+# will be mapped to "tags" using the classifiers field below
+sections:
+  - id: pelt                  # Just a human-readable name
+    classifiers:              # Will be used to map job output to 'tags' when analysing
+      load_tracking: pelt_cls
+    runtime_params:           # These are the actual parameters that get set on the target
+      sysfile_values:
+        /proc/sys/kernel/sched_use_walt_cpu_util: 0
+        /proc/sys/kernel/sched_use_walt_task_util: 0
+
+  - id: walt
+    classifiers:
+      load_tracking: walt_cls
+    runtime_params:
+      sysfile_values:
+        /proc/sys/kernel/sched_use_walt_cpu_util: 1
+        /proc/sys/kernel/sched_use_walt_task_util: 1
+
+workloads:
+  # Sit on the homescreen for 15 seconds
+  - name: homescreen
+    id: homescreen_15s
+    workload_parameters:
+      duration: 15
+
+  # Play 30 seconds of a video with Exoplayer - this is the basis for the
+  # YouTube app, so it's hoped that this is a decent proxy for YouTube
+  # performance on devices where running the real app is impractical
+  - name: exoplayer
+    id: exoplayer_30s
+    workload_parameters:
+      duration: 30
+
+  - name: pcmark
+    id: pcmark
+
+  - name: geekbench
+    id: geekbench
+    runtime_parameters:
+      airplane_mode: false
+
+  # We need one entry for each of the Jankbench sub-benchmarks
+  - name: jankbench
+    # 'id' and 'classifiers' are optional - just to make the output directory
+    # easier to read/parse
+    id: jb_list_view
+    classifiers:
+      test: jb_list_view
+    # workload_parameters are the real parameters that influence what gets run
+    workload_parameters:
+      test: list_view
+  - name: jankbench
+    id: jb_image_list_view
+    classifiers:
+      test: jb_image_list_view
+    workload_parameters:
+      test: image_list_view
+  - name: jankbench
+    id: jb_shadow_grid
+    classifiers:
+      test: jb_shadow_grid
+    workload_parameters:
+      test: shadow_grid
+  - name: jankbench
+    id: jb_low_hitrate_text
+    classifiers:
+      test: jb_low_hitrate_text
+    workload_parameters:
+      test: low_hitrate_text
+  - name: jankbench
+    id: jb_high_hitrate_text
+    classifiers:
+      test: jb_high_hitrate_text
+    workload_parameters:
+      test: high_hitrate_text
+  - name: jankbench
+    id: jb_edit_text
+    classifiers:
+      test: jb_edit_text
+    workload_parameters:
+      test: edit_text
diff --git a/tools/wltests/android/create_boot_img.sh b/tools/wltests/android/create_boot_img.sh
new file mode 100755
index 0000000000000000000000000000000000000000..951c4634dcc79ea5ebe5882be6421e8a48337791
--- /dev/null
+++ b/tools/wltests/android/create_boot_img.sh
@@ -0,0 +1,88 @@
+#!/bin/bash
+
+################################################################################
+# Internal configurations
+################################################################################
+SCRIPT_DIR=$(dirname $(realpath -s $0))
+BASE_DIR="$SCRIPT_DIR/.."
+source "${BASE_DIR}/helpers"
+source "${DEFINITIONS_PATH}"
+
+DEFAULT_KERNEL="${KERNEL_SRC}/arch/${ARCH}/boot/${KERNEL_IMAGE}"
+KERNEL="${KERNEL:-$DEFAULT_KERNEL}"
+
+DEFAULT_RAMDISK="${PLATFORM_OVERLAY_PATH}/${RAMDISK_IMAGE}"
+RAMDISK="${RAMDISK:-$DEFAULT_RAMDISK}"
+
+DEFAULT_BOOT_IMAGE="${ARTIFACTS_PATH}/${ANDROID_BOOT_IMAGE}"
+BOOT_IMAGE="${BOOT_IMAGE:-$DEFAULT_BOOT_IMAGE}"
+
+CMDLINE=${CMDLINE:-$KERNEL_CMDLINE}
+
+if [ ! -f ${KERNEL} ] ; then
+	c_error "KERNEL image not found: ${KERNEL}"
+	exit $ENOENT
+fi
+if [ ! -f ${RAMDISK} ] ; then
+	c_error "RAMDISK image not found: ${RAMDISK}"
+	c_warning "A valid ramdisk image, which matches the device user-space,"
+	c_warning "must be deployed by the user under the required path."
+	c_info "Please refer to the INSTALLATION INSTRUCTIONS"
+	c_info "if you don't know how to provide such an image."
+ echo + exit $ENOENT +fi + +################################################################################ +# Report configuration +################################################################################ +echo +c_info "Generate BOOT image:" +c_info " $BOOT_IMAGE" +c_info "using this configuration :" +c_info " KERNEL : $KERNEL" +c_info " RAMDISK : $RAMDISK" +c_info " CMDLINE : $CMDLINE" +c_info " ANDROID_IMAGE_BASE : $ANDROID_IMAGE_BASE" +c_info " ANDROID_IMAGE_PAGESIZE : $ANDROID_IMAGE_PAGESIZE" +c_info " ANDROID_OS_VERSION : $ANDROID_OS_VERSION" +c_info " ANDROID_OS_PATCH_LEVEL : $ANDROID_OS_PATCH_LEVEL" + +# Optional arguments +if [ "${ANDROID_TAGS_OFFSET}" ]; then + c_info "- ANDROID_TAGS_OFFSET : ${ANDROID_TAGS_OFFSET}" + ANDROID_TAGS_OFFSET="--tags_offset ${ANDROID_TAGS_OFFSET}" +fi + +if [ "${ANDROID_KERNEL_OFFSET}" ]; then + c_info "- ANDROID_KERNEL_OFFSET : ${ANDROID_KERNEL_OFFSET}" + ANDROID_KERNEL_OFFSET="--kernel_offset ${ANDROID_KERNEL_OFFSET}" +fi + +if [ "${ANDROID_RAMDISK_OFFSET}" ]; then + c_info "- ANDROID_RAMDISK_OFFSET : ${ANDROID_RAMDISK_OFFSET}" + ANDROID_RAMDISK_OFFSET="--ramdisk_offset ${ANDROID_RAMDISK_OFFSET}" +fi + +################################################################################ +# Generate BOOT image +################################################################################ + +# Ensure the output folder exists +mkdir -p $(dirname $BOOT_IMAGE) &>/dev/null + +set -x +"${ANDROID_SCRIPTS_PATH}/mkbootimg" \ + --kernel "${KERNEL}" \ + --ramdisk "${RAMDISK}" \ + --cmdline "${CMDLINE}" \ + --base "${ANDROID_IMAGE_BASE}" \ + --pagesize "${ANDROID_IMAGE_PAGESIZE}" \ + --os_version "${ANDROID_OS_VERSION}" \ + --os_patch_level "${ANDROID_OS_PATCH_LEVEL}" \ + ${ANDROID_TAGS_OFFSET} \ + ${ANDROID_KERNEL_OFFSET} \ + ${ANDROID_RAMDISK_OFFSET} \ + --output "${BOOT_IMAGE}" +set +x + diff --git a/tools/wltests/android/create_dt_img.sh b/tools/wltests/android/create_dt_img.sh new file mode 100755 index 0000000000000000000000000000000000000000..f60fb900db84f7d0308bc67315ae851406afde7d --- /dev/null +++ b/tools/wltests/android/create_dt_img.sh @@ -0,0 +1,52 @@ +#!/bin/bash + +################################################################################ +# Internal configurations +################################################################################ +SCRIPT_DIR=$(dirname $(realpath -s $0)) +BASE_DIR="$SCRIPT_DIR/.." +source "${BASE_DIR}/helpers" +source "${DEFINITIONS_PATH}" + +DEFAULT_DTB="${KERNEL_SRC}/arch/${ARCH}/boot/dts/${KERNEL_DTB}" +DTB="${DTB:-$DEFAULT_DTB}" + +DEFAULT_DTB_IMAGE="${ARTIFACTS_PATH}/${ANDROID_DTB_IMAGE}" +DTB_IMAGE="${DTB_IMAGE:-$DEFAULT_DTB_IMAGE}" + +if [ ! 
-f ${DTB} ] ; then
+	c_error "DTB not found: ${DTB}"
+	exit $ENOENT
+fi
+
+################################################################################
+# Report configuration
+################################################################################
+echo
+c_info "Generate DTB image:"
+c_info "   $DTB_IMAGE"
+c_info "using this configuration :"
+c_info "   DTB                    : $DTB"
+c_info "   ANDROID_IMAGE_PAGESIZE : $ANDROID_IMAGE_PAGESIZE"
+
+# Optional arguments
+if [ "x${ANDROID_DTB_COMPRESSED}" == "xYES" ]; then
+	c_info "-  ANDROID_DTB_COMPRESSED : $ANDROID_DTB_COMPRESSED"
+	ANDROID_DTB_COMPRESSED="--compress"
+fi
+
+################################################################################
+# Generate DTB image
+################################################################################
+
+# Ensure the output folder exists
+mkdir -p $(dirname $DTB_IMAGE) &>/dev/null
+
+set -x
+"${ANDROID_SCRIPTS_PATH}"/mkdtimg \
+	--dtb "${DTB}" \
+	--pagesize "${ANDROID_IMAGE_PAGESIZE}" \
+	$ANDROID_DTB_COMPRESSED \
+	--output "${DTB_IMAGE}"
+set +x
+
diff --git a/tools/wltests/android/mkbootimg b/tools/wltests/android/mkbootimg
new file mode 100755
index 0000000000000000000000000000000000000000..5a13da26b0e10c19b4025aaecda4e47df513ff2b
--- /dev/null
+++ b/tools/wltests/android/mkbootimg
@@ -0,0 +1,175 @@
+#!/usr/bin/env python
+# Copyright 2015, The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
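+
+# Image layout note: one page of header, then the kernel, ramdisk and optional
+# second-stage blobs, each padded to the page size (see write_header and
+# write_data below).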
+ +from __future__ import print_function +from sys import argv, exit, stderr +from argparse import ArgumentParser, FileType, Action +from os import fstat +from struct import pack +from hashlib import sha1 +import sys +import re + +def filesize(f): + if f is None: + return 0 + try: + return fstat(f.fileno()).st_size + except OSError: + return 0 + + +def update_sha(sha, f): + if f: + sha.update(f.read()) + f.seek(0) + sha.update(pack('I', filesize(f))) + else: + sha.update(pack('I', 0)) + + +def pad_file(f, padding): + pad = (padding - (f.tell() & (padding - 1))) & (padding - 1) + f.write(pack(str(pad) + 'x')) + + +def write_header(args): + BOOT_MAGIC = 'ANDROID!'.encode() + args.output.write(pack('8s', BOOT_MAGIC)) + args.output.write(pack('10I', + filesize(args.kernel), # size in bytes + args.base + args.kernel_offset, # physical load addr + filesize(args.ramdisk), # size in bytes + args.base + args.ramdisk_offset, # physical load addr + filesize(args.second), # size in bytes + args.base + args.second_offset, # physical load addr + args.base + args.tags_offset, # physical addr for kernel tags + args.pagesize, # flash page size we assume + 0, # future expansion: MUST be 0 + (args.os_version << 11) | args.os_patch_level)) # os version and patch level + args.output.write(pack('16s', args.board.encode())) # asciiz product name + args.output.write(pack('512s', args.cmdline[:512].encode())) + + sha = sha1() + update_sha(sha, args.kernel) + update_sha(sha, args.ramdisk) + update_sha(sha, args.second) + img_id = pack('32s', sha.digest()) + + args.output.write(img_id) + args.output.write(pack('1024s', args.cmdline[512:].encode())) + pad_file(args.output, args.pagesize) + return img_id + + +class ValidateStrLenAction(Action): + def __init__(self, option_strings, dest, nargs=None, **kwargs): + if 'maxlen' not in kwargs: + raise ValueError('maxlen must be set') + self.maxlen = int(kwargs['maxlen']) + del kwargs['maxlen'] + super(ValidateStrLenAction, self).__init__(option_strings, dest, **kwargs) + + def __call__(self, parser, namespace, values, option_string=None): + if len(values) > self.maxlen: + raise ValueError('String argument too long: max {0:d}, got {1:d}'. 
+ format(self.maxlen, len(values))) + setattr(namespace, self.dest, values) + + +def write_padded_file(f_out, f_in, padding): + if f_in is None: + return + f_out.write(f_in.read()) + pad_file(f_out, padding) + + +def parse_int(x): + return int(x, 0) + +def parse_os_version(x): + match = re.search(r'^(\d{1,3})(?:\.(\d{1,3})(?:\.(\d{1,3}))?)?', x) + if match: + a = int(match.group(1)) + b = c = 0 + if match.lastindex >= 2: + b = int(match.group(2)) + if match.lastindex == 3: + c = int(match.group(3)) + # 7 bits allocated for each field + assert a < 128 + assert b < 128 + assert c < 128 + return (a << 14) | (b << 7) | c + return 0 + +def parse_os_patch_level(x): + match = re.search(r'^(\d{4})-(\d{2})-(\d{2})', x) + if match: + y = int(match.group(1)) - 2000 + m = int(match.group(2)) + # 7 bits allocated for the year, 4 bits for the month + assert y >= 0 and y < 128 + assert m > 0 and m <= 12 + return (y << 4) | m + return 0 + +def parse_cmdline(): + parser = ArgumentParser() + parser.add_argument('--kernel', help='path to the kernel', type=FileType('rb'), + required=True) + parser.add_argument('--ramdisk', help='path to the ramdisk', type=FileType('rb')) + parser.add_argument('--second', help='path to the 2nd bootloader', type=FileType('rb')) + parser.add_argument('--cmdline', help='extra arguments to be passed on the ' + 'kernel command line', default='', action=ValidateStrLenAction, maxlen=1536) + parser.add_argument('--base', help='base address', type=parse_int, default=0x10000000) + parser.add_argument('--kernel_offset', help='kernel offset', type=parse_int, default=0x00008000) + parser.add_argument('--ramdisk_offset', help='ramdisk offset', type=parse_int, default=0x01000000) + parser.add_argument('--second_offset', help='2nd bootloader offset', type=parse_int, + default=0x00f00000) + parser.add_argument('--os_version', help='operating system version', type=parse_os_version, + default=0) + parser.add_argument('--os_patch_level', help='operating system patch level', + type=parse_os_patch_level, default=0) + parser.add_argument('--tags_offset', help='tags offset', type=parse_int, default=0x00000100) + parser.add_argument('--board', help='board name', default='', action=ValidateStrLenAction, + maxlen=16) + parser.add_argument('--pagesize', help='page size', type=parse_int, + choices=[2**i for i in range(11,15)], default=2048) + parser.add_argument('--id', help='print the image ID on standard output', + action='store_true') + parser.add_argument('-o', '--output', help='output file name', type=FileType('wb'), + required=True) + return parser.parse_args() + + +def write_data(args): + write_padded_file(args.output, args.kernel, args.pagesize) + write_padded_file(args.output, args.ramdisk, args.pagesize) + write_padded_file(args.output, args.second, args.pagesize) + + +def main(): + args = parse_cmdline() + img_id = write_header(args) + write_data(args) + if args.id: + if isinstance(img_id, str): + # Python 2's struct.pack returns a string, but py3 returns bytes. 
+ img_id = [ord(x) for x in img_id] + print('0x' + ''.join('{:02x}'.format(c) for c in img_id)) + +if __name__ == '__main__': + main() diff --git a/tools/wltests/android/mkdtimg b/tools/wltests/android/mkdtimg new file mode 100755 index 0000000000000000000000000000000000000000..6572204318c47e5e63f787f8dd2e122b4fcea6ad --- /dev/null +++ b/tools/wltests/android/mkdtimg @@ -0,0 +1,159 @@ +#!/usr/bin/env python +# Copyright 2017, The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import print_function +try: + from os import fstat, stat, remove + from sys import exit + from argparse import ArgumentParser, FileType + from ctypes import sizeof, Structure, c_char, c_int + from struct import pack, calcsize + import zlib +except Exception as e: + print("some module is needed:" + str(e)) + exit(-1) + +dt_head_info_fmt = '4sII' +dt_entry_fmt = 'Q4I2Q' +dtimg_version = 1 +dtb_count = 1 + +def write32(output, value): + output.write(chr(value & 255)) ; value=value // 256 + output.write(chr(value & 255)) ; value=value // 256 + output.write(chr(value & 255)) ; value=value // 256 + output.write(chr(value & 255)) + +def compress(filename, input, output): + output.write('\037\213\010') + output.write(chr(0)) + + statval = stat(filename) + write32(output, 0) + output.write('\002') + output.write('\003') + + crcval = zlib.crc32("") + compobj = zlib.compressobj(9, zlib.DEFLATED, -zlib.MAX_WBITS, + zlib.DEF_MEM_LEVEL, 0) + while True: + data = input.read(1024) + if data == "": + break + crcval = zlib.crc32(data, crcval) + output.write(compobj.compress(data)) + output.write(compobj.flush()) + write32(output, crcval) + write32(output, statval.st_size) + +def dtb_compress(dtb_file): + try: + outputname = dtb_file + '.gz' + input = open(dtb_file, 'rb') + output = open(outputname, 'wb') + compress(dtb_file, input, output) + input.close() + output.close() + except Exception as e: + print('dtb_compress error:' + str(e)) + exit(-1) + return outputname + +class dt_head_info(Structure): + _fields_ = [('magic', c_char * 4), + ('version', c_int), + ('dt_count', c_int)] + +class dt_entry_t(Structure): + _fields_ = [('dtb_size', c_int), + ('dtb_offset', c_int)] + +def align_page_size(offset, pagesize): + return (pagesize - (offset % pagesize)) + +def write_head_info(head_info, args): + args.output.write(pack(dt_head_info_fmt, + head_info.magic, + head_info.version, + head_info.dt_count)) + +def write_dtb_entry_t(dt_entry, args): + args.output.write(pack(dt_entry_fmt, + 0, # reserved + dt_entry.dtb_size, + 0, # reserved + dt_entry.dtb_offset, + 0, # reserved + 0, # reserved + 0)) # reserved + +def write_padding(args, padding): + for i in range(0, padding): + args.output.write('\x00') + +def write_dtb(args): + dtb_file = args.dtb + out_dtb = dtb_file + if args.compress == True: + out_dtb = dtb_compress(dtb_file) + try: + dtb_offset = calcsize(dt_head_info_fmt) + \ + calcsize(dt_entry_fmt) + \ + 4 + padding = align_page_size(dtb_offset, args.pagesize) + dtb_size = stat(out_dtb).st_size + 
dtb_size_padding = align_page_size(dtb_size, args.pagesize)
+        dt_entry = dt_entry_t(dtb_size + dtb_size_padding,
+                              dtb_offset + padding)
+        write_dtb_entry_t(dt_entry, args)
+        args.output.write(pack('I', 0)) # SUCCESS code number
+        write_padding(args, padding)
+        with open(out_dtb, 'rb') as dtb_fd:
+            args.output.write(dtb_fd.read(dtb_size))
+        write_padding(args, dtb_size_padding)
+    except Exception as e:
+        print('write dtb error:' + str(e))
+        exit(-1)
+
+def clean_gz_file(args):
+    try:
+        if args.compress != True:
+            return
+        remove(args.dtb + '.gz')
+    except Exception as e:
+        print('clean gz file error:' + str(e))
+        exit(-1)
+
+def parse_cmdline():
+    parser = ArgumentParser()
+    parser.add_argument('-c', '--compress', help='compress dtb or not',
+                        action='store_true')
+    parser.add_argument('-d', '--dtb', help='path to the dtb', type=str,
+                        required=True)
+    parser.add_argument('-s', '--pagesize', help='align page size',
+                        type=int, choices=[2**i for i in range(11,15)],
+                        default=2048)
+    parser.add_argument('-o', '--output', help='output file name',
+                        type=FileType('wb'), required=True)
+    return parser.parse_args()
+
+def main():
+    args = parse_cmdline()
+    dtimg_head_info = dt_head_info('HSDT', dtimg_version, dtb_count)
+    write_head_info(dtimg_head_info, args)
+    write_dtb(args)
+    clean_gz_file(args)
+
+if __name__ == '__main__':
+    main()
diff --git a/tools/wltests/android/split_bootimg.pl b/tools/wltests/android/split_bootimg.pl
new file mode 100755
index 0000000000000000000000000000000000000000..d3abee078eb674f5e765f1fd6a585915ae5a7015
--- /dev/null
+++ b/tools/wltests/android/split_bootimg.pl
@@ -0,0 +1,212 @@
+#!/usr/bin/perl
+######################################################################
+#
+# File          : split_bootimg.pl
+# Author(s)     : William Enck
+# Description   : Split apart an Android boot image created
+#                 with mkbootimg. The format can be found in
+#                 android-src/system/core/mkbootimg/bootimg.h
+#
+#                 Thanks to alansj on xda-developers.com for
+#                 identifying the format in bootimg.h and
+#                 describing initial instructions for splitting
+#                 the boot.img file.
+# +# Last Modified : Tue Dec 2 23:36:25 EST 2008 +# By : William Enck +# +# Copyright (c) 2008 William Enck +# +###################################################################### + +use strict; +use warnings; + +# Turn on print flushing +$|++; + +###################################################################### +## Global Variables and Constants + +my $SCRIPT = __FILE__; +my $IMAGE_FN = undef; + +# Constants (from bootimg.h) +use constant BOOT_MAGIC => 'ANDROID!'; +use constant BOOT_MAGIC_SIZE => 8; +use constant BOOT_NAME_SIZE => 16; +use constant BOOT_ARGS_SIZE => 512; + +# Unsigned integers are 4 bytes +use constant UNSIGNED_SIZE => 4; + +# Parsed Values +my $PAGE_SIZE = undef; +my $KERNEL_SIZE = undef; +my $RAMDISK_SIZE = undef; +my $SECOND_SIZE = undef; + +###################################################################### +## Main Code + +&parse_cmdline(); +&parse_header($IMAGE_FN); + +=format (from bootimg.h) +** +-----------------+ +** | boot header | 1 page +** +-----------------+ +** | kernel | n pages +** +-----------------+ +** | ramdisk | m pages +** +-----------------+ +** | second stage | o pages +** +-----------------+ +** +** n = (kernel_size + page_size - 1) / page_size +** m = (ramdisk_size + page_size - 1) / page_size +** o = (second_size + page_size - 1) / page_size +=cut + +my $n = int(($KERNEL_SIZE + $PAGE_SIZE - 1) / $PAGE_SIZE); +my $m = int(($RAMDISK_SIZE + $PAGE_SIZE - 1) / $PAGE_SIZE); +my $o = int(($SECOND_SIZE + $PAGE_SIZE - 1) / $PAGE_SIZE); + +my $k_offset = $PAGE_SIZE; +my $r_offset = $k_offset + ($n * $PAGE_SIZE); +my $s_offset = $r_offset + ($m * $PAGE_SIZE); + +(my $base = $IMAGE_FN) =~ s/.*\/(.*)$/$1/; +my $k_file = $base . "-kernel"; +my $r_file = $base . "-ramdisk.gz"; +my $s_file = $base . "-second.gz"; + +# The kernel is always there +print "Writing $k_file ..."; +&dump_file($IMAGE_FN, $k_file, $k_offset, $KERNEL_SIZE); +print " complete.\n"; + +# The ramdisk is always there +print "Writing $r_file ..."; +&dump_file($IMAGE_FN, $r_file, $r_offset, $RAMDISK_SIZE); +print " complete.\n"; + +# The Second stage bootloader is optional +unless ($SECOND_SIZE == 0) { + print "Writing $s_file ..."; + &dump_file($IMAGE_FN, $s_file, $s_offset, $SECOND_SIZE); + print " complete.\n"; +} + +###################################################################### +## Supporting Subroutines + +=header_format (from bootimg.h) +struct boot_img_hdr +{ + unsigned char magic[BOOT_MAGIC_SIZE]; + unsigned kernel_size; /* size in bytes */ + unsigned kernel_addr; /* physical load addr */ + unsigned ramdisk_size; /* size in bytes */ + unsigned ramdisk_addr; /* physical load addr */ + unsigned second_size; /* size in bytes */ + unsigned second_addr; /* physical load addr */ + unsigned tags_addr; /* physical addr for kernel tags */ + unsigned page_size; /* flash page size we assume */ + unsigned unused[2]; /* future expansion: should be 0 */ + unsigned char name[BOOT_NAME_SIZE]; /* asciiz product name */ + unsigned char cmdline[BOOT_ARGS_SIZE]; + unsigned id[8]; /* timestamp / checksum / sha1 / etc */ +}; +=cut +sub parse_header { + my ($fn) = @_; + my $buf = undef; + + open INF, $fn or die "Could not open $fn: $!\n"; + binmode INF; + + # Read the Magic + read(INF, $buf, BOOT_MAGIC_SIZE); + unless ($buf eq BOOT_MAGIC) { + die "Android Magic not found in $fn. 
Giving up.\n"; + } + + # Read kernel size and address (assume little-endian) + read(INF, $buf, UNSIGNED_SIZE * 2); + my ($k_size, $k_addr) = unpack("VV", $buf); + + # Read ramdisk size and address (assume little-endian) + read(INF, $buf, UNSIGNED_SIZE * 2); + my ($r_size, $r_addr) = unpack("VV", $buf); + + # Read second size and address (assume little-endian) + read(INF, $buf, UNSIGNED_SIZE * 2); + my ($s_size, $s_addr) = unpack("VV", $buf); + + # Ignore tags_addr + read(INF, $buf, UNSIGNED_SIZE); + + # get the page size (assume little-endian) + read(INF, $buf, UNSIGNED_SIZE); + my ($p_size) = unpack("V", $buf); + + # Ignore unused + read(INF, $buf, UNSIGNED_SIZE * 2); + + # Read the name (board name) + read(INF, $buf, BOOT_NAME_SIZE); + my $name = $buf; + + # Read the command line + read(INF, $buf, BOOT_ARGS_SIZE); + my $cmdline = $buf; + + # Ignore the id + read(INF, $buf, UNSIGNED_SIZE * 8); + + # Close the file + close INF; + + # Print important values + printf "Page size: %d (0x%08x)\n", $p_size, $p_size; + printf "Kernel size: %d (0x%08x)\n", $k_size, $k_size; + printf "Ramdisk size: %d (0x%08x)\n", $r_size, $r_size; + printf "Second size: %d (0x%08x)\n", $s_size, $s_size; + printf "Board name: $name\n"; + printf "Command line: $cmdline\n"; + + # Save the values + $PAGE_SIZE = $p_size; + $KERNEL_SIZE = $k_size; + $RAMDISK_SIZE = $r_size; + $SECOND_SIZE = $s_size; +} + +sub dump_file { + my ($infn, $outfn, $offset, $size) = @_; + my $buf = undef; + + open INF, $infn or die "Could not open $infn: $!\n"; + open OUTF, ">$outfn" or die "Could not open $outfn: $!\n"; + + binmode INF; + binmode OUTF; + + seek(INF, $offset, 0) or die "Could not seek in $infn: $!\n"; + read(INF, $buf, $size) or die "Could not read $infn: $!\n"; + print OUTF $buf or die "Could not write $outfn: $!\n"; + + close INF; + close OUTF; +} + +###################################################################### +## Configuration Subroutines + +sub parse_cmdline { + unless ($#ARGV == 0) { + die "Usage: $SCRIPT boot.img\n"; + } + $IMAGE_FN = $ARGV[0]; +} diff --git a/tools/wltests/build b/tools/wltests/build new file mode 100755 index 0000000000000000000000000000000000000000..6fea5dfc346985fda2fefb636abfb901e40e058e --- /dev/null +++ b/tools/wltests/build @@ -0,0 +1,305 @@ +#!/bin/bash +# +# SPDX-License-Identifier: Apache-2.0 +# +# Copyright (C) 2015, ARM Limited and contributors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+
+################################################################################
+# Internal configurations
+################################################################################
+
+BASE_DIR=$(dirname $(realpath -s $0))
+source "${BASE_DIR}/helpers"
+
+
+################################################################################
+# Configuration options
+################################################################################
+
+usage() {
+	cat <<EOF
Usage: build [OPTIONS]

	OPTIONS:
	-b, --build            force a full kernel build
	-c, --clean            run 'make clean' before building
	-d, --distclean        run 'make distclean' before building
	-i, --image            build the kernel image
	-k, --keepconfig       build using the current .config
	-m, --menuconfig       run 'make menuconfig'
	-s, --savedefconfig    run 'make savedefconfig'
	-t, --dtbs             build the device tree blobs
	-x, --clean_all        run both 'make clean' and 'make distclean'
	    --modules          build the kernel modules
	-a, --use-ccache       compile using ccache
	-l, --config_list <LIST>
	                       merge this list of CONFIGs
	    --silent           report only error/warning messages

	-h, --help             print help and exit

	Usage notes:
	1) Without OPTIONS it builds the kernel with platform defined configs
	2) To force a kernel full build, when -m/-s/-c/-d/-x are given, add also -b
	3) If -k is _not_ given, the current .config will be altered by CONFIG_CMD or DEFCONFIG

EOF
+}
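+
+# Typical invocations (illustrative):
+#   ./build          # full build with the platform-defined configs
+#   ./build -m -b    # tweak the config via menuconfig, then build
+#   ./build -k       # rebuild keeping the current .config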
PATH="${TOOLS_PATH}:$PATH" + +# Check for a valid toolchain +which ${CROSS_COMPILE}gcc &>/dev/null +if [ $? -ne 0 ]; then + echo + echo + c_error "(Cross)compiler [${CROSS_COMPILE}gcc] not found!" + c_warning "Ensure to have CROSS_COMPILE set to a valid toolchain" + c_warning "which should be reachable from your PATH" + echo + exit $EAGAIN +fi + +# Export compiler configuration +export ARCH +export CROSS_COMPILE +if [ "${USE_CCACHE}" -eq $YES ]; then + export CC="ccache ${CROSS_COMPILE}gcc" + export CXX="ccache ${CROSS_COMPILE}g++" +fi + +# Enable parallel builds +NCPUS="$(( 2 * $(nproc) ))" + + +################################################################################ +# Build cleanup +################################################################################ + +if [ "${CLEAN_KERNEL}" -eq $YES ] || [ "${CLEAN_ALL}" -eq $YES ]; then + (cd "${KERNEL_SRC}"; make clean) +fi + +if [ "${DISTCLEAN_KERNEL}" -eq $YES ] || [ "${CLEAN_ALL}" -eq $YES ]; then + (cd "${KERNEL_SRC}"; make distclean) +fi + +if [ "${MAKE_KEEPCONFIG}" -eq $YES ]; then + c_warning "building with current .config" +fi + + +################################################################################ +# Build configuration +################################################################################ + +if [ ! -z "${CONFIG_CMD}" ] && [ "${MAKE_KEEPCONFIG}" -eq $NO ]; then + c_info "Running CONFIG_CMD..." + (set -x; cd "${KERNEL_SRC}"; ${CONFIG_CMD}) +fi + +if [ ! -z "${DEFCONFIG}" ] && [ "${MAKE_KEEPCONFIG}" -eq $NO ]; then + c_info "Running DEFCONFIG [$DEFCONFIG]..." + (set -x; cd "${KERNEL_SRC}"; make ${DEFCONFIG}) +fi + +if [ ! -z "${BUILD_CONFIG_LIST}" ]; then + c_info "Running [merge_configs.sh]..." + list_configs=(${BUILD_CONFIG_LIST}) + timestamp=$(date +%s) + tmp_file="/tmp/${timestamp}" + for config in "${list_configs[@]}"; do + echo ${config} >> "${tmp_file}" + done + if [ -f "${KERNEL_SRC}/scripts/kconfig/merge_config.sh" ]; then + (set -x; cd "${KERNEL_SRC}"; \ + ./scripts/kconfig/merge_config.sh -m \ + .config ${tmp_file}) + else + c_error "No merge_config.sh script found" + exit $ENOENT + fi + rm -f ${tmp_file} + c_info "Running oldconfig after merge of configs" + (set -x; cd "${KERNEL_SRC}"; \ + yes "" 2>/dev/null | make oldconfig) +fi + + +################################################################################ +# Make the requried target +################################################################################ + +if [ ${BUILD_IMAGE} -eq $YES ]; then + c_info "Making [Image]..." + (set -x; cd "${KERNEL_SRC}"; make -j${NCPUS} "$KERNEL_IMAGE" >${OUT}) +fi + +if [ ${BUILD_DTBS} -eq $YES ]; then + c_info "Making [dtbs]..." + (set -x; cd "${KERNEL_SRC}"; make -j${NCPUS} dtbs >${OUT}) +fi + +if [ ${BUILD_MODULES} -eq $YES ]; then + c_info "Making [modules]..." + (set -x; cd "${KERNEL_SRC}"; make -j${NCPUS} modules >${OUT}) +fi + +if [ ${MAKE_MENUCONFIG} -eq $YES ]; then + c_info "Making [menuconfig]..." + (set -x; cd "${KERNEL_SRC}"; make menuconfig) +fi + +if [ ${MAKE_SAVEDEFCONFIG} -eq $YES ]; then + c_info "Making [savedefconfig]..." + (set -x; cd "${KERNEL_SRC}"; make savedefconfig) +fi + +if [ ${BUILD_NOW} -eq $YES ]; then + c_info "Making default target..." 
+
+
+################################################################################
+# Make the required target
+################################################################################
+
+if [ ${BUILD_IMAGE} -eq $YES ]; then
+	c_info "Making [Image]..."
+	(set -x; cd "${KERNEL_SRC}"; make -j${NCPUS} "$KERNEL_IMAGE" >${OUT})
+fi
+
+if [ ${BUILD_DTBS} -eq $YES ]; then
+	c_info "Making [dtbs]..."
+	(set -x; cd "${KERNEL_SRC}"; make -j${NCPUS} dtbs >${OUT})
+fi
+
+if [ ${BUILD_MODULES} -eq $YES ]; then
+	c_info "Making [modules]..."
+	(set -x; cd "${KERNEL_SRC}"; make -j${NCPUS} modules >${OUT})
+fi
+
+if [ ${MAKE_MENUCONFIG} -eq $YES ]; then
+	c_info "Making [menuconfig]..."
+	(set -x; cd "${KERNEL_SRC}"; make menuconfig)
+fi
+
+if [ ${MAKE_SAVEDEFCONFIG} -eq $YES ]; then
+	c_info "Making [savedefconfig]..."
+	(set -x; cd "${KERNEL_SRC}"; make savedefconfig)
+fi
+
+if [ ${BUILD_NOW} -eq $YES ]; then
+	c_info "Making default target..."
+	(set -x; cd "${KERNEL_SRC}"; make -j${NCPUS} >${OUT})
+fi
+
diff --git a/tools/wltests/helpers b/tools/wltests/helpers
new file mode 100644
index 0000000000000000000000000000000000000000..8922b3f6832f1c9afe4dc173d5da1834ebc2492a
--- /dev/null
+++ b/tools/wltests/helpers
@@ -0,0 +1,209 @@
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+# Copyright (C) 2015, ARM Limited and contributors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+################################################################################
+# Paths
+################################################################################
+
+# Canonicalize the scripts base directory
+BASE_DIR=$(realpath $BASE_DIR)
+
+# Required options
+KERNEL_PATH=${KERNEL_PATH:-"/usr/src/linux"}
+
+# Generics
+TOOLS_PATH="${BASE_DIR}/tools"
+DEFINITIONS_PATH="${PLATFORM_PATH}/definitions"
+ARTIFACTS_PATH="${PLATFORM_PATH}/artifacts"
+
+# Android
+ANDROID_SCRIPTS_PATH="${BASE_DIR}/android"
+ANDROID_OUTPUT_PATH="${PLATFORM_PATH}/artifacts"
+
+
+################################################################################
+# Exit codes
+################################################################################
+
+OK=0		# Success
+ENOENT=2	# No such file or directory
+EIO=5		# I/O error
+EAGAIN=11	# Try again
+ENODEV=19	# No such device
+EINVAL=22	# Invalid argument
+
+# Helper definitions
+FATAL_ERROR=1
+NONFATAL_ERROR=2
+SUCCESS_CODE=$OK
+
+
+################################################################################
+# Logging functions
+################################################################################
+c_error() {
+	NOW=$(date +"%H:%M:%S")
+	# If there is only one parameter, let's assume it's just the message
+	if [ $# -gt 1 ]; then
+		local parent_lineno="$1"
+		local message="$2"
+		echo -e "${red}$NOW - ERROR: on or near line ${parent_lineno}: ${message}${nocol}"
+		return
+	fi
+
+	local message="$1"
+	echo -e "${red}$NOW - ERROR : ${message}${nocol}"
+}
+
+c_warning() {
+	NOW=$(date +"%H:%M:%S")
+	# If there is only one parameter, let's assume it's just the message
+	if [ $# -gt 1 ]; then
+		local parent_lineno="$1"
+		local message="$2"
+		echo -e "${yellow}$NOW - WARNING: on or near line ${parent_lineno}: ${message}${nocol}"
+		return
+	fi
+	local message="$1"
+	echo -e "${yellow}$NOW - WARNING : ${message}${nocol}"
+}
+
+c_info() {
+	NOW=$(date +"%H:%M:%S")
+	# If there is only one parameter, let's assume it's just the message
+	if [ $# -gt 1 ]; then
+		local parent_lineno="$1"
+		local message="$2"
+		echo -e "${blue}$NOW - INFO: on or near line ${parent_lineno}: ${message}${nocol}"
+		return
+	fi
+	local message="$1"
+	echo -e "${blue}$NOW - INFO : ${message}${nocol}"
+}
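+
+# Usage examples (hypothetical output; colors depend on the terminal):
+#
+#   c_info "Flashing images..."      # 14:03:52 - INFO : Flashing images...
+#   c_error $LINENO "flash failed"   # 14:03:52 - ERROR: on or near line 42: flash failed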
+
+d_notify() {
+	MESSAGE=$1
+	ICON=$2
+	# Try to send a desktop notification; fail silently when desktop
+	# notifications are not supported (e.g. headless machines)
+	notify-send \
+		--icon=$ICON \
+		--urgency=critical \
+		--expire-time=1500 \
+		"Test Series" \
+		"$MESSAGE" \
+		2>/dev/null
+}
+
+my_tput() {
+	if [ "${TERM-dumb}" == dumb ]; then
+		return
+	fi
+	tput $*
+}
+
+box_out()
+{
+	local s=("$@") b w
+	for l in "${s[@]}"; do
+		((w<${#l})) && { b="$l"; w="${#l}"; }
+	done
+	my_tput setaf 3
+	echo -e "|-${b//?/-}-|"
+	for l in "${s[@]}"; do
+		printf '| %s%*s%s |\n' "$(my_tput setaf 4)" "-$w" "$l" "$(my_tput setaf 3)"
+	done
+	echo "|-${b//?/-}-|"
+	my_tput sgr0
+}
+
+
+################################################################################
+# Utilities
+################################################################################
+
+c_extract() {
+	if [ -f $1 ] ; then
+		case $1 in
+		*.tar.xz)
+			tar xvJf $1 -C $2
+			;;
+		*.tar.bz2)
+			tar xvjf $1 -C $2
+			;;
+		*.tar.gz)
+			tar xvzf $1 -C $2
+			;;
+		*.rar)
+			unrar x $1 $2
+			;;
+		*.tar)
+			tar xvf $1 -C $2
+			;;
+		*.tbz2)
+			tar xvjf $1 -C $2
+			;;
+		*.tgz)
+			tar xvzf $1 -C $2
+			;;
+		*.zip)
+			unzip $1 -d $2
+			;;
+		*.7z)
+			7z x $1 -o$2
+			;;
+		*)
+			c_error "don't know how to extract archive $1"
+			exit $EINVAL
+			;;
+		esac
+	else
+		c_error "'$1' is not a valid file"
+		exit $ENOENT
+	fi
+}
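+
+# Example: extract a toolchain tarball into the tools directory
+# (paths are illustrative):
+#
+#   c_extract gcc-aarch64.tar.xz "${TOOLS_PATH}"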
+
+
+################################################################################
+# Colors
+################################################################################
+
+if [ -t 1 ]; then
+	ncolors=$(my_tput colors)
+	if [ -n "${ncolors}" ] && [ ${ncolors} -ge 8 ]; then
+		nocol='\e[0m' # No Color
+		white='\e[1;37m'
+		black='\e[0;30m'
+		blue='\e[0;34m'
+		lblue='\e[1;34m'
+		green='\e[0;32m'
+		lgreen='\e[1;32m'
+		cyan='\e[0;36m'
+		lcyan='\e[1;36m'
+		red='\e[0;31m'
+		lred='\e[1;31m'
+		purple='\e[0;35m'
+		lpurple='\e[1;35m'
+		brown='\e[0;33m'
+		yellow='\e[1;33m'
+		grey='\e[0;30m'
+		lgrey='\e[0;37m'
+	fi
+fi
+
diff --git a/tools/wltests/platforms/hikey960_android-4.4/build_images b/tools/wltests/platforms/hikey960_android-4.4/build_images
new file mode 100755
index 0000000000000000000000000000000000000000..c2ae39b2e8820d4c70324b15e472b243d69080e2
--- /dev/null
+++ b/tools/wltests/platforms/hikey960_android-4.4/build_images
@@ -0,0 +1,20 @@
+#!/bin/bash
+
+SCRIPT_DIR=$(dirname $(realpath -s $0))
+BASE_DIR="$SCRIPT_DIR/../.."
+source "${BASE_DIR}/helpers"
+source "${PLATFORM_PATH}/definitions"
+
+################################################################################
+# Build all images required to test a new kernel
+################################################################################
+
+./build --image --dtbs --silent; ERROR=$?
+[ $ERROR -eq 0 ] || exit $ERROR
+
+./android/create_boot_img.sh; ERROR=$?
+[ $ERROR -eq 0 ] || exit $ERROR
+
+./android/create_dt_img.sh; ERROR=$?
+[ $ERROR -eq 0 ] || exit $ERROR
+
diff --git a/tools/wltests/platforms/hikey960_android-4.4/definitions b/tools/wltests/platforms/hikey960_android-4.4/definitions
new file mode 100644
index 0000000000000000000000000000000000000000..ddb4677cfa22d22084770aa96246e917166d763d
--- /dev/null
+++ b/tools/wltests/platforms/hikey960_android-4.4/definitions
@@ -0,0 +1,47 @@
+
+################################################################################
+# Build definitions for Hikey960
+################################################################################
+
+PLATFORM_NAME="Hikey960_Android"
+
+################################################################################
+# Boot Image Configuration
+################################################################################
+
+KERNEL_IMAGE="${KERNEL_IMAGE:-Image.gz}"
+KERNEL_CMDLINE="${KERNEL_CMDLINE:-\
+loglevel=15 \
+androidboot.hardware=hikey960 \
+androidboot.selinux=permissive \
+firmware_class.path=/system/etc/firmware \
+buildvariant=userdebug\
+}"
+RAMDISK_IMAGE="${RAMDISK_IMAGE:-ramdisk.gz}"
+KERNEL_DTB="${KERNEL_DTB:-hisilicon/hi3660-hikey960.dtb}"
+
+ANDROID_BOOT_IMAGE="${ANDROID_BOOT_IMAGE:-boot.img}"
+ANDROID_IMAGE_BASE="${ANDROID_IMAGE_BASE:-0x0}"
+ANDROID_IMAGE_PAGESIZE="${ANDROID_IMAGE_PAGESIZE:-2048}"
+ANDROID_OS_VERSION="${ANDROID_OS_VERSION:-O}"
+ANDROID_OS_PATCH_LEVEL="${ANDROID_OS_PATCH_LEVEL:-2017-04-05}"
+ANDROID_TAGS_OFFSET="${ANDROID_TAGS_OFFSET:-0x07A00000}"
+ANDROID_KERNEL_OFFSET="${ANDROID_KERNEL_OFFSET:-0x00080000}"
+ANDROID_RAMDISK_OFFSET="${ANDROID_RAMDISK_OFFSET:-0x07C00000}"
+
+################################################################################
+# Device Tree Configuration
+################################################################################
+
+ANDROID_DTB_IMAGE="${ANDROID_DTB_IMAGE:-dts.img}"
+ANDROID_DTB_COMPRESSED="${ANDROID_DTB_COMPRESSED:-YES}"
+
+################################################################################
+# Toolchain Configuration
+################################################################################
+
+DEFCONFIG="${DEFCONFIG:-hikey960_defconfig}"
+CONFIG_CMD="${CONFIG_CMD:-}"
+ARCH="${ARCH:-arm64}"
+CROSS_COMPILE="${CROSS_COMPILE:-aarch64-linux-android-}"
+
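+# Every value above uses the VAR="${VAR:-default}" idiom, so each can be
+# overridden from the environment; e.g. (hypothetical toolchain prefix):
+#
+#   CROSS_COMPILE=aarch64-linux-gnu- KERNEL_IMAGE=Image ./build --image
+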
diff --git a/tools/wltests/platforms/hikey960_android-4.4/flash_images b/tools/wltests/platforms/hikey960_android-4.4/flash_images
new file mode 100755
index 0000000000000000000000000000000000000000..926fde50e50ecbccc9c112203aeba7cb1a915507
--- /dev/null
+++ b/tools/wltests/platforms/hikey960_android-4.4/flash_images
@@ -0,0 +1,25 @@
+#!/bin/bash
+
+SCRIPT_DIR=$(dirname $(realpath -s $0))
+BASE_DIR="$SCRIPT_DIR/../.."
+source "${BASE_DIR}/helpers"
+source "${PLATFORM_PATH}/definitions"
+
+################################################################################
+# Flash all images required to test a new kernel
+################################################################################
+
+ls *.img >/dev/null; ERROR=$?
+if [ $ERROR -ne 0 ]; then
+	c_error "No images to flash in $PWD"
+	exit $ENOENT
+fi
+
+for IMAGE in $(ls *.img); do
+	PARTITION=${IMAGE%%.img}
+	echo
+	c_info "Flashing [$IMAGE] on [$PARTITION] partition..."
+	$FASTBOOT flash $PARTITION $IMAGE; ERROR=$?
+	[ $ERROR -eq 0 ] || exit $ERROR
+done
+
diff --git a/tools/wltests/test_series b/tools/wltests/test_series
new file mode 100755
index 0000000000000000000000000000000000000000..a2ec8c7dcb5ff1e3c41707355c019cd60c4e28fc
--- /dev/null
+++ b/tools/wltests/test_series
@@ -0,0 +1,871 @@
+#!/bin/bash
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+# Copyright (C) 2015, ARM Limited and contributors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+################################################################################
+# Other internal configurations
+################################################################################
+BASE_DIR=$(dirname $(realpath $0))
+source "${BASE_DIR}/helpers"
+
+
+################################################################################
+# Builds configuration
+################################################################################
+
+PLATFORM=${PLATFORM:-'hikey960_android-4.4'}
+KERNEL_SRC=${KERNEL_SRC:-$BASE_DIR/kernel}
+SERIES=${SERIES:-''}
+WA_AGENDA=${WA_AGENDA:-''}
+TEST_CMD=${TEST_CMD:-'echo "Test DONE!"'}
+
+if [ -z "$ANDROID_HOME" ]; then
+	ADB=${ADB:-$(which adb)}
+	FASTBOOT=${FASTBOOT:-$(which fastboot)}
+else
+	ADB=${ADB:-$ANDROID_HOME/platform-tools/adb}
+	FASTBOOT=${FASTBOOT:-$ANDROID_HOME/platform-tools/fastboot}
+fi
+
+EMETER=${EMETER:-'ACME'}
+ACME_IP=${ACME_IP:-'192.168.0.1'}
+ACME_USB=${ACME_USB:-'device1'}
+ACME_CHANNELS=${ACME_CHANNELS:-'0'}
+
+RESULTS=${RESULTS:-$LISA_HOME/results/wltests}
+FORCE=${FORCE:-0}
+DEVICE=${DEVICE:-'C00010FFBAADA555'}
+REBOOT_TIMEOUT=${REBOOT_TIMEOUT:-180}
+
+DRYRUN=${DRYRUN:-0}
+
+
+################################################################################
+# Check configuration
+################################################################################
+
+usage() {
+	cat <<EOF
+
+Usage: $(basename $0) [OPTIONS]
+
+Kernel sources to test:
+
+	-k, --kernel_src PATH	The path of the kernel source tree to test.
+				Default: KERNEL_SRC=\$BASE_DIR/kernel
+
+	-s, --series PATH	A file listing the SHA1s of the commits to
+				test, one per line, e.g. as generated by:
+				   git log --oneline <base>..<head>
+
+				The list obtained by the previous command can
+				be enriched by adding an "ID:" in front of each
+				line.
+				If an "ID:" column is present, the commits will
+				be considered in progressive "ID:" order while
+				discarding those with ID=00:
+				Default: SERIES=''
+
+Target device to use for kernel testing:
+
+	-p, --platform		The platform to target.
+				Available platforms are the subfolders of the
+				top level "platforms" folder.
+				Default: PLATFORM=hikey960_android-4.4
+
+	--supported_platforms	Print a list of supported platforms which are
+				valid values for the -p parameter.
+
+	-d, --device		The Android device to target.
+				If ANDROID_SERIAL is set, its value will be used
+				as a default.
+				Default: DEVICE=\$ANDROID_SERIAL
+
+Tests to run for each kernel tested on target:
+
+	-a, --wa_agenda PATH	The path of a WA agenda describing the experiments
+				to be executed for each test in SERIES.
+				Default: WA_AGENDA=''
+
+	--available_agendas	Print a list of available agendas.
+
+	-t, --test_cmd		The command line of a custom test script to run
+				for each kernel.
+				Default: TEST_CMD='echo "Test DONE!"'
+
+Additional arguments:
+
+	--adb			The ADB binary to use.
+				Default: ADB=[\$ANDROID_HOME/platform-tools/adb|\$(which adb)]
+	--fastboot		The FASTBOOT binary to use.
+				Default: FASTBOOT=[\$ANDROID_HOME/platform-tools/fastboot|\$(which fastboot)]
+
+	--emeter		The Energy Meter used to power-cycle the device.
+				Default: EMETER=ACME
+	--acme_ip		The IP address of an ACME energy meter.
+				Default: ACME_IP=192.168.0.1
+	--acme_usb		The ACME channel used to control a USB
+				passthrough connection.
+				Default: ACME_USB=device1
+	--acme_channels		A space separated list of channel IDs to sample.
+				For example, --acme_channels "0 2" will enable
+				sampling on:
+				   iio:device0 and iio:device2
+				Default: ACME_CHANNELS="0"
+
+	--results PATH		The base path for all the generated result folders.
+				Default: RESULTS='$RESULTS'
+
+	--force			Force execution with a non-empty RESULTS folder.
+
+	--reboot_timeout	Maximum number of seconds to wait for a device
+				to complete the boot.
+				Default: REBOOT_TIMEOUT=180
+
+	--dryrun		Don't actually run any command
+				Default: DRYRUN=0
+
+Example command:
+
+   $> lisa-wltest-series --kernel_src /path/to/your/kernel/hikey-linaro \\
+	--series /path/to/your/series.sha1 --platform hikey960_android-4.4 \\
+	--wa_agenda /path/to/your/agenda.yaml
+
+EOF
+}
+
+ASSETS_REQUIRED="definitions build_images flash_images"
+list_supported_platforms() {
+	echo
+	echo "Supported platforms are:"
+	ls $BASE_DIR/platforms | while read PLAT; do
+		RESULT=$OK
+		for ASSET in $ASSETS_REQUIRED; do
+			if [ ! -f $BASE_DIR/platforms/$PLAT/$ASSET ]; then
+				RESULT=$ENOENT; break;
+			fi
+		done
+		[ $RESULT == $OK ] || continue
+		echo "  - $PLAT"
+	done
+	echo
+	echo "New platforms can be added by copying and adapting one of the folders"
+	echo "available under this base path:"
+	echo "   $BASE_DIR/platforms"
+	echo
+}
+
+list_available_agendas() {
+	echo
+	echo "Available agendas are: "
+	for AGENDA in $(ls $BASE_DIR/agendas/*.yaml); do
+		echo "  - $(basename $AGENDA)"
+	done
+	echo
+	echo "New agendas can be added by copying and adapting one of those available"
+	echo "under the folder:"
+	echo "   $BASE_DIR/agendas"
+	echo
+}
+
+while [[ $# -gt 0 ]]; do
+	case $1 in
+
+	# Kernel options
+	-k|--kernel_src)
+		KERNEL_SRC=$2
+		shift
+		;;
+	-s|--series)
+		SERIES=$2
+		shift
+		;;
+
+	# Platform options
+	-p|--platform)
+		PLATFORM=$2
+		shift
+		;;
+	-d|--device)
+		DEVICE=$2
+		shift
+		;;
+	--supported_platforms)
+		list_supported_platforms
+		exit $OK
+		;;
+
+	# Tests options
+	-a|--wa_agenda)
+		WA_AGENDA=$2
+		shift
+		;;
+	-t|--test_cmd)
+		TEST_CMD=$2
+		shift
+		;;
+	--available_agendas)
+		list_available_agendas
+		exit $OK
+		;;
+
+
+	# Tools options
+	--adb)
+		ADB=$2
+		shift
+		;;
+	--fastboot)
+		FASTBOOT=$2
+		shift
+		;;
+
+	# Energy meter options
+	--emeter)
+		EMETER=$2
+		shift
+		;;
+	--acme_ip)
+		ACME_IP=$2
+		shift
+		;;
+	--acme_usb)
+		ACME_USB=$2
+		shift
+		;;
+	--acme_channels)
+		ACME_CHANNELS=$2
+		shift
+		;;
+
+	# Execution customization
+	--results)
+		RESULTS=$2
+		shift
+		;;
+	--force)
+		FORCE=1
+		;;
+	--reboot_timeout)
+		REBOOT_TIMEOUT=$2
+		shift
+		;;
+	--dryrun)
+		DRYRUN=1
+		;;
+
+	# Usage notes
+	-h|--help)
+		usage
+		exit $OK
+		;;
+	*)
+		c_error "Unknown option: $1"
+		usage
+		exit $EAGAIN
+		;;
+	esac
+	shift # past argument or value
+done
+
+# Prepare DEVICE
+if [ "x$DEVICE" == "xC00010FFBAADA555" ]; then
+	if [ -z "$ANDROID_SERIAL" ]; then
+		echo
+		c_error "Target device not set"
+		echo
+		c_info "A target device must be specified by either"
+		c_info "  - setting a value for \$ANDROID_SERIAL"
+		c_info "  - setting a value for \$DEVICE"
+		c_info "  - passing a --device option"
+		echo
+		exit $EINVAL
+	fi
+	DEVICE=$ANDROID_SERIAL
+fi
+
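+# For example (hypothetical serial number), the following are equivalent:
+#
+#   ANDROID_SERIAL=0123456789ABCDEF ./test_series -s series.sha1 ...
+#   ./test_series --device 0123456789ABCDEF -s series.sha1 ...
+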
+# Prepare PLATFORM
+export PLATFORM_PATH=$BASE_DIR/platforms/$PLATFORM
+for ASSET in $ASSETS_REQUIRED; do
+	if [ ! -f $PLATFORM_PATH/$ASSET ]; then
+		echo
+		c_error "The specified PLATFORM=$PLATFORM is not supported, or it does not provide a [$ASSET] file"
+		list_supported_platforms
+		exit $EINVAL
+	fi
+done
+export PLATFORM_OVERLAY_PATH=$PLATFORM_PATH
+
+# Prepare KERNEL_SRC
+export KERNEL_SRC=$(realpath -s $KERNEL_SRC)
+grep -E "mainmenu .* Kernel Configuration" $KERNEL_SRC/Kconfig &>/dev/null
+if [ $? -ne 0 ]; then
+	echo
+	c_error "The \$KERNEL_SRC|--kernel_src path does not look like a valid kernel source tree"
+	echo
+	exit $EINVAL
+fi
+
+# Prepare SERIES
+if [ -z "$SERIES" ]; then
+	echo
+	c_error "A valid commit series should be defined by \$SERIES|--series"
+	echo
+	exit $EINVAL
+fi
+if [ ! -f $SERIES ]; then
+	echo
+	c_error "\$SERIES|--series points to a non-existing commit series"
+	echo
+	exit $ENOENT
+fi
+
+# Prepare RESULTS
+if [ $FORCE -eq 0 -a \
+     -d $RESULTS -a \
+     "$(ls -A $RESULTS)" ]; then
+	echo
+	c_warning "The results folder:"
+	c_warning "   $RESULTS"
+	c_warning "already exists and is not empty."
+	echo
+	c_info "Use \$FORCE|--force if you want to override results in that same folder"
+	echo
+	exit $EINVAL
+fi
+
+# Prepare WA_AGENDA (if specified, it overrides the TEST_CMD)
+if [ ! -z "$WA_AGENDA" ]; then
+	if [ ! -f "$WA_AGENDA" ]; then
+		echo
+		c_error "The \$WA_AGENDA|--wa_agenda is not a valid path for a WA agenda"
+		list_available_agendas
+		exit $EINVAL
+	fi
+	# Generate a WA configuration fragment for the ACME cape
+	IIO_DEVICES=""
+	for CH in $ACME_CHANNELS; do
+		IIO_DEVICES="$IIO_DEVICES \"iio:device$CH\", "
+	done
+	ACME_CONF=$RESULTS/config_acme.yaml
+	mkdir -p $RESULTS &>/dev/null
+	# Minimal fragment enabling WA3's energy measurement on the ACME cape;
+	# adapt the instrument parameters to your setup if needed
+	cat >$ACME_CONF <<EOF
+augmentations:
+    - energy_measurement
+
+energy_measurement:
+    instrument: acme_cape
+    instrument_parameters:
+        host: $ACME_IP
+        iio-devices: [ ${IIO_DEVICES%, } ]
+EOF
+	# Run the WA agenda for each kernel in the series
+	TEST_CMD="wa run $WA_AGENDA -f -d $RESULTS -c $ACME_CONF"
+fi
+
+
+################################################################################
+# USB passthrough control (via the energy meter)
+################################################################################
+usb_disconnect() {
+	c_info "Disconnecting USB..."
+	if [ $DRYRUN -eq 1 ]; then return; fi
+	case $EMETER in
+	'ACME')
+		ssh root@$ACME_IP \
+			"echo 0 > /sys/bus/iio/devices/iio:$ACME_USB/in_active"
+		;;
+	*)
+		c_error "Energy meter $EMETER not supported"
+		exit $EINVAL
+		;;
+	esac
+}
+
+usb_connect() {
+	c_info "Connecting USB..."
+	if [ $DRYRUN -eq 1 ]; then return; fi
+	case $EMETER in
+	'ACME')
+		ssh root@$ACME_IP \
+			"echo 1 > /sys/bus/iio/devices/iio:$ACME_USB/in_active"
+		;;
+	*)
+		c_error "Energy meter $EMETER not supported"
+		exit $EINVAL
+		;;
+	esac
+	sleep 5
+}
+
+################################################################################
+# FASTBOOT mode checking
+################################################################################
+device_in_fastboot() {
+	[[ $($FASTBOOT devices | grep -e "$DEVICE.*fastboot" | wc -l) -gt 0 ]] || return $ENODEV
+	return $OK
+}
+device_not_in_fastboot() {
+	device_in_fastboot || return $OK
+	return $EAGAIN
+}
+
+################################################################################
+# ADB mode checking
+################################################################################
+device_in_adb() {
+	[[ $($ADB devices | grep -e "$DEVICE.*device" | wc -l) -gt 0 ]] || return $ENODEV
+	return $OK
+}
+device_not_in_adb() {
+	device_in_adb || return $OK
+	return $EAGAIN
+}
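+
+# These predicates return 0 on success, so they compose naturally with the
+# shell control operators, e.g. (illustrative):
+#
+#   device_in_adb && c_info "device is visible to adb"
+#   device_not_in_fastboot || c_warning "device is in fastboot mode"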
+
+################################################################################
+# DEVICE connection checks
+################################################################################
+device_connected() {
+	seconds=${1:-5}
+
+	[ $DRYRUN -eq 1 ] || \
+	while true; do
+		device_not_in_fastboot || return $OK
+		device_not_in_adb || return $OK
+
+		echo
+		c_warning "Device not in FASTBOOT nor in ADB mode"
+		let seconds--
+		if [[ $seconds -eq 0 ]]; then
+			c_error "device not connected"
+			return $ENODEV
+		fi
+		usb_disconnect
+		c_warning "Check again in 1s..."
+		sleep 1
+		usb_connect
+	done
+	return $OK
+}
+device_not_connected() {
+	device_connected || return $OK
+	return $EAGAIN
+}
+
+device_status() {
+	_CNT='YES'; device_connected   || _CNT='NO'
+	_FBT='YES'; device_in_fastboot || _FBT='NO'
+	_ADB='YES'; device_in_adb      || _ADB='NO'
+	c_info "Current device status:"
+	c_info "   Reachable : $_CNT"
+	c_info "   Fastboot  : $_FBT"
+	c_info "   ADB mode  : $_ADB"
+}
+
+################################################################################
+# DEVICE boot checks
+################################################################################
+reboot_timedout() {
+	[ $ELAPSED -lt $REBOOT_TIMEOUT ] || return $OK
+	return $EAGAIN
+}
+reboot_not_timedout() {
+	reboot_timedout || return $OK
+	return $EAGAIN
+}
+
+boot_completed() {
+	COMPLETED=$($ADB shell getprop sys.boot_completed)
+	[[ "x$COMPLETED" = x1* ]] || return $EAGAIN
+	return $OK
+}
+boot_not_completed() {
+	boot_completed || return $OK
+	return $EAGAIN
+}
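+
+# The *_not_* variants exist so callers can poll in either direction with
+# until/while loops; a typical wait looks like this (sketch):
+#
+#   ELAPSED=0
+#   until boot_completed || reboot_timedout; do
+#       sleep 3; let ELAPSED+=3
+#   done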
+
+
+################################################################################
+# FASTBOOT mode
+################################################################################
+reboot_fastboot() {
+	seconds=$1
+
+	# Check device is connected
+	if device_not_connected; then
+		c_error "device not connected"
+		return $ENODEV
+	fi
+
+	# Already in fastboot mode: nothing to do
+	if device_in_fastboot; then
+		c_info "Device already in fastboot mode, continuing..."
+		return $OK
+	fi
+
+	c_info "Device in ADB mode"
+	c_info "Rebooting into bootloader..."
+	$ADB reboot bootloader
+
+	echo
+	c_info "Waiting for bootloader up to $REBOOT_TIMEOUT[s]..."
+	ELAPSED=0
+	sleep 5
+
+	# Wait for device to leave ADB mode
+	[ $DRYRUN -eq 1 ] || \
+	until device_not_in_adb || reboot_timedout; do
+		sleep 3; let ELAPSED+=3
+	done
+	echo
+	if reboot_timedout; then
+		c_error "device not leaving ADB mode"
+		d_notify "Bootloader enter TIMEOUT!" face-embarrassed
+		exit $EIO
+	fi
+
+	# Wait for device to enter FASTBOOT mode
+	[ $DRYRUN -eq 1 ] || \
+	while device_not_in_fastboot && reboot_not_timedout; do
+		usb_disconnect &>/dev/null
+		sleep 3; let ELAPSED+=3
+		usb_connect &>/dev/null
+	done
+	echo
+	if reboot_timedout; then
+		c_error "device not entering FASTBOOT mode"
+		d_notify "Bootloader enter TIMEOUT!" face-embarrassed
+		exit $EIO
+	fi
+
+	return $OK
+}
+
+################################################################################
+# Build and test a specified SHA1 checkout
+################################################################################
+
+name_sha1() {
+	COMMIT_SHA1=${1:0:7}
+	MINLEN=12345
+
+	# In case the specified SHA1 has no name, let's use the SHA1 itself
+	COMMIT_NAME=$COMMIT_SHA1
+
+	# Find a name for each possible REF
+	mkfifo tmp_pipe &>/dev/null
+	git -C $KERNEL_SRC for-each-ref \
+		--sort=-committerdate \
+		--format='%(objectname:short) %(refname:short)' \
+		refs/heads/ refs/remotes/ refs/tags | \
+		grep $COMMIT_SHA1 | awk '{print $2}' > tmp_pipe &
+	while IFS= read -r NAME; do
+		# Return the first name starting with "test_"
+		if [[ $NAME = test_* ]]; then
+			COMMIT_NAME=${NAME//\//:}
+			break
+		fi
+		# Otherwise keep the shortest name for the given SHA1
+		if [ ${#NAME} -lt $MINLEN ]; then
+			MINLEN=${#NAME}
+			COMMIT_NAME=${NAME//\//:}
+		fi
+	done < tmp_pipe
+	rm tmp_pipe
+}
+
+match_sha1() {
+	COMMIT_SHA1=$1
+
+	c_info "Current kernel: "
+	CURRENT=$($ADB shell 'uname -a')
+	c_info "   $CURRENT"
+
+	[[ $CURRENT = *$COMMIT_SHA1* ]] || return $EAGAIN
+
+	return $OK
+}
+
+build_sha1() {
+	COMMIT_SHA1=$1
+	COMMIT_DESC=$2
+
+	### Prepare KERNEL_SRC for build
+	pushd $KERNEL_SRC &>/dev/null
+	echo
+	c_info "Checkout kernel: $KERNEL_SRC @ $COMMIT_SHA1..."
+	git checkout $COMMIT_SHA1; ERROR=$?
+	if [ $ERROR -ne 0 ]; then
+		c_error "Failed to checkout [$COMMIT_SHA1]"
+		popd &>/dev/null
+		return $ERROR
+	fi
+	popd &>/dev/null
+
+	### Build all IMAGES
+	pushd $BASE_DIR &>/dev/null
+	$PLATFORM_PATH/build_images
+	RESULT=$?
+	popd &>/dev/null
+	[ $RESULT -eq $OK ] || return $RESULT
+}
+
+flash_sha1() {
+	COMMIT_SHA1=$1
+	COMMIT_DESC=$2
+
+	build_sha1 "$COMMIT_SHA1" "$COMMIT_DESC"; RESULT=$?
+	[ $RESULT -eq $OK ] || return $RESULT
+
+	### Reboot device into BOOTLOADER
+	echo
+	c_info "Rebooting device into bootloader..."
+	attempts=3
+	if [ $DRYRUN -ne 1 ]; then
+		reboot_fastboot
+		while [ $? -ne $OK ]; do
+			let attempts--
+			if [[ $attempts -eq 0 ]]; then
+				c_error "device not entering FASTBOOT mode"
+				exit $EIO
+			fi
+			c_warning "Failed entering FASTBOOT mode, $attempts remaining attempts..."
+			reboot_fastboot
+		done
+	fi
+
+	### Flash generated IMAGES
+	pushd $PLATFORM_PATH/artifacts &>/dev/null
+	[ $DRYRUN -eq 1 ] || $PLATFORM_PATH/flash_images
+	RESULT=$?
+	popd &>/dev/null
+	[ $RESULT -eq $OK ] || return $RESULT
+
+	### Reboot into new kernel
+	echo
+	c_info "Reboot new kernel..."
+	d_notify "Rebooting device..." face-monkey
+	[ $DRYRUN -eq 1 ] || $FASTBOOT reboot
+
+	c_info "Waiting up to $REBOOT_TIMEOUT[s] for boot to complete..."
+	ELAPSED=0
+
+	### Wait for device to reboot
+	[ $DRYRUN -eq 1 ] || \
+	until device_in_adb || reboot_timedout; do
+		usb_disconnect &>/dev/null
+		sleep 3; let ELAPSED+=3
+		usb_connect &>/dev/null
+	done
+	echo
+	if reboot_timedout; then
+		c_error "device not entering ADB mode"
+		d_notify "Device reboot TIMEOUT!" face-embarrassed
+		exit $EIO
+	fi
+	c_info "Device in ADB mode"
+
+	### Wait for boot to complete
+	[ $DRYRUN -eq 1 ] || \
+	until boot_completed || reboot_timedout; do
+		sleep 3; let ELAPSED+=3
+	done
+	echo
+	if reboot_timedout; then
+		c_error "device still booting?!?"
+		d_notify "Device boot completion TIMEOUT!" face-embarrassed
+		exit $EIO
+	fi
+	c_info "Boot completed, wait 10[s] more..."
+	sleep 10 # Add an additional safety margin
+
+	### Check that we are running the expected kernel
+	match_sha1 $COMMIT_SHA1; ERROR=$?
+	[ $ERROR -eq 0 ] || c_error "Failed to flash kernel [$COMMIT_DESC]!"
+	return $ERROR
+}
+
+test_sha1() {
+	COMMIT_SHA1=$1
+	COMMIT_ID=$2
+	COMMITS_COUNT=$3
+	COMMIT_DESC="$(grep $COMMIT_SHA1 $SERIES)"
+
+	# Get a name (if any) for the specified SHA1
+	name_sha1 $COMMIT_SHA1
+	c_info "Testing kernel:"
+	c_info "   SeriesID : $COMMIT_DESC"
+	c_info "   CommitID : $COMMIT_SHA1 $COMMIT_NAME"
+
+	echo
+	c_info "Check current kernel..."
+	if device_in_adb; then
+		match_sha1 $COMMIT_SHA1
+		if [ $? -ne $OK ]; then
+			c_info "Kernel update required!"
+			flash_sha1 $COMMIT_SHA1 "$COMMIT_DESC"
+			if [[ $? -ne 0 ]]; then
+				c_warning "Skipping kernel [$COMMIT_DESC]"
+				return
+			fi
+		fi
+	else
+		c_warning "Device not connected via ADB, cannot check current kernel"
+		c_warning "Forcing: build, flash and reboot of the selected kernel"
+		flash_sha1 $COMMIT_SHA1 "$COMMIT_DESC"
+		if [[ $? -ne 0 ]]; then
+			c_warning "Skipping kernel [$COMMIT_DESC]"
+			d_notify "Skipping kernel [$COMMIT_DESC]" face-sick
+			return
+		fi
+	fi
+
+	echo
+	c_info "Running tests for [$COMMIT_SHA1: $COMMIT_NAME]..."
+	d_notify "Testing kernel $COMMIT_ID/$COMMITS_COUNT:\n$COMMIT_SHA1: $COMMIT_NAME..." face-tired
+	[ $DRYRUN -eq 1 ] || (set -x; eval $TEST_CMD; set +x)
+}
+
+report() {
+	if [[ $TEST_CMD = *"wa run"* ]]; then
+		# Check for ERRORs reported in Workload Automation log files
+		c_info "Looking for WA errors..."
+		ERRORS=$(find $RESULTS -name run.log \
+			-exec grep "WARNING executor: Please see" \{\} \;)
+		if [ -z "$ERRORS" ]; then
+			c_info "No errors reported in WA logfiles"
+		else
+			c_warning "WA reported these errors:"
+			echo "$ERRORS" | while read ERR; do
+				c_warning "   $(echo $ERR | awk '{print $7}')";
+			done
+		fi
+	fi
+}
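+
+# A SERIES file with the optional "ID:" column could look like this
+# (hypothetical SHA1s and subjects):
+#
+#   02: 1a2b3c4 sched/fair: tweak wakeup path
+#   01: 5d6e7f8 sched/fair: baseline fix
+#   00: 9a0b1c2 DEBUG: noisy tracing (ID 00 => skipped)
+#
+# Commits are sorted by ID, and those marked "00:" are discarded.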
+
+# Prepare list of commits to test
+SELECTED_LIST="$(realpath $SERIES).selected"
+FMT=$(grep -v "^#" $SERIES | grep -v "00: " | head -n1 | awk '{print $1}')
+if [[ $FMT = *: ]]; then
+	# Filter out disabled commits and sort based on progressive ID:
+	grep -E -v "^$|^#|00: " $SERIES | sort \
+		| awk '{$1=""; print $0}' > $SELECTED_LIST
+else
+	grep -E -v "^$|^#" $SERIES \
+		| awk '{print $0}' > $SELECTED_LIST
+fi
+
+# Report commits to be tested
+COMMIT_ID=0
+echo
+c_info "Commits selected for testing:"
+cat $SELECTED_LIST | \
+while read COMMIT; do
+	let COMMIT_ID++
+	STR=$(printf "  %3s %s\n" $COMMIT_ID "$COMMIT")
+	c_info "$STR"
+done
+
+# Extract total number of commits
+COMMITS_COUNT=$(wc -l $SELECTED_LIST)
+COMMITS_COUNT=${COMMITS_COUNT%% *}
+
+# Ensure USB is on at tests start
+usb_connect 1>/dev/null
+printf "\n%80s\n\n" | tr " " "="
+
+# If the target isn't connected at the start, bail out
+device_connected || exit 1
+
+# Test each commit
+COMMIT_ID=1
+# Here we read from an arbitrary file descriptor 10 to avoid overlaps with
+# stdin generated by the adb commands in the loop body
+while read -u10 COMMITS; do
+	# Extract SHA1 from commit description
+	COMMIT_SHA1=${COMMITS%% *}
+
+	box_out "PROGRESS : $COMMIT_ID/$COMMITS_COUNT" \
+		"COMMIT   : $COMMITS"
+	echo
+	device_status
+
+	echo
+	test_sha1 $COMMIT_SHA1 $COMMIT_ID $COMMITS_COUNT
+
+	let COMMIT_ID++
+	printf "\n%80s\n\n" | tr " " "="
+
+done 10<$SELECTED_LIST
+
+# Generate reports
+report
+
+d_notify "Tests completed!" face-cool
+
diff --git a/tools/workload-automation b/tools/workload-automation
index 12edabf753d81fb18fbcade7bae6b76ec3e075e2..eb0f53c8f644e6acf99ffba9921c1d0c8186af55 160000
--- a/tools/workload-automation
+++ b/tools/workload-automation
@@ -1 +1 @@
-Subproject commit 12edabf753d81fb18fbcade7bae6b76ec3e075e2
+Subproject commit eb0f53c8f644e6acf99ffba9921c1d0c8186af55