diff --git a/.travis.yml b/.travis.yml
index 2fafc939648f798272e8edb5f4d2620792ab5585..7da785246b1193261d20a3166c584caa2b0b3581 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -19,7 +19,7 @@ sudo: required
 language: python
 install:
   - pip install --upgrade trappy bart-py devlib psutil wrapt matplotlib
-    jupyter sphinx
+    future jupyter sphinx
 script:
   - cd $TRAVIS_BUILD_DIR
   - 'echo backend : Agg > matplotlibrc' # Otherwise it tries to use tkinter
diff --git a/libs/devlib b/libs/devlib
index 5001fae516d442f6154c35b2da4ee5423bdc2a4a..fe0d6eda2acb7345e0499943e8effac187aac91c 160000
--- a/libs/devlib
+++ b/libs/devlib
@@ -1 +1 @@
-Subproject commit 5001fae516d442f6154c35b2da4ee5423bdc2a4a
+Subproject commit fe0d6eda2acb7345e0499943e8effac187aac91c
diff --git a/tools/wa_user_directory/plugins/jankbench/__init__.py b/tools/wa_user_directory/plugins/jankbench/__init__.py
deleted file mode 100644
index 6f3bc198c7c0c7238afdf176d018178e14797e00..0000000000000000000000000000000000000000
--- a/tools/wa_user_directory/plugins/jankbench/__init__.py
+++ /dev/null
@@ -1,153 +0,0 @@
-# Copyright 2017 ARM Limited
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-import csv
-import os
-import re
-import subprocess
-import threading
-import select
-import sqlite3
-
-from wa import Parameter, ApkWorkload
-from wa.framework.exception import WorkloadError
-
-REGEXPS = {
-    'start': (r'.*START.*'
-              'cmp=com.android.benchmark/.app.RunLocalBenchmarksActivity.*'),
-    'count': '.*iteration: (?P<iteration>[0-9]+).*',
-    'metrics': (r'.*Mean: (?P<mean>[0-9\.]+)\s+JankP: (?P<jank_p>[0-9\.]+)\s+'
-                'StdDev: (?P<std_dev>[0-9\.]+)\s+Count Bad: (?P<count_bad>[0-9]+)\s+'
-                'Count Jank: (?P<count_jank>[0-9]+).*'),
-    'done': r'.*BenchmarkDone!.*',
-}
-
-class Jankbench(ApkWorkload):
-
-    name = 'jankbench'
-    description = """
-    Google's Jankbench benchmark.
-
-    Jankbench simulates user interaction with Android UI components and records
-    frame rendering times and 'jank' (rendering discontinuity) in an SQLite
-    database. This is believed to be a good proxy for the smoothness of user
-    experience.
-
-    Dumps a JankbenchResults.sqlite file in the output directory. This database
-    contains a table 'ui_results' with a row for each frame, showing its
-    rendering time in ms in the 'total_duration' column, and whether or not it
-    was a jank frame in the 'jank_frame' column.
-
-    This information is also extracted from the SQLite file and dumped as
-    jankbench_frames.csv. This is _not_ necessarily the same information as
-    provided by gfxinfo (fps instrument).
-    """
-
-    versions = ['1.0']
-    activity = '.app.RunLocalBenchmarksActivity'
-    package = 'com.android.benchmark'
-    package_names = [package]
-
-    target_db_path = '/data/data/{}/databases/BenchmarkResults'.format(package)
-
-    test_ids = {
-        'list_view'         : 0,
-        'image_list_view'   : 1,
-        'shadow_grid'       : 2,
-        'low_hitrate_text'  : 3,
-        'high_hitrate_text' : 4,
-        'edit_text'         : 5,
-    }
-
-    parameters = [
-        Parameter('test',
-                  default=test_ids.keys()[0], allowed_values=test_ids.keys(),
-                  description='Which Jankbench sub-benchmark to run'),
-        Parameter('run_timeout', kind=int, default=10 * 60,
-                  description="""
-                  Timeout for workload execution. The workload will be killed if it hasn't completed
-                  within this period. In seconds.
-                  """),
-        Parameter('times', kind=int, default=1, constraint=lambda x: x > 0,
-                  description=('Specifies the number of times the benchmark will be run in a "tight '
-                               'loop", i.e. without performing setup/teardown in between.')),
-    ]
-
-    def initialize(self, context):
-        super(Jankbench, self).initialize(context)
-
-        # Need root to get results database
-        if not self.target.is_rooted:
-            raise WorkloadError('Jankbench workload requires device to be rooted')
-
-    def setup(self, context):
-        super(Jankbench, self).setup(context)
-        self.monitor = self.target.get_logcat_monitor(REGEXPS.values())
-        self.monitor.start()
-
-        self.command = (
-            'am start -n com.android.benchmark/.app.RunLocalBenchmarksActivity '
-            '--eia com.android.benchmark.EXTRA_ENABLED_BENCHMARK_IDS {0} '
-            '--ei com.android.benchmark.EXTRA_RUN_COUNT {1}'
-        ).format(self.test_ids[self.test], self.times)
-
-
-    def run(self, context):
-        # All we need to do is
-        #  - start the activity,
-        #  - then use the JbRunMonitor to wait until the benchmark reports on
-        #    logcat that it is finished,
-        #  - pull the result database file.
-
-        result = self.target.execute(self.command)
-        if 'FAILURE' in result:
-            raise WorkloadError(result)
-        else:
-            self.logger.debug(result)
-
-        self.monitor.wait_for(REGEXPS['start'], timeout=30)
-        self.logger.info('Detected Jankbench start')
-
-        self.monitor.wait_for(REGEXPS['done'], timeout=300*self.times)
-
-    def extract_results(self, context):
-        # TODO make these artifacts where they should be
-        super(Jankbench, self).extract_results(context)
-        host_db_path = os.path.join(context.output_directory,
-                                    'BenchmarkResults.sqlite')
-        self.target.pull(self.target_db_path, host_db_path, as_root=True)
-        context.add_artifact('jankbench_results_db', host_db_path, 'data')
-
-        columns = ['_id', 'name', 'run_id', 'iteration', 'total_duration', 'jank_frame']
-        jank_frame_idx = columns.index('jank_frame')
-        query = 'SELECT {} FROM ui_results'.format(','.join(columns))
-        conn = sqlite3.connect(os.path.join(host_db_path))
-
-        csv_path = os.path.join(context.output_directory, 'jankbench_frames.csv')
-        jank_frames = 0
-        with open(csv_path, 'wb') as f:
-            writer = csv.writer(f)
-            writer.writerow(columns)
-            for db_row in conn.execute(query):
-                writer.writerow(db_row)
-                if int(db_row[jank_frame_idx]):
-                    jank_frames += 1
-        context.add_artifact('jankbench_results_csv', csv_path, 'data')
-
-        context.add_metric('jankbench_jank_frames', jank_frames,
-                           lower_is_better=True)
-
-    def teardown(self, context):
-        self.monitor.stop()
diff --git a/tools/wa_user_directory/plugins/pcmark/__init__.py b/tools/wa_user_directory/plugins/pcmark/__init__.py
deleted file mode 100644
index 36f46a90cd311af920021127a52b9fc67242eb27..0000000000000000000000000000000000000000
--- a/tools/wa_user_directory/plugins/pcmark/__init__.py
+++ /dev/null
@@ -1,139 +0,0 @@
-# SPDX-License-Identifier: Apache-2.0
-#
-# Copyright (C) 2017, Arm Limited and contributors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-import os
-import re
-import time
-from zipfile import ZipFile
-
-from wa import Parameter, Workload
-from wa.framework.exception import WorkloadError
-
-INSTALL_INSTRUCTIONS="""
-This workload has incomplete automation support. Please download the APK from
-http://www.futuremark.com/downloads/pcmark-android.apk
-and install it on the device. Connect the device to the internet, then open the
-app on the device, and hit the 'install' button to set up the 'Work v2'
-benchmark.
-"""
-
-class PcMark(Workload):
-    """
-    Android PCMark workload
-
-    TODO: This isn't a proper WA workload! It requires that the app is already
-    installed and set up like so:
-
-    - Install the APK from http://www.futuremark.com/downloads/pcmark-android.apk
-    - Open the app and hit "install"
-
-    """
-    name = 'pcmark'
-
-    package = 'com.futuremark.pcmark.android.benchmark'
-    activity = 'com.futuremark.gypsum.activity.SplashPageActivity'
-
-    package_names = ['com.google.android.youtube']
-    action = 'android.intent.action.VIEW'
-
-    parameters = [
-        Parameter('test', default='work', allowed_values=['work'],
-                  description='PCMark sub-benchmark to run'),
-    ]
-
-
-    regexps = {
-        'start' : '.*START.*com.futuremark.pcmark.android.benchmark',
-        'result': '.*received result for correct code, result file in (?P<path>.*\.zip)'
-    }
-
-    def initialize(self, context):
-        super(PcMark, self).initialize(context)
-
-        # Need root to get results
-        if not self.target.is_rooted:
-            raise WorkloadError('PCMark workload requires device to be rooted')
-
-        if not self.target.is_installed(self.package):
-            raise WorkloadError('Package not installed. ' + INSTALL_INSTRUCTIONS)
-
-        path = ('/storage/emulated/0/Android/data/{}/files/dlc/pcma-workv2-data'
-                .format(self.package))
-        if not self.target.file_exists(path):
-            raise WorkloadError('"Work v2" benchmark not installed through app. '
-                                + INSTALL_INSTRUCTIONS)
-
-    def setup(self, context):
-        super(PcMark, self).setup(context)
-
-        self.target.execute('am kill-all') # kill all *background* activities
-        self.target.execute('am start -n {}/{}'.format(self.package, self.activity))
-        time.sleep(5)
-
-        # TODO: we clobber the old auto-rotation setting here.
-        self.target.set_auto_rotation(False)
-        self._saved_screen_rotation = self.target.get_rotation()
-        # Move to benchmark run page
-        self.target.set_left_rotation() # Needed to make TAB work
-        self.target.execute('input keyevent KEYCODE_TAB')
-        self.target.execute('input keyevent KEYCODE_TAB')
-
-        self.monitor = self.target.get_logcat_monitor(self.regexps.values())
-        self.monitor.start()
-
-    def run(self, context):
-        self.target.execute('input keyevent KEYCODE_ENTER')
-
-        self.monitor.wait_for('.*START.*com.futuremark.pcmark.android.benchmark',
-                              timeout=20)
-        self.logger.info('Detected PCMark start')
-
-        [self.output] = self.monitor.wait_for(self.regexps['result'], timeout=600)
-
-    def extract_results(self, context):
-        remote_zip_path = re.match(self.regexps['result'], self.output).group('path')
-        local_zip_path = os.path.join(context.output_directory,
-                                      self.target.path.basename(remote_zip_path))
-        self.logger.info('pulling {} -> {}'.format(remote_zip_path, local_zip_path))
-        self.target.pull(remote_zip_path, local_zip_path, as_root=True)
-
-        with ZipFile(local_zip_path, 'r') as archive:
-            archive.extractall(context.output_directory)
-
-        xml_path = os.path.join(context.output_directory, 'Result.xml')
-        if not os.path.exists(xml_path):
-            raise WorkloadError("PCMark results .zip didn't contain Result.xml")
-        context.add_artifact('pcmark_result_xml', xml_path, 'data')
-
-        # Fetch workload names and scores
-        score_regex = re.compile('\s*<Pcma(?P<name>.*)Score>(?P<score>[0-9]*)<')
-        with open(xml_path) as f:
-            for line in f:
-                match = score_regex.match(line)
-                if match:
-                    metric_name = 'pcmark_{}'.format(match.group('name'))
-                    context.add_metric(metric_name, match.group('score'))
-
-
-    def teardown(self, context):
-        super(PcMark, self).teardown(context)
-
-        self.target.execute('am force-stop {}'.format(self.package))
-
-        self.monitor.stop()
-        self.target.set_rotation(int(self._saved_screen_rotation))
-
diff --git a/tools/wltests/agendas/sched-evaluation-full.yaml b/tools/wltests/agendas/sched-evaluation-full.yaml
index c939bae3add39120f0832ceb6902a2356f21c409..971ebd12fc4131010af3c3c92ccad3f9f42edc9a 100644
--- a/tools/wltests/agendas/sched-evaluation-full.yaml
+++ b/tools/wltests/agendas/sched-evaluation-full.yaml
@@ -57,6 +57,7 @@ workloads:
     classifiers:
       tag: mov_720p_30s
     workload_parameters:
+      version: 2.4
       format: "mov_720p"
       landscape: True
      duration: 30
@@ -67,6 +68,7 @@ workloads:
     classifiers:
       tag: ogg_128kbps_30s
     workload_parameters:
+      version: 2.4
       format: "ogg_128kbps"
       duration: 30

@@ -98,7 +100,7 @@ workloads:
     classifiers:
       tag: iter_30
     workload_parameters:
-      test: list_view
+      test_ids: list_view
       iterations: 30

   - name: jankbench
@@ -106,7 +108,7 @@ workloads:
     classifiers:
       tag: iter_30
     workload_parameters:
-      test: image_list_view
+      test_ids: image_list_view
       iterations: 30

   - name: jankbench
@@ -114,7 +116,7 @@ workloads:
     classifiers:
       tag: iter_30
     workload_parameters:
-      test: shadow_grid
+      test_ids: shadow_grid
       iterations: 30

   - name: jankbench
@@ -122,7 +124,7 @@ workloads:
     classifiers:
       tag: iter_30
     workload_parameters:
-      test: low_hitrate_text
+      test_ids: low_hitrate_text
       iterations: 30

   - name: jankbench
@@ -130,5 +132,5 @@ workloads:
     classifiers:
       tag: iter_30
     workload_parameters:
-      test: edit_text
+      test_ids: edit_text
       iterations: 30
diff --git a/tools/workload-automation b/tools/workload-automation
index b0262e5103488648c4a5b20cddb42fa2b52104ad..b3de85455a872cd269187fb36263a17390f14d9c 160000
--- a/tools/workload-automation
+++ b/tools/workload-automation
@@ -1 +1 @@
-Subproject commit b0262e5103488648c4a5b20cddb42fa2b52104ad
+Subproject commit b3de85455a872cd269187fb36263a17390f14d9c