From eaf7243ba89349e5d762553066d117c938dcdf37 Mon Sep 17 00:00:00 2001
From: Patrick Bellasi
Date: Tue, 10 Oct 2017 14:52:26 +0100
Subject: [PATCH 01/84] submodules: update workload-automation to master

Signed-off-by: Patrick Bellasi
---
 tools/workload-automation | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tools/workload-automation b/tools/workload-automation
index 12edabf75..42b148655 160000
--- a/tools/workload-automation
+++ b/tools/workload-automation
@@ -1 +1 @@
-Subproject commit 12edabf753d81fb18fbcade7bae6b76ec3e075e2
+Subproject commit 42b1486559e5559300d94508ce4b74551825e238
-- GitLab

From e3f1de30a2bbd872646824f2934a308ae9409209 Mon Sep 17 00:00:00 2001
From: Patrick Bellasi
Date: Fri, 13 Oct 2017 11:09:50 +0100
Subject: [PATCH 02/84] submodules: update devlib to master

Signed-off-by: Patrick Bellasi
---
 libs/devlib | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/libs/devlib b/libs/devlib
index dc453ad89..16d5e0b6a 160000
--- a/libs/devlib
+++ b/libs/devlib
@@ -1 +1 @@
-Subproject commit dc453ad8916cfb914c9dafaad8b0b440d3a4b443
+Subproject commit 16d5e0b6a78c676449596b3bae41ff65979b4181
-- GitLab

From d74da92035e77339c385f1d01b24f7b63893346c Mon Sep 17 00:00:00 2001
From: Patrick Bellasi
Date: Fri, 6 Oct 2017 14:49:13 +0100
Subject: [PATCH 03/84] tools/wltests: add initial support for testing a series of patches

This provides a minimal set of scripts to support building, flashing and
booting an Android device for each kernel commit of a provided list.

The WLTest suite lives under LISA_HOME/tools/wltests and it consists of
these modules:

- test_series: the main script, which builds, flashes and reboots an
  Android target for each SHA1 of a given list, taken from a specified
  kernel source tree
- android/*.sh: a set of scripts to support the generation of
  Android-specific images
- build: a set of functions to build different targets, with proper
  abstractions for toolchain provisioning and the generation of kernel
  build configuration files
- helpers: a set of supporting functions

These scripts work based on a platform-specific set of definitions,
which is provided by a following set of patches.

Signed-off-by: Patrick Bellasi
---
 tools/wltests/android/create_boot_img.sh |  88 +++
 tools/wltests/android/create_dt_img.sh   |  52 ++
 tools/wltests/android/mkbootimg          | 175 ++++++
 tools/wltests/android/mkdtimg            | 159 +++++
 tools/wltests/android/split_bootimg.pl   | 212 +++++++
 tools/wltests/build                      | 305 ++++++++++
 tools/wltests/helpers                    | 209 +++++++
 tools/wltests/test_series                | 706 +++++++++++++++++++++++
 8 files changed, 1906 insertions(+)
 create mode 100755 tools/wltests/android/create_boot_img.sh
 create mode 100755 tools/wltests/android/create_dt_img.sh
 create mode 100755 tools/wltests/android/mkbootimg
 create mode 100755 tools/wltests/android/mkdtimg
 create mode 100755 tools/wltests/android/split_bootimg.pl
 create mode 100755 tools/wltests/build
 create mode 100644 tools/wltests/helpers
 create mode 100755 tools/wltests/test_series

diff --git a/tools/wltests/android/create_boot_img.sh b/tools/wltests/android/create_boot_img.sh
new file mode 100755
index 000000000..59b1fb9a3
--- /dev/null
+++ b/tools/wltests/android/create_boot_img.sh
@@ -0,0 +1,88 @@
+#!/bin/bash
+
+################################################################################
+# Internal configurations
+################################################################################
+SCRIPT_DIR=$(dirname $(realpath -s $0))
+BASE_DIR="$SCRIPT_DIR/.."
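+
+# NOTE: this script is not meant to be called directly: it expects
+# PLATFORM_PATH, KERNEL_PATH and the other variables referenced below to be
+# set up by the wltests machinery (helpers plus the platform "definitions"
+# file). A standalone invocation would look something like this (hypothetical
+# paths):
+#   PLATFORM_PATH=platforms/hikey960_android-4.4 KERNEL_PATH=~/linux \
+#       android/create_boot_img.sh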
+source "${BASE_DIR}/helpers"
+source "${DEFINITIONS_PATH}"
+
+DEFAULT_KERNEL="${KERNEL_PATH}/arch/${ARCH}/boot/${KERNEL_IMAGE}"
+KERNEL="${KERNEL:-$DEFAULT_KERNEL}"
+
+DEFAULT_RAMDISK="${PLATFORM_OVERLAY_PATH}/${RAMDISK_IMAGE}"
+RAMDISK="${RAMDISK:-$DEFAULT_RAMDISK}"
+
+DEFAULT_BOOT_IMAGE="${ARTIFACTS_PATH}/${ANDROID_BOOT_IMAGE}"
+BOOT_IMAGE="${BOOT_IMAGE:-$DEFAULT_BOOT_IMAGE}"
+
+CMDLINE=${CMDLINE:-$KERNEL_CMDLINE}
+
+if [ ! -f ${KERNEL} ] ; then
+    c_error "KERNEL image not found: ${KERNEL}"
+    exit $ENOENT
+fi
+if [ ! -f ${RAMDISK} ] ; then
+    c_error "RAMDISK image not found: ${RAMDISK}"
+    c_warning "A valid ramdisk image, which matches the device user-space,"
+    c_warning "must be deployed by the user under the required path."
+    c_info "Please refer to the INSTALLATION INSTRUCTIONS"
+    c_info "if you don't know how to provide such an image."
+    echo
+    exit $ENOENT
+fi
+
+################################################################################
+# Report configuration
+################################################################################
+echo
+c_info "Generate BOOT image:"
+c_info "   $BOOT_IMAGE"
+c_info "using this configuration :"
+c_info "   KERNEL                 : $KERNEL"
+c_info "   RAMDISK                : $RAMDISK"
+c_info "   CMDLINE                : $CMDLINE"
+c_info "   ANDROID_IMAGE_BASE     : $ANDROID_IMAGE_BASE"
+c_info "   ANDROID_IMAGE_PAGESIZE : $ANDROID_IMAGE_PAGESIZE"
+c_info "   ANDROID_OS_VERSION     : $ANDROID_OS_VERSION"
+c_info "   ANDROID_OS_PATCH_LEVEL : $ANDROID_OS_PATCH_LEVEL"
+
+# Optional arguments
+if [ "${ANDROID_TAGS_OFFSET}" ]; then
+    c_info "- ANDROID_TAGS_OFFSET    : ${ANDROID_TAGS_OFFSET}"
+    ANDROID_TAGS_OFFSET="--tags_offset ${ANDROID_TAGS_OFFSET}"
+fi
+
+if [ "${ANDROID_KERNEL_OFFSET}" ]; then
+    c_info "- ANDROID_KERNEL_OFFSET  : ${ANDROID_KERNEL_OFFSET}"
+    ANDROID_KERNEL_OFFSET="--kernel_offset ${ANDROID_KERNEL_OFFSET}"
+fi
+
+if [ "${ANDROID_RAMDISK_OFFSET}" ]; then
+    c_info "- ANDROID_RAMDISK_OFFSET : ${ANDROID_RAMDISK_OFFSET}"
+    ANDROID_RAMDISK_OFFSET="--ramdisk_offset ${ANDROID_RAMDISK_OFFSET}"
+fi
+
+################################################################################
+# Generate BOOT image
+################################################################################
+
+# Ensure the output folder exists
+mkdir -p $(dirname $BOOT_IMAGE) &>/dev/null
+
+set -x
+"${ANDROID_SCRIPTS_PATH}/mkbootimg" \
+    --kernel "${KERNEL}" \
+    --ramdisk "${RAMDISK}" \
+    --cmdline "${CMDLINE}" \
+    --base "${ANDROID_IMAGE_BASE}" \
+    --pagesize "${ANDROID_IMAGE_PAGESIZE}" \
+    --os_version "${ANDROID_OS_VERSION}" \
+    --os_patch_level "${ANDROID_OS_PATCH_LEVEL}" \
+    ${ANDROID_TAGS_OFFSET} \
+    ${ANDROID_KERNEL_OFFSET} \
+    ${ANDROID_RAMDISK_OFFSET} \
+    --output "${BOOT_IMAGE}"
+set +x
+
diff --git a/tools/wltests/android/create_dt_img.sh b/tools/wltests/android/create_dt_img.sh
new file mode 100755
index 000000000..839b42f50
--- /dev/null
+++ b/tools/wltests/android/create_dt_img.sh
@@ -0,0 +1,52 @@
+#!/bin/bash
+
+################################################################################
+# Internal configurations
+################################################################################
+SCRIPT_DIR=$(dirname $(realpath -s $0))
+BASE_DIR="$SCRIPT_DIR/.."
+source "${BASE_DIR}/helpers"
+source "${DEFINITIONS_PATH}"
+
+DEFAULT_DTB="${KERNEL_PATH}/arch/${ARCH}/boot/dts/${KERNEL_DTB}"
+DTB="${DTB:-$DEFAULT_DTB}"
+
+DEFAULT_DTB_IMAGE="${ARTIFACTS_PATH}/${ANDROID_DTB_IMAGE}"
+DTB_IMAGE="${DTB_IMAGE:-$DEFAULT_DTB_IMAGE}"
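+
+# Both DTB and DTB_IMAGE follow the same override convention as
+# create_boot_img.sh, e.g. (hypothetical path):
+#   DTB=/path/to/custom.dtb android/create_dt_img.sh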
+
+if [ ! -f ${DTB} ] ; then
+    c_error "DTB not found: ${DTB}"
+    exit $ENOENT
+fi
+
+################################################################################
+# Report configuration
+################################################################################
+echo
+c_info "Generate DTB image:"
+c_info "   $DTB_IMAGE"
+c_info "using this configuration :"
+c_info "   DTB                    : $DTB"
+c_info "   ANDROID_IMAGE_PAGESIZE : $ANDROID_IMAGE_PAGESIZE"
+
+# Optional arguments
+if [ "x${ANDROID_DTB_COMPRESSED}" == "xYES" ]; then
+    c_info "- ANDROID_DTB_COMPRESSED : $ANDROID_DTB_COMPRESSED"
+    ANDROID_DTB_COMPRESSED="--compress"
+else
+    ANDROID_DTB_COMPRESSED=""
+fi
+
+################################################################################
+# Generate DTB image
+################################################################################
+
+# Ensure the output folder exists
+mkdir -p $(dirname $DTB_IMAGE) &>/dev/null
+
+set -x
+"${ANDROID_SCRIPTS_PATH}"/mkdtimg \
+    --dtb "${DTB}" \
+    --pagesize "${ANDROID_IMAGE_PAGESIZE}" \
+    $ANDROID_DTB_COMPRESSED \
+    --output "${DTB_IMAGE}"
+set +x
+
diff --git a/tools/wltests/android/mkbootimg b/tools/wltests/android/mkbootimg
new file mode 100755
index 000000000..5a13da26b
--- /dev/null
+++ b/tools/wltests/android/mkbootimg
@@ -0,0 +1,175 @@
+#!/usr/bin/env python
+# Copyright 2015, The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ +from __future__ import print_function +from sys import argv, exit, stderr +from argparse import ArgumentParser, FileType, Action +from os import fstat +from struct import pack +from hashlib import sha1 +import sys +import re + +def filesize(f): + if f is None: + return 0 + try: + return fstat(f.fileno()).st_size + except OSError: + return 0 + + +def update_sha(sha, f): + if f: + sha.update(f.read()) + f.seek(0) + sha.update(pack('I', filesize(f))) + else: + sha.update(pack('I', 0)) + + +def pad_file(f, padding): + pad = (padding - (f.tell() & (padding - 1))) & (padding - 1) + f.write(pack(str(pad) + 'x')) + + +def write_header(args): + BOOT_MAGIC = 'ANDROID!'.encode() + args.output.write(pack('8s', BOOT_MAGIC)) + args.output.write(pack('10I', + filesize(args.kernel), # size in bytes + args.base + args.kernel_offset, # physical load addr + filesize(args.ramdisk), # size in bytes + args.base + args.ramdisk_offset, # physical load addr + filesize(args.second), # size in bytes + args.base + args.second_offset, # physical load addr + args.base + args.tags_offset, # physical addr for kernel tags + args.pagesize, # flash page size we assume + 0, # future expansion: MUST be 0 + (args.os_version << 11) | args.os_patch_level)) # os version and patch level + args.output.write(pack('16s', args.board.encode())) # asciiz product name + args.output.write(pack('512s', args.cmdline[:512].encode())) + + sha = sha1() + update_sha(sha, args.kernel) + update_sha(sha, args.ramdisk) + update_sha(sha, args.second) + img_id = pack('32s', sha.digest()) + + args.output.write(img_id) + args.output.write(pack('1024s', args.cmdline[512:].encode())) + pad_file(args.output, args.pagesize) + return img_id + + +class ValidateStrLenAction(Action): + def __init__(self, option_strings, dest, nargs=None, **kwargs): + if 'maxlen' not in kwargs: + raise ValueError('maxlen must be set') + self.maxlen = int(kwargs['maxlen']) + del kwargs['maxlen'] + super(ValidateStrLenAction, self).__init__(option_strings, dest, **kwargs) + + def __call__(self, parser, namespace, values, option_string=None): + if len(values) > self.maxlen: + raise ValueError('String argument too long: max {0:d}, got {1:d}'. 
+ format(self.maxlen, len(values))) + setattr(namespace, self.dest, values) + + +def write_padded_file(f_out, f_in, padding): + if f_in is None: + return + f_out.write(f_in.read()) + pad_file(f_out, padding) + + +def parse_int(x): + return int(x, 0) + +def parse_os_version(x): + match = re.search(r'^(\d{1,3})(?:\.(\d{1,3})(?:\.(\d{1,3}))?)?', x) + if match: + a = int(match.group(1)) + b = c = 0 + if match.lastindex >= 2: + b = int(match.group(2)) + if match.lastindex == 3: + c = int(match.group(3)) + # 7 bits allocated for each field + assert a < 128 + assert b < 128 + assert c < 128 + return (a << 14) | (b << 7) | c + return 0 + +def parse_os_patch_level(x): + match = re.search(r'^(\d{4})-(\d{2})-(\d{2})', x) + if match: + y = int(match.group(1)) - 2000 + m = int(match.group(2)) + # 7 bits allocated for the year, 4 bits for the month + assert y >= 0 and y < 128 + assert m > 0 and m <= 12 + return (y << 4) | m + return 0 + +def parse_cmdline(): + parser = ArgumentParser() + parser.add_argument('--kernel', help='path to the kernel', type=FileType('rb'), + required=True) + parser.add_argument('--ramdisk', help='path to the ramdisk', type=FileType('rb')) + parser.add_argument('--second', help='path to the 2nd bootloader', type=FileType('rb')) + parser.add_argument('--cmdline', help='extra arguments to be passed on the ' + 'kernel command line', default='', action=ValidateStrLenAction, maxlen=1536) + parser.add_argument('--base', help='base address', type=parse_int, default=0x10000000) + parser.add_argument('--kernel_offset', help='kernel offset', type=parse_int, default=0x00008000) + parser.add_argument('--ramdisk_offset', help='ramdisk offset', type=parse_int, default=0x01000000) + parser.add_argument('--second_offset', help='2nd bootloader offset', type=parse_int, + default=0x00f00000) + parser.add_argument('--os_version', help='operating system version', type=parse_os_version, + default=0) + parser.add_argument('--os_patch_level', help='operating system patch level', + type=parse_os_patch_level, default=0) + parser.add_argument('--tags_offset', help='tags offset', type=parse_int, default=0x00000100) + parser.add_argument('--board', help='board name', default='', action=ValidateStrLenAction, + maxlen=16) + parser.add_argument('--pagesize', help='page size', type=parse_int, + choices=[2**i for i in range(11,15)], default=2048) + parser.add_argument('--id', help='print the image ID on standard output', + action='store_true') + parser.add_argument('-o', '--output', help='output file name', type=FileType('wb'), + required=True) + return parser.parse_args() + + +def write_data(args): + write_padded_file(args.output, args.kernel, args.pagesize) + write_padded_file(args.output, args.ramdisk, args.pagesize) + write_padded_file(args.output, args.second, args.pagesize) + + +def main(): + args = parse_cmdline() + img_id = write_header(args) + write_data(args) + if args.id: + if isinstance(img_id, str): + # Python 2's struct.pack returns a string, but py3 returns bytes. + img_id = [ord(x) for x in img_id] + print('0x' + ''.join('{:02x}'.format(c) for c in img_id)) + +if __name__ == '__main__': + main() diff --git a/tools/wltests/android/mkdtimg b/tools/wltests/android/mkdtimg new file mode 100755 index 000000000..657220431 --- /dev/null +++ b/tools/wltests/android/mkdtimg @@ -0,0 +1,159 @@ +#!/usr/bin/env python +# Copyright 2017, The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import print_function +try: + from os import fstat, stat, remove + from sys import exit + from argparse import ArgumentParser, FileType + from ctypes import sizeof, Structure, c_char, c_int + from struct import pack, calcsize + import zlib +except Exception as e: + print("some module is needed:" + str(e)) + exit(-1) + +dt_head_info_fmt = '4sII' +dt_entry_fmt = 'Q4I2Q' +dtimg_version = 1 +dtb_count = 1 + +def write32(output, value): + output.write(chr(value & 255)) ; value=value // 256 + output.write(chr(value & 255)) ; value=value // 256 + output.write(chr(value & 255)) ; value=value // 256 + output.write(chr(value & 255)) + +def compress(filename, input, output): + output.write('\037\213\010') + output.write(chr(0)) + + statval = stat(filename) + write32(output, 0) + output.write('\002') + output.write('\003') + + crcval = zlib.crc32("") + compobj = zlib.compressobj(9, zlib.DEFLATED, -zlib.MAX_WBITS, + zlib.DEF_MEM_LEVEL, 0) + while True: + data = input.read(1024) + if data == "": + break + crcval = zlib.crc32(data, crcval) + output.write(compobj.compress(data)) + output.write(compobj.flush()) + write32(output, crcval) + write32(output, statval.st_size) + +def dtb_compress(dtb_file): + try: + outputname = dtb_file + '.gz' + input = open(dtb_file, 'rb') + output = open(outputname, 'wb') + compress(dtb_file, input, output) + input.close() + output.close() + except Exception as e: + print('dtb_compress error:' + str(e)) + exit(-1) + return outputname + +class dt_head_info(Structure): + _fields_ = [('magic', c_char * 4), + ('version', c_int), + ('dt_count', c_int)] + +class dt_entry_t(Structure): + _fields_ = [('dtb_size', c_int), + ('dtb_offset', c_int)] + +def align_page_size(offset, pagesize): + return (pagesize - (offset % pagesize)) + +def write_head_info(head_info, args): + args.output.write(pack(dt_head_info_fmt, + head_info.magic, + head_info.version, + head_info.dt_count)) + +def write_dtb_entry_t(dt_entry, args): + args.output.write(pack(dt_entry_fmt, + 0, # reserved + dt_entry.dtb_size, + 0, # reserved + dt_entry.dtb_offset, + 0, # reserved + 0, # reserved + 0)) # reserved + +def write_padding(args, padding): + for i in range(0, padding): + args.output.write('\x00') + +def write_dtb(args): + dtb_file = args.dtb + out_dtb = dtb_file + if args.compress == True: + out_dtb = dtb_compress(dtb_file) + try: + dtb_offset = calcsize(dt_head_info_fmt) + \ + calcsize(dt_entry_fmt) + \ + 4 + padding = align_page_size(dtb_offset, args.pagesize) + dtb_size = stat(out_dtb).st_size + dtb_size_padding = align_page_size(dtb_size, args.pagesize) + dt_entry = dt_entry_t(dtb_size + dtb_size_padding, + dtb_offset + padding) + write_dtb_entry_t(dt_entry, args) + args.output.write(pack('I', 0)) # SUCCESS code number + write_padding(args, padding) + with open(out_dtb, 'rb') as dtb_fd: + args.output.write(dtb_fd.read(dtb_size)) + write_padding(args, dtb_size_padding) + except Exception as e: + print('write dtb error:' + str(e)) + exit(-1) + +def clean_gz_file(args): + try: + if args.compress != True: + return + remove(args.dtb + '.gz') + except Exception as e: + print('clean gz file 
error:' + str(e)) + exit(-1) + +def parse_cmdline(): + parser = ArgumentParser() + parser.add_argument('-c', '--compress', help='compress dtb or not', + action='store_true') + parser.add_argument('-d', '--dtb', help='path to the dtb', type=str, + required=True) + parser.add_argument('-s', '--pagesize', help='align page size', + type=int, choices=[2**i for i in range(11,15)], + default=2048) + parser.add_argument('-o', '--output', help='output file name', + type=FileType('wb'), required=True) + return parser.parse_args() + +def main(): + args = parse_cmdline() + dtimg_head_info = dt_head_info('HSDT', dtimg_version, dtb_count) + write_head_info(dtimg_head_info, args) + write_dtb(args) + clean_gz_file(args) + +if __name__ == '__main__': + main() diff --git a/tools/wltests/android/split_bootimg.pl b/tools/wltests/android/split_bootimg.pl new file mode 100755 index 000000000..d3abee078 --- /dev/null +++ b/tools/wltests/android/split_bootimg.pl @@ -0,0 +1,212 @@ +#!/usr/bin/perl +###################################################################### +# +# File : split_bootimg.pl +# Author(s) : William Enck +# Description : Split appart an Android boot image created +# with mkbootimg. The format can be found in +# android-src/system/core/mkbootimg/bootimg.h +# +# Thanks to alansj on xda-developers.com for +# identifying the format in bootimg.h and +# describing initial instructions for splitting +# the boot.img file. +# +# Last Modified : Tue Dec 2 23:36:25 EST 2008 +# By : William Enck +# +# Copyright (c) 2008 William Enck +# +###################################################################### + +use strict; +use warnings; + +# Turn on print flushing +$|++; + +###################################################################### +## Global Variables and Constants + +my $SCRIPT = __FILE__; +my $IMAGE_FN = undef; + +# Constants (from bootimg.h) +use constant BOOT_MAGIC => 'ANDROID!'; +use constant BOOT_MAGIC_SIZE => 8; +use constant BOOT_NAME_SIZE => 16; +use constant BOOT_ARGS_SIZE => 512; + +# Unsigned integers are 4 bytes +use constant UNSIGNED_SIZE => 4; + +# Parsed Values +my $PAGE_SIZE = undef; +my $KERNEL_SIZE = undef; +my $RAMDISK_SIZE = undef; +my $SECOND_SIZE = undef; + +###################################################################### +## Main Code + +&parse_cmdline(); +&parse_header($IMAGE_FN); + +=format (from bootimg.h) +** +-----------------+ +** | boot header | 1 page +** +-----------------+ +** | kernel | n pages +** +-----------------+ +** | ramdisk | m pages +** +-----------------+ +** | second stage | o pages +** +-----------------+ +** +** n = (kernel_size + page_size - 1) / page_size +** m = (ramdisk_size + page_size - 1) / page_size +** o = (second_size + page_size - 1) / page_size +=cut + +my $n = int(($KERNEL_SIZE + $PAGE_SIZE - 1) / $PAGE_SIZE); +my $m = int(($RAMDISK_SIZE + $PAGE_SIZE - 1) / $PAGE_SIZE); +my $o = int(($SECOND_SIZE + $PAGE_SIZE - 1) / $PAGE_SIZE); + +my $k_offset = $PAGE_SIZE; +my $r_offset = $k_offset + ($n * $PAGE_SIZE); +my $s_offset = $r_offset + ($m * $PAGE_SIZE); + +(my $base = $IMAGE_FN) =~ s/.*\/(.*)$/$1/; +my $k_file = $base . "-kernel"; +my $r_file = $base . "-ramdisk.gz"; +my $s_file = $base . 
"-second.gz"; + +# The kernel is always there +print "Writing $k_file ..."; +&dump_file($IMAGE_FN, $k_file, $k_offset, $KERNEL_SIZE); +print " complete.\n"; + +# The ramdisk is always there +print "Writing $r_file ..."; +&dump_file($IMAGE_FN, $r_file, $r_offset, $RAMDISK_SIZE); +print " complete.\n"; + +# The Second stage bootloader is optional +unless ($SECOND_SIZE == 0) { + print "Writing $s_file ..."; + &dump_file($IMAGE_FN, $s_file, $s_offset, $SECOND_SIZE); + print " complete.\n"; +} + +###################################################################### +## Supporting Subroutines + +=header_format (from bootimg.h) +struct boot_img_hdr +{ + unsigned char magic[BOOT_MAGIC_SIZE]; + unsigned kernel_size; /* size in bytes */ + unsigned kernel_addr; /* physical load addr */ + unsigned ramdisk_size; /* size in bytes */ + unsigned ramdisk_addr; /* physical load addr */ + unsigned second_size; /* size in bytes */ + unsigned second_addr; /* physical load addr */ + unsigned tags_addr; /* physical addr for kernel tags */ + unsigned page_size; /* flash page size we assume */ + unsigned unused[2]; /* future expansion: should be 0 */ + unsigned char name[BOOT_NAME_SIZE]; /* asciiz product name */ + unsigned char cmdline[BOOT_ARGS_SIZE]; + unsigned id[8]; /* timestamp / checksum / sha1 / etc */ +}; +=cut +sub parse_header { + my ($fn) = @_; + my $buf = undef; + + open INF, $fn or die "Could not open $fn: $!\n"; + binmode INF; + + # Read the Magic + read(INF, $buf, BOOT_MAGIC_SIZE); + unless ($buf eq BOOT_MAGIC) { + die "Android Magic not found in $fn. Giving up.\n"; + } + + # Read kernel size and address (assume little-endian) + read(INF, $buf, UNSIGNED_SIZE * 2); + my ($k_size, $k_addr) = unpack("VV", $buf); + + # Read ramdisk size and address (assume little-endian) + read(INF, $buf, UNSIGNED_SIZE * 2); + my ($r_size, $r_addr) = unpack("VV", $buf); + + # Read second size and address (assume little-endian) + read(INF, $buf, UNSIGNED_SIZE * 2); + my ($s_size, $s_addr) = unpack("VV", $buf); + + # Ignore tags_addr + read(INF, $buf, UNSIGNED_SIZE); + + # get the page size (assume little-endian) + read(INF, $buf, UNSIGNED_SIZE); + my ($p_size) = unpack("V", $buf); + + # Ignore unused + read(INF, $buf, UNSIGNED_SIZE * 2); + + # Read the name (board name) + read(INF, $buf, BOOT_NAME_SIZE); + my $name = $buf; + + # Read the command line + read(INF, $buf, BOOT_ARGS_SIZE); + my $cmdline = $buf; + + # Ignore the id + read(INF, $buf, UNSIGNED_SIZE * 8); + + # Close the file + close INF; + + # Print important values + printf "Page size: %d (0x%08x)\n", $p_size, $p_size; + printf "Kernel size: %d (0x%08x)\n", $k_size, $k_size; + printf "Ramdisk size: %d (0x%08x)\n", $r_size, $r_size; + printf "Second size: %d (0x%08x)\n", $s_size, $s_size; + printf "Board name: $name\n"; + printf "Command line: $cmdline\n"; + + # Save the values + $PAGE_SIZE = $p_size; + $KERNEL_SIZE = $k_size; + $RAMDISK_SIZE = $r_size; + $SECOND_SIZE = $s_size; +} + +sub dump_file { + my ($infn, $outfn, $offset, $size) = @_; + my $buf = undef; + + open INF, $infn or die "Could not open $infn: $!\n"; + open OUTF, ">$outfn" or die "Could not open $outfn: $!\n"; + + binmode INF; + binmode OUTF; + + seek(INF, $offset, 0) or die "Could not seek in $infn: $!\n"; + read(INF, $buf, $size) or die "Could not read $infn: $!\n"; + print OUTF $buf or die "Could not write $outfn: $!\n"; + + close INF; + close OUTF; +} + +###################################################################### +## Configuration Subroutines + +sub parse_cmdline { + unless 
($#ARGV == 0) {
+        die "Usage: $SCRIPT boot.img\n";
+    }
+    $IMAGE_FN = $ARGV[0];
+}
diff --git a/tools/wltests/build b/tools/wltests/build
new file mode 100755
index 000000000..64867e1a8
--- /dev/null
+++ b/tools/wltests/build
@@ -0,0 +1,305 @@
+#!/bin/bash
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+# Copyright (C) 2015, ARM Limited and contributors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+################################################################################
+# Internal configurations
+################################################################################
+
+BASE_DIR=$(dirname $(realpath -s $0))
+source "${BASE_DIR}/helpers"
+
+
+################################################################################
+# Configuration options
+################################################################################
+
+usage() {
+    cat <<EOF
+
+Usage: $0 [OPTIONS]
+
+Valid OPTIONS are:
+
+    -b, --build          force a full kernel build
+    -c, --clean          run "make clean" on the kernel tree
+    -d, --distclean      run "make distclean" on the kernel tree
+    -i, --image          build the kernel image
+    -k, --keepconfig     build using the current .config
+    -m, --menuconfig     run "make menuconfig" (does not build unless -b)
+    -s, --savedefconfig  run "make savedefconfig" (does not build unless -b)
+    -t, --dtbs           build the DTBs
+    -x, --clean_all      clean everything (clean + distclean)
+        --modules        build the kernel modules
+
+    -a, --use-ccache     use ccache to speed up rebuilds
+    -l, --config_list "CFG1 [CFG2 ...]"
+                         merge this list of CONFIGs
+        --silent         report only error/warning messages
+
+    -h, --help           print help and exit
+
+    Usage notes:
+    1) Without OPTIONS it builds the kernel with platform defined configs
+    2) To force a kernel full build, when -m/-s/-c/-d/-x are given, add also -b
+    3) If -k is _not_ given, the current .config will be altered by CONFIG_CMD or DEFCONFIG
+
+EOF
+}
+
+# Configuration options
+YES=1
+NO=0
+
+# Configured targets
+MAKE_MENUCONFIG=$NO
+MAKE_SAVEDEFCONFIG=$NO
+MAKE_KEEPCONFIG=$NO
+BUILD_DTBS=$NO
+BUILD_MODULES=$NO
+BUILD_IMAGE=$NO
+CLEAN_KERNEL=$NO
+DISTCLEAN_KERNEL=$NO
+CLEAN_ALL=$NO
+USE_CCACHE=${USE_CCACHE:-$NO}
+OUT=/dev/stdout
+
+# With no options: build the default target
+BUILD_NOW=$NO
+[ $# != 0 ] || BUILD_NOW=$YES
+
+while [[ $# -gt 0 ]]; do
+    case $1 in
+    -b|--build)
+        BUILD_NOW=$YES
+        ;;
+    -c|--clean)
+        CLEAN_KERNEL=$YES
+        ;;
+    -d|--distclean)
+        DISTCLEAN_KERNEL=$YES
+        ;;
+    -i|--image)
+        BUILD_IMAGE=$YES
+        ;;
+    -k|--keepconfig)
+        MAKE_KEEPCONFIG=$YES
+        BUILD_NOW=$YES
+        ;;
+    -m|--menuconfig)
+        MAKE_MENUCONFIG=$YES
+        # Make menuconfig
+        # If no build option is given, it won't build
+        ;;
+    -s|--savedefconfig)
+        MAKE_SAVEDEFCONFIG=$YES
+        # Make savedefconfig
+        # If no build option (-b) is given, it won't build
+        ;;
+    -t|--dtbs)
+        BUILD_DTBS=$YES
+        ;;
+    -x|--clean_all)
+        CLEAN_ALL=$YES
+        ;;
+    --modules)
+        BUILD_MODULES=$YES
+        ;;
+
+    -a|--use-ccache)
+        USE_CCACHE=$YES
+        ;;
+    -l|--config_list)
+        BUILD_CONFIG_LIST="$2"
+        shift
+        ;;
+    --silent)
+        OUT=/dev/null
+        ;;
+
+    -h|--help)
+        usage
+        exit $OK
+        ;;
+    *)
+        usage
+        exit $EAGAIN
+        ;;
+    esac
+    shift # past argument or value
+done
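+
+# Typical invocations, a sketch assuming PLATFORM_PATH has been exported by
+# the caller (e.g. by test_series via a platform's build_images script):
+#   ./build                  # full build with the platform defined configs
+#   ./build -i -t --silent   # rebuild only Image and DTBs, quietly
+#   ./build -m -b            # tweak the config, then force a full build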
+
+# Format configuration
+format_conf() {
+    CONF=$1
+    if [ $CONF -eq $YES ]; then
+        echo -n "YES"
+        return
+    fi
+    echo -n " NO"
+}
+
+# Print information about behavior
+echo
+c_info "Build configuration:"
+c_info "   Menuconfig    : $(format_conf ${MAKE_MENUCONFIG})"
+c_info "   Savedefconfig : $(format_conf ${MAKE_SAVEDEFCONFIG})"
+c_info "   Keepconfig    : $(format_conf ${MAKE_KEEPCONFIG})"
+c_info "   Build full    : $(format_conf ${BUILD_NOW})"
+c_info "   Build Image   : $(format_conf ${BUILD_IMAGE})"
+c_info "   Build dtbs    : $(format_conf ${BUILD_DTBS})"
+c_info "   Build modules : $(format_conf ${BUILD_MODULES})"
+c_info "   Clean         : $(format_conf ${CLEAN_KERNEL})"
+c_info "   Distclean     : $(format_conf ${DISTCLEAN_KERNEL})"
+c_info "   Clean all     : $(format_conf ${CLEAN_ALL})"
+c_info "   Use ccache    : $(format_conf ${USE_CCACHE})"
+
+# Load platform definitions
+source "${DEFINITIONS_PATH}"
+
+# Print information about platform
+echo
+c_info "Platform configuration:"
+c_info "   PLATFORM_NAME : ${PLATFORM_NAME}"
+c_info "   DEFCONFIG     : ${DEFCONFIG}"
+c_info "   CONFIG_CMD    : ${CONFIG_CMD}"
+c_info "   ARCH          : ${ARCH}"
+c_info "   CROSS_COMPILE : ${CROSS_COMPILE}"
+
+
+################################################################################
+# Configuring builds
+################################################################################
+
+# Export paths
+export LD_LIBRARY_PATH="${TOOLS_PATH}/lib/:$LD_LIBRARY_PATH"
+export PATH="${TOOLS_PATH}:$PATH"
+
+# Check for a valid toolchain
+which ${CROSS_COMPILE}gcc &>/dev/null
+if [ $? -ne 0 ]; then
+    echo
+    echo
+    c_error "(Cross)compiler [${CROSS_COMPILE}gcc] not found!"
+    c_warning "Ensure CROSS_COMPILE is set to a valid toolchain,"
+    c_warning "which should be reachable from your PATH"
+    echo
+    exit $EAGAIN
+fi
+
+# Export compiler configuration
+export ARCH
+export CROSS_COMPILE
+if [ "${USE_CCACHE}" -eq $YES ]; then
+    export CC="ccache ${CROSS_COMPILE}gcc"
+    export CXX="ccache ${CROSS_COMPILE}g++"
+fi
+
+# Enable parallel builds
+NCPUS="$(( 2 * $(nproc) ))"
+
+
+################################################################################
+# Build cleanup
+################################################################################
+
+if [ "${CLEAN_KERNEL}" -eq $YES ] || [ "${CLEAN_ALL}" -eq $YES ]; then
+    (cd "${KERNEL_PATH}"; make clean)
+fi
+
+if [ "${DISTCLEAN_KERNEL}" -eq $YES ] || [ "${CLEAN_ALL}" -eq $YES ]; then
+    (cd "${KERNEL_PATH}"; make distclean)
+fi
+
+if [ "${MAKE_KEEPCONFIG}" -eq $YES ]; then
+    c_warning "building with current .config"
+fi
+
+
+################################################################################
+# Build configuration
+################################################################################
+
+if [ ! -z "${CONFIG_CMD}" ] && [ "${MAKE_KEEPCONFIG}" -eq $NO ]; then
+    c_info "Running CONFIG_CMD..."
+    (set -x; cd "${KERNEL_PATH}"; ${CONFIG_CMD})
+fi
+
+if [ ! -z "${DEFCONFIG}" ] && [ "${MAKE_KEEPCONFIG}" -eq $NO ]; then
+    c_info "Running DEFCONFIG [$DEFCONFIG]..."
+    (set -x; cd "${KERNEL_PATH}"; make ${DEFCONFIG})
+fi
+
+if [ ! -z "${BUILD_CONFIG_LIST}" ]; then
+    c_info "Running [merge_configs.sh]..."
+    list_configs=(${BUILD_CONFIG_LIST})
+    timestamp=$(date +%s)
+    tmp_file="/tmp/${timestamp}"
+    for config in "${list_configs[@]}"; do
+        echo ${config} >> "${tmp_file}"
+    done
+    if [ -f "${KERNEL_PATH}/scripts/kconfig/merge_config.sh" ]; then
+        (set -x; cd "${KERNEL_PATH}"; \
+         ./scripts/kconfig/merge_config.sh -m \
+            .config ${tmp_file})
+    else
+        c_error "No merge_config.sh script found"
+        exit $ENOENT
+    fi
+    rm -f ${tmp_file}
+    c_info "Running oldconfig after merge of configs"
+    (set -x; cd "${KERNEL_PATH}"; \
+     yes "" 2>/dev/null | make oldconfig)
+fi
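+
+# For example, to merge a couple of config fragments on top of the current
+# .config and then rebuild (hypothetical options):
+#   ./build -l "CONFIG_SCHED_DEBUG=y CONFIG_FTRACE=y" -b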
+
+
+################################################################################
+# Make the required target
+################################################################################
+
+if [ ${BUILD_IMAGE} -eq $YES ]; then
+    c_info "Making [Image]..."
+    (set -x; cd "${KERNEL_PATH}"; make -j${NCPUS} "$KERNEL_IMAGE" >${OUT})
+fi
+
+if [ ${BUILD_DTBS} -eq $YES ]; then
+    c_info "Making [dtbs]..."
+    (set -x; cd "${KERNEL_PATH}"; make -j${NCPUS} dtbs >${OUT})
+fi
+
+if [ ${BUILD_MODULES} -eq $YES ]; then
+    c_info "Making [modules]..."
+    (set -x; cd "${KERNEL_PATH}"; make -j${NCPUS} modules >${OUT})
+fi
+
+if [ ${MAKE_MENUCONFIG} -eq $YES ]; then
+    c_info "Making [menuconfig]..."
+    (set -x; cd "${KERNEL_PATH}"; make menuconfig)
+fi
+
+if [ ${MAKE_SAVEDEFCONFIG} -eq $YES ]; then
+    c_info "Making [savedefconfig]..."
+    (set -x; cd "${KERNEL_PATH}"; make savedefconfig)
+fi
+
+if [ ${BUILD_NOW} -eq $YES ]; then
+    c_info "Making default target..."
+    (set -x; cd "${KERNEL_PATH}"; make -j${NCPUS} >${OUT})
+fi
+
diff --git a/tools/wltests/helpers b/tools/wltests/helpers
new file mode 100644
index 000000000..8922b3f68
--- /dev/null
+++ b/tools/wltests/helpers
@@ -0,0 +1,209 @@
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+# Copyright (C) 2015, ARM Limited and contributors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+################################################################################
+# Paths
+################################################################################
+
+# Setup a minimal path for the build scripts
+BASE_DIR=$(realpath $BASE_DIR)
+
+# Required options
+KERNEL_PATH=${KERNEL_PATH:-"/usr/src/linux"}
+
+# Generics
+TOOLS_PATH="${BASE_DIR}/tools"
+DEFINITIONS_PATH="${PLATFORM_PATH}/definitions"
+ARTIFACTS_PATH="${PLATFORM_PATH}/artifacts"
+
+# Android
+ANDROID_SCRIPTS_PATH="${BASE_DIR}/android"
+ANDROID_OUTPUT_PATH="${PLATFORM_PATH}/artifacts"
+
+
+################################################################################
+# Exit codes
+################################################################################
+
+OK=0        # Success
+ENOENT=2    # No such file or directory
+EIO=5       # I/O error
+EAGAIN=11   # Try again
+ENODEV=19   # No such device
+EINVAL=22   # Invalid argument
+
+# Helper definitions
+FATAL_ERROR=1
+NONFATAL_ERROR=2
+SUCCESS_CODE=$OK
+
+
+################################################################################
+# Logging functions
+################################################################################
+c_error() {
+    NOW=$(date +"%H:%M:%S")
+    # If there is only one parameter, let's assume it's just the message
+    if [ $# -gt 1 ]; then
+        local parent_lineno="$1"
+        local message="$2"
+        echo -e "${red}$NOW - ERROR: on or near line ${parent_lineno}: ${message}${nocol}"
+        return
+    fi
+
+    local message="$1"
+    echo -e "${red}$NOW - ERROR : ${message}${nocol}"
+}
+
+c_warning() {
+    NOW=$(date +"%H:%M:%S")
+    # If there is only one parameter, let's assume it's just the message
+    if [ $# -gt 1 ]; then
+        local parent_lineno="$1"
+        local message="$2"
+        echo -e "${yellow}$NOW - WARNING: on or near line ${parent_lineno}: ${message}${nocol}"
+        return
+    fi
+    local message="$1"
+    echo -e "${yellow}$NOW - WARNING : ${message}${nocol}"
+}
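+
+# Example usage from a sourcing script:
+#   c_info "Flashing images..."          # timestamped INFO message
+#   c_error $LINENO "flashing failed"    # also reports the offending line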
+
+c_info() {
+    NOW=$(date +"%H:%M:%S")
+    # If there is only one parameter, let's assume it's just the message
+    if [ $# -gt 1 ]; then
+        local parent_lineno="$1"
+        local message="$2"
+        echo -e "${blue}$NOW - INFO: on or near line ${parent_lineno}: ${message}${nocol}"
+        return
+    fi
+    local message="$1"
+    echo -e "${blue}$NOW - INFO : ${message}${nocol}"
+}
+
+d_notify() {
+    MESSAGE=$1
+    ICON=$2
+    # Let's try to send a desktop notification,
+    # silently fails if there is no support.
+    notify-send \
+        --icon=$ICON \
+        --urgency=critical \
+        --expire-time=1500 \
+        "Test Series" \
+        "$MESSAGE" \
+        2>/dev/null
+}
+
+my_tput() {
+    if [ "${TERM-dumb}" == dumb ]; then
+        return
+    fi
+    tput $*
+}
+
+box_out()
+{
+    local s=("$@") b w
+    for l in "${s[@]}"; do
+        ((w<${#l})) && { b="$l"; w="${#l}"; }
+    done
+    my_tput setaf 3
+    echo -e "|-${b//?/-}-|"
+    for l in "${s[@]}"; do
+        printf '| %s%*s%s |\n' "$(my_tput setaf 4)" "-$w" "$l" "$(my_tput setaf 3)"
+        # echo "|-${b//?/-}-|"
+    done
+    echo "|-${b//?/-}-|"
+    my_tput sgr 0
+}
+
+
+################################################################################
+# Utilities
+################################################################################
+
+c_extract() {
+    if [ -f $1 ] ; then
+        case $1 in
+        *.tar.xz)
+            tar xvJf $1 -C $2
+            ;;
+        *.tar.bz2)
+            tar xvjf $1 -C $2
+            ;;
+        *.tar.gz)
+            tar xvzf $1 -C $2
+            ;;
+        *.rar)
+            unrar x $1 $2
+            ;;
+        *.tar)
+            tar xvf $1 -C $2
+            ;;
+        *.tbz2)
+            tar xvjf $1 -C $2
+            ;;
+        *.tgz)
+            tar xvzf $1 -C $2
+            ;;
+        *.zip)
+            unzip $1 -d $2
+            ;;
+        *.7z)
+            7z x $1 -o $2
+            ;;
+        *)
+            c_error "don't know how to extract archive $1"
+            exit $EINVAL
+            ;;
+        esac
+    else
+        c_error "'$1' is not a valid file"
+        exit $ENOENT
+    fi
+}
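+
+# Example: c_extract "${TOOLS_PATH}/toolchain.tar.xz" "${TOOLS_PATH}"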
+# + +################################################################################ +# Other internal configurations +################################################################################ +BASE_DIR=$(dirname $(realpath $0)) +source "${BASE_DIR}/helpers" + + +################################################################################ +# Builds configuration +################################################################################ + +PLATFORM=${PLATFORM:-'hikey960_android-4.4'} +KERNEL_PATH=${KERNEL_PATH:-$BASE_DIR/kernel} +SERIES=${SERIES:-''} +TEST_CMD=${TEST_CMD:-'echo "Test DONE!"'} + +if [ -z $ANDROID_HOME ]; then + ADB=${ADB:-$(which adb)} + FASTBOOT=${FASTBOOT:-$(which fastboot)} +else + ADB=${ADB:-$ANDROID_HOME/platform-tools/adb} + FASTBOOT=${FASTBOOT:-$ANDROID_HOME/platform-tools/fastboot} +fi + +EMETER=${EMETER:-'ACME'} +ACME_IP=${ACME_IP:-'192.168.0.1'} +ACME_USB=${ACME_USB:-'device1'} + +DEVICE=${DEVICE:-''} +REBOOT_TIMEOUT=${REBOOT_TIMEOUT:-180} + +DRYRUN=${DRYRUN:-0} + + +################################################################################ +# Check configuration +################################################################################ + +usage() { + cat < + + Such a list can be enriched by adding an "ID:" + in front of each line. + If an "ID:" column is present, the commits will + be considered in progressive "ID:" order while + discarding those with ID=00: + default: SERIES='' + + -t, --test_cmd PATH The path of a custom test script to run for + each kernel. + default: TEST_CMD='echo >>> Testing SHA1: ' + +Optional arguments: + + --adb the ADB binary to use + default: ADB=[ANDROID_HOME/platform-tools/adb|\$(which adb)] + --fastboot the FASTBOOT binary to use + default: FASTBOOT=[ANDROID_HOME/platform-tools/fatboot|\$(which fatboot)] + + --emeter the Energy Meter used to power-cycle the device + default: EMETER=ACME + --acme_ip the IP address of an ACME energy meter + default: ACME_IP=192.168.0.1 + --acme_usb the ACME channel used to control a USB + passthrought connetion + default ACME_USB=device1 + + --device the Android device to target + default DEVICE=Default + + --reboot_timeout maximum number of seconds to wait for a device + to complete the boot. + default REBOOT_TIMEOUT=180 + + --dryrun don't actually run any command + deafult: DRYRUN=0 + +EOF +} + +while [[ $# -gt 0 ]]; do + case $1 in + # Mandatory arguments + -p|--platform) + PLATFORM=$2 + shift + ;; + -k|--kernel_path) + KERNEL_PATH=$2 + shift + ;; + -s|--series) + SERIES=$2 + shift + ;; + -t|--test_cmd) + TEST_CMD=$2 + shift + ;; + + # Tools + --adb) + ADB=$2 + shift + ;; + --fastboot) + FASTBOOT=$2 + shift + ;; + + # Energy meter + --emeter) + EMETER=$2 + shift + ;; + --acme_ip) + ACME_IP=$2 + shift + ;; + --acme_usb) + ACME_USB=$2 + shift + ;; + + # Target Device + --device) + DEVICE=$2 + shift + ;; + + # Execution customization + --dryrun) + DRYRUN=1 + ;; + + # Usage notes + -h|--help) + usage + exit $OK + ;; + *) + c_error "Unknown option: $1" + usage + exit $EAGAIN + ;; + esac + shift # past argument or value +done + +# Prepare PLATFORM +export PLATFORM_PATH=$BASE_DIR/platforms/$PLATFORM +ASSETS_REQUIRED="definitions build_images flash_images" +for ASSET in $ASSETS_REQUIRED; do +if [ ! 
+
+# Prepare PLATFORM
+export PLATFORM_PATH=$BASE_DIR/platforms/$PLATFORM
+ASSETS_REQUIRED="definitions build_images flash_images"
+for ASSET in $ASSETS_REQUIRED; do
+    if [ ! -f $PLATFORM_PATH/$ASSET ]; then
+        c_error "The specified PLATFORM=$PLATFORM is not supported, or it does not provide a [$ASSET] file"
+        # List all platforms which provide all the required assets
+        c_warning "Supported platforms are:"
+        ls $BASE_DIR/platforms | while read PLAT; do
+            RESULT=$OK
+            for ASSET in $ASSETS_REQUIRED; do
+                if [ ! -f $BASE_DIR/platforms/$PLAT/$ASSET ]; then
+                    RESULT=$ENOENT; break;
+                fi
+            done
+            [ $RESULT == $OK ] || continue
+            c_warning "  - $PLAT"
+        done
+        exit $EINVAL
+    fi
+done
+export PLATFORM_OVERLAY_PATH=$PLATFORM_PATH
+
+# Prepare KERNEL
+export KERNEL_PATH=$(realpath -s $KERNEL_PATH)
+grep -E "mainmenu .* Kernel Configuration" $KERNEL_PATH/Kconfig &>/dev/null
+if [ $? -ne 0 ]; then
+    c_error "KERNEL_PATH does not seem to point to a valid kernel source tree"
+    exit $EINVAL
+fi
+
+# Prepare SERIES
+if [ -z $SERIES ]; then
+    c_error "A valid commit series should be defined by SERIES"
+    exit $EINVAL
+fi
+if [ ! -f $SERIES ]; then
+    c_error "SERIES points to a non-existent commit series"
+    exit $ENOENT
+fi
+
+# Prepare ADB and FASTBOOT commands to target the specified device
+if [ ! -f $ADB ]; then
+    c_error "ADB command [$ADB] not found"
+    exit $EINVAL
+fi
+if [ ! -f $FASTBOOT ]; then
+    c_error "FASTBOOT command [$FASTBOOT] not found"
+    exit $EINVAL
+fi
+if [ "x$DEVICE" != "x" ]; then
+    ADB="$ADB -s $DEVICE"
+    FASTBOOT="$FASTBOOT -s $DEVICE"
+fi
+export ADB
+export FASTBOOT
+
+# Prepare Energy Meter device
+case $EMETER in
+'ACME')
+    EMETER_CONF="ACME (ACME_USB: $ACME_USB @ ACME_IP: $ACME_IP)"
+    ;;
+*)
+    c_error "Energy meter [EMETER=$EMETER] not supported"
+    exit $EINVAL
+    ;;
+esac
+
+# Report current setup
+box_out \
+    "Mandatory conf" \
+    "  PLATFORM    : $PLATFORM" \
+    "  KERNEL_PATH : $KERNEL_PATH" \
+    "  SERIES      : $SERIES" \
+    "  TEST_CMD    : $TEST_CMD" \
+    "" \
+    "Tools" \
+    "  ADB      : $ADB" \
+    "  FASTBOOT : $FASTBOOT" \
+    "" \
+    "Energy meter" \
+    "  EMETER : $EMETER_CONF" \
+    "  DEVICE : $DEVICE" \
+    "" \
+    "Internals" \
+    "  Scripts dir : $BASE_DIR" \
+    "  DRYRUN      : $DRYRUN"
+
+################################################################################
+# Device access via USB connection management
+################################################################################
+usb_disconnect() {
+    c_info "Disconnecting USB..."
+    if [ $DRYRUN -eq 1 ]; then return; fi
+    case $EMETER in
+    'ACME')
+        ssh root@$ACME_IP \
+            "echo 0 > /sys/bus/iio/devices/iio:$ACME_USB/in_active"
+        ;;
+    *)
+        c_error "Energy meter $EMETER not supported"
+        exit $EINVAL
+        ;;
+    esac
+}
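+
+# The same ACME switch can be driven by hand when debugging, using the
+# default ACME_IP/ACME_USB values above, e.g.:
+#   ssh root@192.168.0.1 'echo 1 > /sys/bus/iio/devices/iio:device1/in_active'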
+
+usb_connect() {
+    c_info "Connecting USB..."
+    if [ $DRYRUN -eq 1 ]; then return; fi
+    case $EMETER in
+    'ACME')
+        ssh root@$ACME_IP \
+            "echo 1 > /sys/bus/iio/devices/iio:$ACME_USB/in_active"
+        ;;
+    *)
+        c_error "Energy meter $EMETER not supported"
+        exit $EINVAL
+        ;;
+    esac
+    sleep 5
+}
+
+################################################################################
+# FASTBOOT mode checking
+################################################################################
+device_in_fastboot() {
+    if [ "x$DEVICE" != "x" ]; then
+        [[ $($FASTBOOT devices | grep $DEVICE | wc -l) -gt 0 ]] || return $ENODEV
+    else
+        [[ $($FASTBOOT devices | wc -l) -gt 0 ]] || return $ENODEV
+    fi
+    return $OK
+}
+device_not_in_fastboot() {
+    device_in_fastboot || return $OK
+    return $EAGAIN
+}
+
+################################################################################
+# ADB mode checking
+################################################################################
+device_in_adb() {
+    if [ "x$DEVICE" != "x" ]; then
+        [[ $($ADB devices | grep $DEVICE | wc -l) -gt 0 ]] || return $ENODEV
+    else
+        [[ $($ADB devices | wc -l) -gt 2 ]] || return $ENODEV
+    fi
+    return $OK
+}
+device_not_in_adb() {
+    device_in_adb || return $OK
+    return $EAGAIN
+}
+
+################################################################################
+# DEVICE connection checks
+################################################################################
+device_connected() {
+    seconds=${1:-5}
+
+    [ $DRYRUN -eq 1 ] || \
+    while [[ true ]]; do
+        device_not_in_fastboot || return $OK
+        device_not_in_adb || return $OK
+
+        echo
+        c_warning "Device not in FASTBOOT nor in ADB mode"
+        let seconds--
+        if [[ $seconds -eq 0 ]]; then
+            c_error "device not connected"
+            return $ENODEV
+        fi
+        usb_disconnect
+        c_warning "Check again in 1s..."
+        sleep 1
+        usb_connect
+    done
+    return $OK
+}
+device_not_connected() {
+    device_connected || return $OK
+    return $EAGAIN
+}
+
+device_status() {
+    _CNT='YES'; device_connected || _CNT='NO'
+    _FBT='YES'; device_in_fastboot || _FBT='NO'
+    _ADB='YES'; device_in_adb || _ADB='NO'
+    c_info "Current device Status"
+    c_info "  Reachable : $_CNT"
+    c_info "  Fastboot  : $_FBT"
+    c_info "  ADB Mode  : $_ADB"
+}
+
+################################################################################
+# DEVICE boot checks
+################################################################################
+reboot_timedout() {
+    [ $ELAPSED -lt $REBOOT_TIMEOUT ] || return $OK
+    return $EAGAIN
+}
+reboot_not_timedout() {
+    reboot_timedout || return $OK
+    return $EAGAIN
+}
+
+boot_completed() {
+    COMPLETED=$($ADB shell getprop sys.boot_completed)
+    [[ "x$COMPLETED" = x1* ]] || return $EAGAIN
+    return $OK
+}
+boot_not_completed() {
+    boot_completed || return $OK
+    return $EAGAIN
+}
+
+
+################################################################################
+# FASTBOOT mode
+################################################################################
+reboot_fastboot() {
+    seconds=$1
+
+    # Check device is connected
+    if device_not_connected; then
+        c_error "device not connected"
+        return $ENODEV
+    fi
+
+    # Already in fastboot mode: nothing to do
+    if device_in_fastboot; then
+        c_info "Device already in fastboot mode, continuing..."
+        return $OK
+    fi
+
+    c_info "Device in ADB mode"
+    c_info "Rebooting into bootloader..."
+    $ADB reboot bootloader
+
+    echo
+    c_info "Waiting for bootloader up to $REBOOT_TIMEOUT[s]..."
+    ELAPSED=0
+    sleep 5
+
+    # Wait for device to leave ADB mode
+    [ $DRYRUN -eq 1 ] || \
+    until device_not_in_adb || reboot_timedout; do
+        sleep 3; let ELAPSED+=3
+    done
+    echo
+    if reboot_timedout; then
+        c_error "device not leaving ADB mode"
+        d_notify "Bootloader enter TIMEOUT!" face-embarrassed
+        exit $EIO
+    fi
+
+    # Wait for device to enter FASTBOOT mode
+    [ $DRYRUN -eq 1 ] || \
+    while device_not_in_fastboot && reboot_not_timedout; do
+        usb_disconnect &>/dev/null
+        sleep 3; let ELAPSED+=3
+        usb_connect &>/dev/null
+    done
+    echo
+    if reboot_timedout; then
+        c_error "device not entering in FASTBOOT mode"
+        d_notify "Bootloader enter TIMEOUT!" face-embarrassed
+        exit $EIO
+    fi
+
+    return $OK
+}
+
+################################################################################
+# Build and test a specified SHA1 checkout
+################################################################################
+
+name_sha1() {
+    COMMIT_SHA1=${1:0:7}
+    MINLEN=12345
+
+    # In case the specified SHA1 has no name, use the SHA1 itself
+    COMMIT_NAME=$COMMIT_SHA1
+
+    # Find a name for each possible REF
+    mkfifo tmp_pipe &>/dev/null
+    git -C $KERNEL_PATH for-each-ref \
+        --sort=-committerdate \
+        --format='%(objectname:short) %(refname:short)' \
+        refs/heads/ refs/remotes/ refs/tags |
+        grep $COMMIT_SHA1 | awk '{print $2}' > tmp_pipe &
+    while IFS= read -r NAME; do
+        # Return the first name starting with "test_"
+        if [[ $NAME = test_* ]]; then
+            COMMIT_NAME=${NAME//\//:}
+            break
+        fi
+        # Or the shortest name for a given SHA1
+        if [ ${#NAME} -lt $MINLEN ]; then
+            MINLEN=${#NAME}
+            COMMIT_NAME=${NAME//\//:}
+        fi
+    done < tmp_pipe
+    rm tmp_pipe
+}
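+
+# Example: a commit reachable as both branch "eas/unified" and tag
+# "test_eas/v4" (hypothetical refs) would be named "test_eas:v4", since
+# names starting with "test_" win and slashes are replaced by colons.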
+
+match_sha1() {
+    COMMIT_SHA1=$1
+
+    c_info "Current kernel: "
+    CURRENT=$($ADB shell 'uname -a')
+    c_info "  $CURRENT"
+
+    [[ $CURRENT = *$COMMIT_SHA1* ]] || return $EAGAIN
+
+    return $OK
+}
+
+build_sha1() {
+    COMMIT_SHA1=$1
+    COMMIT_DESC=$2
+
+    ### Prepare KERNEL for build
+    pushd $KERNEL_PATH &>/dev/null
+    echo
+    c_info "Checkout kernel: $KERNEL_PATH @ $COMMIT_SHA1..."
+    git checkout $COMMIT_SHA1; ERROR=$?
+    if [ $ERROR -ne 0 ]; then
+        c_error "Failed to checkout [$COMMIT_SHA1]"
+        popd &>/dev/null
+        return $ERROR
+    fi
+    popd &>/dev/null
+
+    ### Build all IMAGES
+    pushd $BASE_DIR &>/dev/null
+    $PLATFORM_PATH/build_images
+    RESULT=$?
+    popd &>/dev/null
+    if [ $RESULT -ne $OK ]; then
+        exit $ENOENT
+    fi
+    [ $RESULT -eq $OK ] || return $RESULT
+}
+
+flash_sha1() {
+    COMMIT_SHA1=$1
+    COMMIT_DESC=$2
+
+    build_sha1 "$COMMIT_SHA1" "$COMMIT_DESC"; RESULT=$?
+    [ $RESULT -eq $OK ] || return $RESULT
+
+    ### Reboot device into BOOTLOADER
+    echo
+    c_info "Rebooting device into bootloader..."
+    attempts=3
+    [ $DRYRUN -eq 1 ] || reboot_fastboot
+    [ $DRYRUN -eq 1 ] || \
+    while [[ $? -ne $OK ]]; do
+        let attempts--
+        if [[ $attempts -eq 0 ]]; then
+            c_error "device not entering FASTBOOT mode"
+            exit $EIO
+        fi
+        c_warning "Failed entering FASTBOOT mode, $attempts remaining attempts..."
+        reboot_fastboot
+    done
+
+    ### Flash generated IMAGES
+    pushd $PLATFORM_PATH/artifacts &>/dev/null
+    [ $DRYRUN -eq 1 ] || $PLATFORM_PATH/flash_images
+    RESULT=$?
+    popd &>/dev/null
+    [ $RESULT -eq $OK ] || return $RESULT
+
+    ### Reboot into new kernel
+    echo
+    c_info "Reboot new kernel..."
+    d_notify "Rebooting device..." face-monkey
+    [ $DRYRUN -eq 1 ] || $FASTBOOT reboot
+
+    c_info "Waiting up to $REBOOT_TIMEOUT[s] for boot to complete..."
+    ELAPSED=0
+
+    #### Wait for device to reboot
+    [ $DRYRUN -eq 1 ] || \
+    until device_in_adb || reboot_timedout; do
+        usb_disconnect &>/dev/null
+        sleep 3; let ELAPSED+=3
+        usb_connect &>/dev/null
+    done
+    echo
+    if reboot_timedout; then
+        c_error "device not entering ADB mode"
+        d_notify "Device reboot TIMEOUT!" face-embarrassed
+        exit $EIO
+    fi
+    c_info "Device in ADB mode"
+
+    ### Wait for boot to complete
+    [ $DRYRUN -eq 1 ] || \
+    until boot_completed || reboot_timedout; do
+        sleep 3; let ELAPSED+=3
+    done
+    echo
+    if reboot_timedout; then
+        c_error "device still booting?!?"
+        d_notify "Device boot completion TIMEOUT!" face-embarrassed
+        exit $EIO
+    fi
+    c_info "Boot completed, wait 10[s] more..."
+    sleep 10 # Add an additional safe margin
+
+    ### Check that we are running the expected kernel
+    match_sha1 $COMMIT_SHA1; ERROR=$?
+    [ $ERROR -ne 0 ] || c_error "Failed to flash kernel [$COMMIT_DESC]!"
+    return $?
+}
+
+test_sha1() {
+    COMMIT_SHA1=$1
+    COMMIT_ID=$2
+    COMMITS_COUNT=$3
+    COMMIT_DESC="$(grep $COMMIT_SHA1 $SERIES)"
+
+    # Get a name (if any) for the specified SHA1
+    name_sha1 $COMMIT_SHA1
+    c_info "Testing kernel:"
+    c_info "  SeriesID : $COMMIT_DESC"
+    c_info "  CommitID : $COMMIT_SHA1 $COMMIT_NAME"
+
+    echo
+    c_info "Check current kernel..."
+    if device_in_adb; then
+        match_sha1 $COMMIT_SHA1
+        if [ $? -ne $OK ]; then
+            c_info "Kernel update required!"
+            flash_sha1 $COMMIT_SHA1 "$COMMIT_DESC"
+            if [[ $? -ne 0 ]]; then
+                c_warning "Skipping kernel [$COMMIT_DESC]"
+                return
+            fi
+        fi
+    else
+        c_warning "Device not connected via ADB, cannot check current kernel"
+        c_warning "Forcing build, flash and reboot of the selected kernel"
+        flash_sha1 $COMMIT_SHA1 "$COMMIT_DESC"
+        if [[ $? -ne 0 ]]; then
+            c_warning "Skipping kernel [$COMMIT_DESC]"
+            d_notify "Skipping kernel [$COMMIT_DESC]" face-sick
+            return
+        fi
+    fi
+
+    echo
+    c_info "Running tests for [$COMMIT_SHA1: $COMMIT_NAME]..."
+    d_notify "Testing kernel $COMMIT_ID/$COMMITS_COUNT:\n$COMMIT_SHA1: $COMMIT_NAME..." face-tired
+    [ $DRYRUN -eq 1 ] || (set -x; eval $TEST_CMD; set +x)
+}
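+
+# A SERIES file is a list of SHA1s, optionally prefixed by a progressive
+# "ID:" column used for ordering, e.g. (hypothetical SHA1s):
+#   01: 1a2b3c4d sched/fair: first commit to test
+#   00: 5e6f7a8b sched/fair: commit disabled for this run
+#   02: 9c0d1e2f sched/fair: second commit to test
+# Commits with ID "00" are filtered out below.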
+
+# Prepare list of commits to test
+SELECTED_LIST="$(realpath $SERIES).selected"
+FMT=$(grep -v "^#" $SERIES | grep -v "00: " | head -n1 | awk '{print $1}')
+if [[ $FMT = *: ]]; then
+    # Filter out disabled commits and sort based on progressive ID:
+    grep -E -v "^$|^#|00: " $SERIES | sort \
+        | awk '{$1=""; print $0}' > $SELECTED_LIST
+else
+    grep -E -v "^$|^#" $SERIES \
+        | awk '{print $0}' > $SELECTED_LIST
+fi
+
+# Report commits to be tested
+COMMIT_ID=0
+echo
+c_info "Commits selected for testing:"
+cat $SELECTED_LIST | \
+while read COMMIT; do
+    let COMMIT_ID++
+    STR=$(printf "  %3s %s\n" $COMMIT_ID "$COMMIT")
+    c_info "$STR"
+done
+
+# Extract total number of commits
+COMMITS_COUNT=$(wc -l $SELECTED_LIST)
+COMMITS_COUNT=${COMMITS_COUNT%% *}
+
+# Ensure USB is on at tests start
+usb_connect 1>/dev/null
+printf "\n%80s\n\n" | tr " " "="
+
+# If the target isn't connected at the start, bail out
+device_connected || exit 1
+
+# Test each commit
+COMMIT_ID=1
+# Here we read from an arbitrary file descriptor 10 to avoid overlaps with
+# stdin generated by the adb commands in the loop body
+while read -u10 COMMITS; do
+    # Extract SHA1 from commit description
+    COMMIT_SHA1=${COMMITS%% *}
+
+    box_out "PROGRESS : $COMMIT_ID/$COMMITS_COUNT" \
+            "COMMIT   : $COMMITS"
+    echo
+    device_status
+
+    echo
+    test_sha1 $COMMIT_SHA1 $COMMIT_ID $COMMITS_COUNT
+
+    let COMMIT_ID++
+    printf "\n%80s\n\n" | tr " " "="
+
+done 10<$SELECTED_LIST
+
+d_notify "Tests completed!" face-cool
-- GitLab

From 60996a6f4dddd1632fbc5b29a2ac607047ef4fea Mon Sep 17 00:00:00 2001
From: Patrick Bellasi
Date: Fri, 6 Oct 2017 18:09:22 +0100
Subject: [PATCH 04/84] tools/wltests: add support for Hikey960

This provides the test_series definitions for a Hikey960 board running
an Android-4.4 kernel.

This platform definition consists of these modules:

- definitions: a set of variables expected by the wltests scripts
- build_images: a script to build all the images required to update the
  kernel on a Hikey960 Android target
- flash_images: a script to flash all the images required to update the
  kernel on a Hikey960 Android target

Signed-off-by: Patrick Bellasi
---
 .../hikey960_android-4.4/build_images      | 20 ++++++++
 .../hikey960_android-4.4/definitions       | 47 +++++++++++++++++++
 .../hikey960_android-4.4/flash_images      | 25 ++++++++++
 tools/wltests/test_series                  |  2 +-
 4 files changed, 93 insertions(+), 1 deletion(-)
 create mode 100755 tools/wltests/platforms/hikey960_android-4.4/build_images
 create mode 100644 tools/wltests/platforms/hikey960_android-4.4/definitions
 create mode 100755 tools/wltests/platforms/hikey960_android-4.4/flash_images

diff --git a/tools/wltests/platforms/hikey960_android-4.4/build_images b/tools/wltests/platforms/hikey960_android-4.4/build_images
new file mode 100755
index 000000000..c2ae39b2e
--- /dev/null
+++ b/tools/wltests/platforms/hikey960_android-4.4/build_images
@@ -0,0 +1,20 @@
+#!/bin/bash
+
+SCRIPT_DIR=$(dirname $(realpath -s $0))
+BASE_DIR="$SCRIPT_DIR/../.."
+source "${BASE_DIR}/helpers"
+source "${PLATFORM_PATH}/definitions"
+
+################################################################################
+# Build all images required to test a new kernel
+################################################################################
+
+./build --image --dtbs --silent; ERROR=$?
+[ $ERROR -eq 0 ] || exit $ERROR
+
+./android/create_boot_img.sh; ERROR=$?
+[ $ERROR -eq 0 ] || exit $ERROR
+
+./android/create_dt_img.sh; ERROR=$?
+[ $ERROR -eq 0 ] || exit $ERROR
+
diff --git a/tools/wltests/platforms/hikey960_android-4.4/definitions b/tools/wltests/platforms/hikey960_android-4.4/definitions
new file mode 100644
index 000000000..ddb4677cf
--- /dev/null
+++ b/tools/wltests/platforms/hikey960_android-4.4/definitions
@@ -0,0 +1,47 @@
+
+################################################################################
+# Build definitions for Hikey960
+################################################################################
+
+PLATFORM_NAME="Hikey960_Android"
+
+################################################################################
+# Boot Image Configuration
+################################################################################
+
+KERNEL_IMAGE="${KERNEL_IMAGE:-Image.gz}"
+KERNEL_CMDLINE="${KERNEL_CMDLINE:-\
+ loglevel=15\
+ androidboot.hardware=hikey960\
+ androidboot.selinux=permissive\
+ firmware_class.path=/system/etc/firmware\
+ buildvariant=userdebug\
+}"
+RAMDISK_IMAGE="${RAMDISK_IMAGE:-ramdisk.gz}"
+KERNEL_DTB="${KERNEL_DTB:-hisilicon/hi3660-hikey960.dtb}"
+
+ANDROID_BOOT_IMAGE="${ANDROID_BOOT_IMAGE:-boot.img}"
+ANDROID_IMAGE_BASE="${ANDROID_IMAGE_BASE:-0x0}"
+ANDROID_IMAGE_PAGESIZE="${ANDROID_IMAGE_PAGESIZE:-2048}"
+ANDROID_OS_VERSION="${ANDROID_OS_VERSION:-O}"
+ANDROID_OS_PATCH_LEVEL="${ANDROID_OS_PATCH_LEVEL:-2017-04-05}"
+ANDROID_TAGS_OFFSET="${ANDROID_TAGS_OFFSET:-0x07A00000}"
+ANDROID_KERNEL_OFFSET="${ANDROID_KERNEL_OFFSET:-0x00080000}"
+ANDROID_RAMDISK_OFFSET="${ANDROID_RAMDISK_OFFSET:-0x07C00000}"
+
+################################################################################
+# Device Tree Configuration
+################################################################################
+
+ANDROID_DTB_IMAGE="${ANDROID_DTB_IMAGE:-dts.img}"
+ANDROID_DTB_COMPRESSED="${ANDROID_DTB_COMPRESSED:-YES}"
+
+################################################################################
+# Toolchain Configuration
+################################################################################
+
+DEFCONFIG="${DEFCONFIG:-hikey960_defconfig}"
+CONFIG_CMD="${CONFIG_CMD:-}"
+ARCH="${ARCH:-arm64}"
+CROSS_COMPILE="${CROSS_COMPILE:-aarch64-linux-android-}"
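+
+# NOTE: every variable above uses the VAR="${VAR:-default}" idiom, so each
+# one can be overridden from the environment, e.g. (hypothetical prefix):
+#   CROSS_COMPILE=aarch64-linux-gnu- ./test_series ...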
- [ $ERROR -ne 0 ] || c_error "Failed to flash kernel [$COMMIT_DESC]!"
+ [ $ERROR -eq 0 ] || c_error "Failed to flash kernel [$COMMIT_DESC]!"

 return $?
 }
-- 
GitLab


From 2446dd4dcfd927f3e4935a42d57524aeb1dd0e6a Mon Sep 17 00:00:00 2001
From: Patrick Bellasi 
Date: Fri, 6 Oct 2017 14:51:10 +0100
Subject: [PATCH 05/84] lisa_shell: add new commands to run wltests

This adds a pair of new LISAShell commands to trigger WLTEST tests.

- lisa-wltest-init: sets up and configures Workload-Automation to
  properly support the execution of that tool as a test command of the
  test_series script

- lisa-wltest-series: a wrapper of the test_series script, which
  ensures the test environment is properly configured before handing
  over to the actual test_series script

This patch also provides a default and generic configuration for
Workload Automation which is suitable for test execution on Android
devices.

Signed-off-by: Patrick Bellasi 
---
 .gitignore                          |  2 +
 src/shell/lisa_shell                | 83 +++++++++++++++++++++++++++++
 tools/wa_user_directory/config.yaml | 17 ++++++
 3 files changed, 102 insertions(+)
 create mode 100644 tools/wa_user_directory/config.yaml

diff --git a/.gitignore b/.gitignore
index 9d7e1ccc3..e86bc51e4 100644
--- a/.gitignore
+++ b/.gitignore
@@ -8,3 +8,5 @@
 /ipynb/*.pid
 /ipynb/server.url
 /vagrant
+/tools/wa_venv
+/tools/wa_user_directory/dependencies

diff --git a/src/shell/lisa_shell b/src/shell/lisa_shell
index af6e81ed6..944807a53 100755
--- a/src/shell/lisa_shell
+++ b/src/shell/lisa_shell
@@ -376,6 +376,89 @@
 echo
 }

+################################################################################
+# LISA Workloads utility functions
+################################################################################

+function lisa-wltest-init {
+export WLTEST_HOME="$LISA_HOME/tools/wltests"
+export WLTEST_VENV="$LISA_HOME/tools/wa_venv"
+export WLTEST_WA="$LISA_HOME/tools/workload-automation"
+export WA_USER_DIRECTORY="$LISA_HOME/tools/wa_user_directory"

+# If the Python virtual env exists:
+# let's assume everything has already been set up and we are ready to go
+if [ -d $WLTEST_VENV ]; then
+ source $WLTEST_VENV/bin/activate
+ return 0
+fi

+# Check for required dependencies
+which virtualenv &>/dev/null
+if [ $? -ne 0 ]; then
+cat <<EOF

ERROR: virtualenv not found!
You can install it with:
   $> pip install virtualenv

EOF
+ return -1
+fi

+# Create and activate a Python virtual environment to be used for the
+# installation of the required version of external libraries and tools
+virtualenv $WLTEST_VENV
+source $WLTEST_VENV/bin/activate

+# Clone Workload Automation and install it in the virtual environment
+_lisa-update-submodules
+pip install -e $WLTEST_WA
+}

+function lisa-wltest-series {

+# Check that the environment is properly configured
+if [ -z $ANDROID_HOME ]; then
+cat <<EOF

ERROR: ANDROID_HOME not defined!
Please export ANDROID_HOME to point at your Android SDK installation.

EOF
+ return -1
+fi

+# Check for required dependencies
+which realpath &>/dev/null
+if [ $? -ne 0 ]; then
+ cat <<EOF

ERROR: realpath not available!
You can install it with:
   $> sudo apt-get install coreutils

EOF
+ return -1
+fi

+# Ensure the wltest environment has been configured, and get the relative
+# paths loaded in the environment
+lisa-wltest-init
+[ $? -eq 0 ] || exit -1

+# Setup the Python virtual env, only if not already done
+echo $PATH | grep wa_venv &>/dev/null
+[ $? -eq 0 ] || source $WLTEST_VENV/bin/activate
+
+# Run the bundled test_series script
+$WLTEST_HOME/test_series "$@"
+}

 ################################################################################
 # LISA Shell MAIN
 ################################################################################

diff --git a/tools/wa_user_directory/config.yaml b/tools/wa_user_directory/config.yaml
new file mode 100644
index 000000000..6174dd657
--- /dev/null
+++ b/tools/wa_user_directory/config.yaml
@@ -0,0 +1,17 @@
+device: generic_android

+# If you have multiple devices connected, set the ADB device ID here
+# device_config:
+#   device: 0123456789ABCDEF

+energy_measurement:
+  instrument: acme_cape
+  instrument_parameters:
+    host: 192.168.0.1
+    # Set channel name here (find using iio_info)
+    # iio-device: iio:device0
+    # If not in $PATH:
+    # iio-capture: /path/to/iio-capture

+# Disable re-trying things that go wrong
+max_retries: 0
-- 
GitLab


From 168647560b4d49cf1f2c5e601a6e594966f7e2de Mon Sep 17 00:00:00 2001
From: Patrick Bellasi 
Date: Fri, 6 Oct 2017 19:17:15 +0100
Subject: [PATCH 06/84] tools/wltests: README: add initial documentation

Mainly a placeholder... to be further extended.

Signed-off-by: Patrick Bellasi 
---
 tools/wltests/README.md | 107 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 107 insertions(+)
 create mode 100644 tools/wltests/README.md

diff --git a/tools/wltests/README.md b/tools/wltests/README.md
new file mode 100644
index 000000000..0a369411e
--- /dev/null
+++ b/tools/wltests/README.md
@@ -0,0 +1,107 @@
+
+# WLTests - Workloads Tests on a Series of Commits

+The `lisa-wltest-series` takes a Linux kernel tree, a file containing a list of
+commits, and a test command. It then compiles each of those kernels, boots them
+on a remote Android target, and runs the test command for each of them.

+An IPython notebook is provided for analysing the results.

+## Initialization

+```bash
+# Enter your LISA main folder
+$> cd /path/to/your/LISA_HOME

+# Initialize a LISAShell
+$> source init_env

+# Export your ANDROID_HOME
+[LISAShell lisa] \> export ANDROID_HOME=/path/to/your/android-sdk-linux

+# Ensure your cross-compiler is in your PATH
+[LISAShell lisa] \> export PATH=/path/to/your/cross-compiler/bin:$PATH
+```
+## Prepare the target device

+In general your device should be pre-configured and flashed with an updated and
+stable user-space. The userspace usually comes with a boot image (`boot.img`)
+which also provides a ramdisk image. In order to be able to test different
+kernels, you are required to deploy the ramdisk which matches your `boot.img`
+under the corresponding platform folder.

+For example, if you are targeting a Hikey960 board running android-4.4, the
+ramdisk image should be deployed under:
+```
+   tools/wltests/platforms/hikey960_android-4.4/ramdisk.gz
+```
+Please note that the name of the ramdisk image, in this example `ramdisk.gz`,
+has to match the value for the `RAMDISK_IMAGE` variable defined by the platform
+definition file, in this example:
+```
+   tools/wltests/platforms/hikey960_android-4.4/definitions
+```

+### Hikey960
+By default, the firmware on that device reports a device ID when in FASTBOOT
+mode which is different from the device ID reported when in ADB mode.
+This is a major issue for the testing scripts since they require a mandatory
+device ID which is expected to be the same in both ADB and FASTBOOT modes.
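+
+You can check whether your board is affected by comparing the serials reported
+by the two tools (both commands should report the same device ID):
+
+```bash
+# Compare the device IDs reported in ADB and FASTBOOT modes
+[LISAShell lisa] \> adb devices
+[LISAShell lisa] \> fastboot devices
+```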
+
+To fix this, you can set a custom and unique device ID for your Hikey960 board
+using the following command from FASTBOOT mode:

+```bash
+# Set a unique device ID for both FASTBOOT and ADB modes:
+[LISAShell lisa] \> DEVICE_ID="UniqueIdYouLike"
+[LISAShell lisa] \> fastboot getvar nve:SN@$DEVICE_ID
+```

+## Download workload dependencies

+We cannot distribute the APK files required for this tool to run the workloads -
+you will need to obtain them yourself. You can either install them directly on your
+device (from the Play Store, if necessary), or populate
+`$LISA_HOME/tools/wa_user_directory/dependencies` so that they can be
+automatically installed. There should be one directory for each of the named
+workloads, containing the required APK file, like so:

+```
+[LISAShell lisa] \> tree tools/wa_user_directory/dependencies/
+tools/wa_user_directory/dependencies/
+├── exoplayer
+│   └── exoplayer-demo.apk
+└── jankbench
+    └── jank-benchmark.apk
+```

+Note that the leaf filename of the .apk files is not important - the files'
+content will be inspected using Android's packaging tools.

+#### Exoplayer

+Exoplayer can be built from source code. Clone
+https://github.com/google/ExoPlayer, open the source tree in Android Studio, and
+compile. This should result in a file named 'demo-noExtensions-debug.apk'

+## Using the tool

+You'll need to create a list of commits that you want to compare the performance
+of. This should be a file in the format produced by running
+`git log --no-color --oneline` in your kernel tree.

+The test command is typically a Workload Automation command - you can use
+variable substitution to set the location of the output directory that will be
+produced - see the example below.

+```bash
+# Get a detailed description of the supported options
+[LISAShell lisa] \> lisa-wltest-series --help

+# Minimal command line to run a Workload Automation agenda
+[LISAShell lisa] \> lisa-wltest-series \
+    --platform hikey960_android-4.4 \
+    --kernel_path /path/to/your/kernel/hikey-linaro \
+    --series /path/to/your/series.sha1 \
+    --wa_agenda /path/to/your/agenda.yaml
+```
-- 
GitLab


From c677cf3d845b4c319c0b3f1e52a6e5b8b6921d3a Mon Sep 17 00:00:00 2001
From: Brendan Jackman 
Date: Fri, 6 Oct 2017 16:51:21 +0100
Subject: [PATCH 07/84] tools/wltests: add out-of-tree WA3 workloads

---
 tools/wa_user_directory/config.yaml           |  20 +-
 .../plugins/exoplayer/__init__.py             | 209 ++++++++++++++++++
 .../plugins/jankbench/__init__.py             | 153 +++++++++++++
 .../plugins/pcmark/__init__.py                | 137 ++++++++++++
 .../plugins/ubsystemuijanktests/__init__.py   | 114 ++++++++++
 5 files changed, 626 insertions(+), 7 deletions(-)
 create mode 100644 tools/wa_user_directory/plugins/exoplayer/__init__.py
 create mode 100644 tools/wa_user_directory/plugins/jankbench/__init__.py
 create mode 100644 tools/wa_user_directory/plugins/pcmark/__init__.py
 create mode 100644 tools/wa_user_directory/plugins/ubsystemuijanktests/__init__.py

diff --git a/tools/wa_user_directory/config.yaml b/tools/wa_user_directory/config.yaml
index 6174dd657..1d756215a 100644
--- a/tools/wa_user_directory/config.yaml
+++ b/tools/wa_user_directory/config.yaml
@@ -1,16 +1,22 @@
+# Skeleton global config.yaml for WA3
 device: generic_android

-# If you have multiple devices connected, set the ADB device ID here
-# device_config:
-#   device: 0123456789ABCDEF
+# Prevents Geekbench and other such workloads from being run by accident. For
+# devices where such workloads are safe (i.e. publicly-available devices),
+# override this in a per-device config file and pass it to wa with the
+# --config/-c option.
+allow_phone_home: false

+# Set up ACME
 energy_measurement:
   instrument: acme_cape
   instrument_parameters:
-    host: 192.168.0.1
-    # Set channel name here (find using iio_info)
-    # iio-device: iio:device0
-    # If not in $PATH:
+    host: baylibre-acme.local
+    # If collecting on multiple channels, or another channel than iio:device0,
+    # set them here:
+    iio_devices: ["iio:device0"]
+
+    # If iio-capture is not in your $PATH, uncomment this and set the path.
     # iio-capture: /path/to/iio-capture

 # Disable re-trying things that go wrong
 max_retries: 0

diff --git a/tools/wa_user_directory/plugins/exoplayer/__init__.py b/tools/wa_user_directory/plugins/exoplayer/__init__.py
new file mode 100644
index 000000000..658c29703
--- /dev/null
+++ b/tools/wa_user_directory/plugins/exoplayer/__init__.py
@@ -0,0 +1,209 @@
+# SPDX-License-Identifier: Apache-2.0
+#
+# Copyright (C) 2017, Arm Limited and contributors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#

+from collections import defaultdict
+import re
+import os
+import time
+import urllib

+from wa import ApkWorkload, Parameter, ConfigError, WorkloadError
+from wa.framework.configuration.core import settings
+from wa.utils.types import boolean
+from wa.utils.misc import ensure_directory_exists
+from devlib.utils.android import grant_app_permissions

+# Regexps for benchmark synchronization
+REGEXPS = {
+    'start' : '.*Displayed com.google.android.exoplayer2.demo/.PlayerActivity',
+    'duration' : '.*period \[(?P<duration>[0-9]+.*)\]',
+    'end' : '.*state \[.+, .+, E\]',
+    'dropped_frames': '.*droppedFrames \[(?P<period>[0-9]+\.[0-9]+), (?P<count>[0-9]+)\]'
+}


+DOWNLOAD_URLS = {
+    'mp4_1080p': 'http://distribution.bbb3d.renderfarming.net/video/mp4/bbb_sunflower_1080p_30fps_normal.mp4',
+    'mov_720p': 'http://download.blender.org/peach/bigbuckbunny_movies/big_buck_bunny_720p_h264.mov',
+    'mov_480p': 'http://download.blender.org/peach/bigbuckbunny_movies/big_buck_bunny_480p_h264.mov',
+    'ogg_18:45': 'http://upload.wikimedia.org/wikipedia/commons/c/ca/Tchaikovsky_-_Romeo_and_Juliet_Ouverture_-_Antal_Dorati_(1959).ogg',
+}


+class ExoPlayer(ApkWorkload):
+    """
+    Android ExoPlayer
+
+    ExoPlayer is the basic video player library that is used by the YouTube
+    Android app. The aim of this workload is to serve as a proxy for YouTube
+    performance on targets where running the real YouTube app is not possible
+    due to its dependencies.
+
+    ExoPlayer sources: https://github.com/google/ExoPlayer
+
+    The 'demo' application is used by this workload. It can easily be built by
+    loading the ExoPlayer sources into Android Studio.
+
+    Version r2.4.0 built from commit d979469 is known to work.
+
+    Produces a metric 'exoplayer_dropped_frames' - this is the count of frames
+    that Exoplayer itself reports as dropped. This is not the same thing as the
+    dropped frames reported by gfxinfo.
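+
+    A minimal agenda entry using this workload might look like the following
+    sketch (the parameter values are illustrative only; both parameters are
+    documented below):
+
+        workloads:
+          - name: exoplayer
+            workload_parameters:
+              format: mov_720p
+              duration: 30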
+    """

+    name = 'exoplayer'

+    video_directory = os.path.join(settings.dependencies_directory, name)

+    package_names = ['com.google.android.exoplayer2.demo']
+    versions = ['2.4.0']
+    action = 'com.google.android.exoplayer.demo.action.VIEW'
+    default_format = 'mov_720p'

+    parameters = [
+        Parameter('version', allowed_values=versions, default=versions[-1], override=True),
+        Parameter('duration', kind=int, default=20,
+                  description="""
+                  Playback duration of the video file. This becomes the duration of the workload.
+                  If provided, it must be shorter than the length of the media.
+                  """),
+        Parameter('format', allowed_values=DOWNLOAD_URLS.keys(),
+                  description="""
+                  Specifies which format video file to play. Default is {}
+                  """.format(default_format)),
+        Parameter('filename',
+                  description="""
+                  The name of the video file to play. This can be either a path
+                  to the file anywhere on your file system, or it could be just a
+                  name, in which case, the workload will look for it in
+                  ``{}``
+                  *Note*: either format or filename should be specified, but not both!
+                  """.format(video_directory)),
+        Parameter('force_dependency_push', kind=boolean, default=False,
+                  description="""
+                  If true, video will always be pushed to device, regardless
+                  of whether the file is already on the device. Default is ``False``.
+                  """),
+    ]

+    def validate(self):
+        if self.format and self.filename:
+            raise ConfigError('Either format *or* filename must be specified, but not both.')

+        if not self.format and not self.filename:
+            self.format = self.default_format

+    def _find_host_video_file(self):
+        """Pick the video file we're going to use, download it if necessary"""
+        if self.filename:
+            if self.filename[0] in './' or len(self.filename) > 1 and self.filename[1] == ':':
+                filepath = os.path.abspath(self.filename)
+            else:
+                filepath = os.path.join(self.video_directory, self.filename)
+            if not os.path.isfile(filepath):
+                raise WorkloadError('{} does not exist.'.format(filepath))
+            return filepath
+        else:
+            # Search for files we've already downloaded
+            files = []
+            for filename in os.listdir(self.video_directory):
+                format_ext, format_resolution = self.format.split('_')
+                _, file_ext = os.path.splitext(filename)
+                if file_ext == '.' + format_ext and format_resolution in filename:
+                    files.append(os.path.join(self.video_directory, filename))

+            if not files:
+                # Download a file with the requested format
+                url = DOWNLOAD_URLS[self.format]
+                filepath = os.path.join(self.video_directory, os.path.basename(url))
+                self.logger.info('Downloading {} to {}...'.format(url, filepath))
+                urllib.urlretrieve(url, filepath)
+                return filepath
+            else:
+                if len(files) > 1:
+                    self.logger.warn('Multiple files found for {} format. Using {}.'
+                                     .format(self.format, files[0]))
+                    self.logger.warn('Use "filename" parameter instead of '
+                                     '"format" to specify a different file.')
+                return files[0]

+    def init_resources(self, context):
+        # Needs to happen first, as it sets self.format, which is required by
+        # _find_host_video_file
+        self.validate()

+        ensure_directory_exists(self.video_directory)
+        self.host_video_file = self._find_host_video_file()

+    def setup(self, context):
+        super(ExoPlayer, self).setup(context)

+        grant_app_permissions(self.target, self.package)

+        self.device_video_file = self.target.path.join(self.target.working_directory,
+                                                       os.path.basename(self.host_video_file))
+        if self.force_dependency_push or not self.target.file_exists(self.device_video_file):
+            self.logger.info('Copying {} to device.'.format(self.host_video_file))
+            self.target.push(self.host_video_file, self.device_video_file, timeout=120)

+        self.play_cmd = 'am start -a {} -d "file://{}"'.format(self.action,
+                                                               self.device_video_file)

+        self.monitor = self.target.get_logcat_monitor(REGEXPS.values())
+        self.monitor.start()

+    def run(self, context):
+        self.target.execute(self.play_cmd)

+        self.monitor.wait_for(REGEXPS['start'])
+        self.logger.info('Playing media file')

+        line = self.monitor.wait_for(REGEXPS['duration'])[0]
+        media_duration_s = int(round(float(re.search(REGEXPS['duration'], line)
+                                           .group('duration'))))

+        self.logger.info('Media duration is {} seconds'.format(media_duration_s))

+        if self.duration > media_duration_s:
+            raise ConfigError(
+                "'duration' param ({}) longer than media duration ({})".format(
+                    self.duration, media_duration_s))

+        if self.duration:
+            self.logger.info('Waiting {} seconds before ending playback'
+                             .format(self.duration))
+            time.sleep(self.duration)
+        else:
+            self.logger.info('Waiting for playback completion ({} seconds)'
+                             .format(media_duration_s))
+            self.monitor.wait_for(REGEXPS['end'], timeout=media_duration_s + 30)

+    def update_output(self, context):
+        regex = re.compile(REGEXPS['dropped_frames'])

+        dropped_frames = 0
+        for line in self.monitor.get_log():
+            match = regex.match(line)
+            if match:
+                dropped_frames += int(match.group('count'))

+        context.add_metric('exoplayer_dropped_frames', dropped_frames,
+                           lower_is_better=True)

+    def teardown(self, context):
+        super(ExoPlayer, self).teardown(context)
+        self.monitor.stop()

diff --git a/tools/wa_user_directory/plugins/jankbench/__init__.py b/tools/wa_user_directory/plugins/jankbench/__init__.py
new file mode 100644
index 000000000..6f3bc198c
--- /dev/null
+++ b/tools/wa_user_directory/plugins/jankbench/__init__.py
@@ -0,0 +1,153 @@
+# Copyright 2017 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#

+import csv
+import os
+import re
+import subprocess
+import threading
+import select
+import sqlite3

+from wa import Parameter, ApkWorkload
+from wa.framework.exception import WorkloadError

+REGEXPS = {
+    'start': (r'.*START.*'
+              'cmp=com.android.benchmark/.app.RunLocalBenchmarksActivity.*'),
+    'count': '.*iteration: (?P<iteration>[0-9]+).*',
+    'metrics': (r'.*Mean: (?P<mean>[0-9\.]+)\s+JankP: (?P<jank_p>[0-9\.]+)\s+'
+                'StdDev: (?P<std_dev>[0-9\.]+)\s+Count Bad: (?P<count_bad>[0-9]+)\s+'
+                'Count Jank: (?P<count_jank>[0-9]+).*'),
+    'done': r'.*BenchmarkDone!.*',
+}

+class Jankbench(ApkWorkload):

+    name = 'jankbench'
+    description = """
+    Google's Jankbench benchmark.
+
+    Jankbench simulates user interaction with Android UI components and records
+    frame rendering times and 'jank' (rendering discontinuity) in an SQLite
+    database. This is believed to be a good proxy for the smoothness of user
+    experience.
+
+    Dumps a JankbenchResults.sqlite file in the output directory. This database
+    contains a table 'ui_results' with a row for each frame, showing its
+    rendering time in ms in the 'total_duration' column, and whether or not it
+    was a jank frame in the 'jank_frame' column.
+
+    This information is also extracted from the SQLite file and dumped as
+    jankbench_frames.csv. This is _not_ necessarily the same information as
+    provided by gfxinfo (fps instrument).
+    """

+    versions = ['1.0']
+    activity = '.app.RunLocalBenchmarksActivity'
+    package = 'com.android.benchmark'
+    package_names = [package]

+    target_db_path = '/data/data/{}/databases/BenchmarkResults'.format(package)

+    test_ids = {
+        'list_view'         : 0,
+        'image_list_view'   : 1,
+        'shadow_grid'       : 2,
+        'low_hitrate_text'  : 3,
+        'high_hitrate_text' : 4,
+        'edit_text'         : 5,
+    }

+    parameters = [
+        Parameter('test',
+                  default=test_ids.keys()[0], allowed_values=test_ids.keys(),
+                  description='Which Jankbench sub-benchmark to run'),
+        Parameter('run_timeout', kind=int, default=10 * 60,
+                  description="""
+                  Timeout for workload execution. The workload will be killed if it hasn't completed
+                  within this period. In seconds.
+                  """),
+        Parameter('times', kind=int, default=1, constraint=lambda x: x > 0,
+                  description=('Specifies the number of times the benchmark will be run in a "tight '
+                               'loop", i.e. without performing setup/teardown in between.')),
+    ]

+    def initialize(self, context):
+        super(Jankbench, self).initialize(context)

+        # Need root to get results database
+        if not self.target.is_rooted:
+            raise WorkloadError('Jankbench workload requires device to be rooted')

+    def setup(self, context):
+        super(Jankbench, self).setup(context)
+        self.monitor = self.target.get_logcat_monitor(REGEXPS.values())
+        self.monitor.start()

+        self.command = (
+            'am start -n com.android.benchmark/.app.RunLocalBenchmarksActivity '
+            '--eia com.android.benchmark.EXTRA_ENABLED_BENCHMARK_IDS {0} '
+            '--ei com.android.benchmark.EXTRA_RUN_COUNT {1}'
+        ).format(self.test_ids[self.test], self.times)


+    def run(self, context):
+        # All we need to do is
+        # - start the activity,
+        # - then use the logcat monitor to wait until the benchmark reports on
+        #   logcat that it is finished,
+        # - pull the result database file.
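+        # (wait_for() below comes from the logcat monitor started in setup():
+        #  it blocks until a line matching the given regexp shows up in logcat,
+        #  or the timeout expires, making a hung benchmark fail explicitly.)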
+
+        result = self.target.execute(self.command)
+        if 'FAILURE' in result:
+            raise WorkloadError(result)
+        else:
+            self.logger.debug(result)

+        self.monitor.wait_for(REGEXPS['start'], timeout=30)
+        self.logger.info('Detected Jankbench start')

+        self.monitor.wait_for(REGEXPS['done'], timeout=300*self.times)

+    def extract_results(self, context):
+        # TODO make these artifacts where they should be
+        super(Jankbench, self).extract_results(context)
+        host_db_path = os.path.join(context.output_directory,
+                                    'BenchmarkResults.sqlite')
+        self.target.pull(self.target_db_path, host_db_path, as_root=True)
+        context.add_artifact('jankbench_results_db', host_db_path, 'data')

+        columns = ['_id', 'name', 'run_id', 'iteration', 'total_duration', 'jank_frame']
+        jank_frame_idx = columns.index('jank_frame')
+        query = 'SELECT {} FROM ui_results'.format(','.join(columns))
+        conn = sqlite3.connect(os.path.join(host_db_path))

+        csv_path = os.path.join(context.output_directory, 'jankbench_frames.csv')
+        jank_frames = 0
+        with open(csv_path, 'wb') as f:
+            writer = csv.writer(f)
+            writer.writerow(columns)
+            for db_row in conn.execute(query):
+                writer.writerow(db_row)
+                if int(db_row[jank_frame_idx]):
+                    jank_frames += 1
+        context.add_artifact('jankbench_results_csv', csv_path, 'data')

+        context.add_metric('jankbench_jank_frames', jank_frames,
+                           lower_is_better=True)

+    def teardown(self, context):
+        self.monitor.stop()

diff --git a/tools/wa_user_directory/plugins/pcmark/__init__.py b/tools/wa_user_directory/plugins/pcmark/__init__.py
new file mode 100644
index 000000000..537a369b6
--- /dev/null
+++ b/tools/wa_user_directory/plugins/pcmark/__init__.py
@@ -0,0 +1,137 @@
+# SPDX-License-Identifier: Apache-2.0
+#
+# Copyright (C) 2017, Arm Limited and contributors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#

+import os
+import re
+import time
+from zipfile import ZipFile

+from wa import Parameter, Workload
+from wa.framework.exception import WorkloadError

+REGEXPS = {
+    'start' : '.*START.*com.futuremark.pcmark.android.benchmark',
+    'end' : '.*onWebViewReady.*view_scoredetails.html',
+    'result' : '.*received result for correct code, result file in (?P<path>.*\.zip)',
+    'score' : '\s*<(?P<name>.*)Score>(?P<score>[0-9]*)<'
+}

+INSTALL_INSTRUCTIONS = """
+This workload has incomplete automation support. Please download the APK from
+http://www.futuremark.com/downloads/pcmark-android.apk
+and install it on the device. Then open the app on the device, and hit the
+'install' button to set up the 'Work v2' benchmark.
+"""

+class PcMark(Workload):
+    """
+    Android PCMark workload
+
+    TODO: This isn't a proper WA workload! It requires that the app is already
+    installed and set up like so:
+
+    - Install the APK from http://www.futuremark.com/downloads/pcmark-android.apk
+    - Open the app and hit "install"
+
+    """
+    name = 'pcmark'

+    package = 'com.futuremark.pcmark.android.benchmark'
+    activity = 'com.futuremark.gypsum.activity.SplashPageActivity'

+    package_names = [package]
+    action = 'android.intent.action.VIEW'

+    parameters = [
+        Parameter('test', default='work', allowed_values=['work'],
+                  description='PCMark sub-benchmark to run'),
+    ]

+    def initialize(self, context):
+        super(PcMark, self).initialize(context)

+        # Need root to get results
+        if not self.target.is_rooted:
+            raise WorkloadError('PCMark workload requires device to be rooted')

+        if not self.target.is_installed(self.package):
+            raise WorkloadError('Package not installed. ' + INSTALL_INSTRUCTIONS)

+        path = ('/storage/emulated/0/Android/data/{}/files/dlc/pcma-workv2-data'
+                .format(self.package))
+        if not self.target.file_exists(path):
+            raise WorkloadError('"Work v2" benchmark not installed through app. '
+                                + INSTALL_INSTRUCTIONS)

+    def setup(self, context):
+        super(PcMark, self).setup(context)

+        self.target.execute('am kill-all')  # kill all *background* activities
+        self.target.execute('am start -n {}/{}'.format(self.package, self.activity))
+        time.sleep(5)

+        # TODO: we clobber the old auto-rotation setting here.
+        self.target.set_auto_rotation(False)
+        self._saved_screen_rotation = self.target.get_rotation()
+        # Move to benchmark run page
+        self.target.set_left_rotation()  # Needed to make TAB work
+        self.target.execute('input keyevent KEYCODE_TAB')
+        self.target.execute('input keyevent KEYCODE_TAB')

+        self.monitor = self.target.get_logcat_monitor()
+        self.monitor.start()

+    def run(self, context):
+        self.target.execute('input keyevent KEYCODE_ENTER')
+        # Wait for page animations to end
+        time.sleep(10)

+        [self.output] = self.monitor.wait_for(REGEXPS['result'], timeout=600)

+    def extract_results(self, context):
+        # TODO should this be an artifact?
+        remote_zip_path = re.match(REGEXPS['result'], self.output).group('path')
+        local_zip_path = os.path.join(context.output_directory,
+                                      self.target.path.basename(remote_zip_path))
+        print 'pulling {} -> {}'.format(remote_zip_path, local_zip_path)
+        self.target.pull(remote_zip_path, local_zip_path, as_root=True)

+        print 'extracting'
+        with ZipFile(local_zip_path, 'r') as archive:
+            archive.extractall(context.output_directory)

+        # Fetch workload names and scores
+        score_regex = re.compile('\s*<(?P<name>.*)Score>(?P<score>[0-9]*)<')
+        with open(os.path.join(context.output_directory, 'Result.xml')) as f:
+            for line in f:
+                match = score_regex.match(line)
+                if match:
+                    print 'MATCH'
+                    metric_name = 'pcmark_{}'.format(match.group('name'))
+                    print(metric_name)
+                    print(match.group('score'))
+                    context.add_metric(metric_name, match.group('score'))


+    def teardown(self, context):
+        super(PcMark, self).teardown(context)

+        self.target.execute('am force-stop {}'.format(self.package))

+        self.monitor.stop()
+        self.target.set_rotation(int(self._saved_screen_rotation))

diff --git a/tools/wa_user_directory/plugins/ubsystemuijanktests/__init__.py b/tools/wa_user_directory/plugins/ubsystemuijanktests/__init__.py
new file mode 100644
index 000000000..76bf5a815
--- /dev/null
+++ b/tools/wa_user_directory/plugins/ubsystemuijanktests/__init__.py
@@ -0,0 +1,114 @@
+# SPDX-License-Identifier: Apache-2.0
+#
+# Copyright (C) 2017, Arm Limited and contributors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#

+import re

+from devlib.utils.android import grant_app_permissions

+from wa import ApkWorkload, Parameter, WorkloadError

+class UbSystemUiJankTests(ApkWorkload):
+    """
+    AOSP UbSystemUiJankTests tests
+
+    Performs actions on the System UI (launcher, settings, etc) so that UI
+    responsiveness can be evaluated.
+
+    The .apk can be built with `make UbSystemUiJankTests` in the AOSP tree.
+
+    Reports the metrics reported by the instrumentation system - these will
+    likely overlap with those reported by the 'fps' instrument, but should be
+    more accurately recorded.
+    """

+    name = 'ubsystemuijanktests'

+    package_names = ['android.platform.systemui.tests.jank']

+    tests = [
+        'LauncherJankTests#testOpenAllAppsContainer',
+        'LauncherJankTests#testAllAppsContainerSwipe',
+        'LauncherJankTests#testHomeScreenSwipe',
+        'LauncherJankTests#testWidgetsContainerFling',
+        'SettingsJankTests#testSettingsFling',
+        'SystemUiJankTests#testRecentAppsFling',
+        'SystemUiJankTests#testRecentAppsDismiss',
+        'SystemUiJankTests#testNotificationListPull',
+        'SystemUiJankTests#testNotificationListPull_manyNotifications',
+        'SystemUiJankTests#testQuickSettingsPull',
+        'SystemUiJankTests#testUnlock',
+        'SystemUiJankTests#testExpandGroup',
+        'SystemUiJankTests#testClearAll',
+        'SystemUiJankTests#testChangeBrightness',
+        'SystemUiJankTests#testNotificationAppear',
+        'SystemUiJankTests#testCameraFromLockscreen',
+        'SystemUiJankTests#testAmbientWakeUp',
+        'SystemUiJankTests#testGoToFullShade',
+        'SystemUiJankTests#testInlineReply',
+        'SystemUiJankTests#testPinAppearance',
+        'SystemUiJankTests#testLaunchSettings',
+    ]

+    parameters = [
+        Parameter('test', default=tests[0], allowed_values=tests,
+                  description='Which of the System UI jank tests to run')
+    ]

+    def setup(self, context):
+        # Override the default setup method, as it calls
+        # self.apk.start_activity. We don't want to do that.

+        self.apk.initialize_package(context)
+        self.target.execute('am kill-all')  # kill all *background* activities
+        grant_app_permissions(self.target, self.package)

+        self.target.clear_logcat()

+        jclass = '{}.{}'.format(self.package, self.test)
+        self.command = 'am instrument -e iterations 1 -e class {} -w {}'.format(
+            jclass, self.package)

+    def run(self, context):
+        self.output = self.target.execute(self.command)

+        # You see 'FAILURES' if an exception is thrown.
+        # You see 'Process crashed' if it doesn't recognise the class for some
+        # reason.
+        # But neither reports an error in the exit code, so check explicitly.
+        if 'FAILURES' in self.output or 'Process crashed' in self.output:
+            raise WorkloadError('Failed to run workload: {}'.format(self.output))

+    def update_output(self, context):
+        # The 'am instrument' command dumps the instrumentation results into
+        # stdout. It also gets written by the autotester to a storage file - on
+        # my devices that is /storage/emulated/0/results.log, but I don't know if
+        # that's the same for every device.
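+        # An illustrative (hypothetical) example of the kind of line the regexp
+        # below picks up; the exact set of keys varies by test:
+        #   INSTRUMENTATION_STATUS: gfx-avg-frame-time-99=8.0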
+        #
+        # AOSP probably provides standard tooling for parsing this, but I don't
+        # know how to use it. Anyway, for this use-case just parsing stdout
+        # works fine.

+        regex = re.compile('INSTRUMENTATION_STATUS: (?P<key>[\w-]+)=(?P<value>[0-9\.]+)')

+        for line in self.output.splitlines():
+            match = regex.match(line)
+            if match:
+                key = match.group('key')
+                value = float(match.group('value'))

+                name = 'instrumentation_{}'.format(key)
+                context.add_metric(name, value, lower_is_better=True)
-- 
GitLab


From 1df61c1a7d6a225247130a71db65e39ef1f4eb92 Mon Sep 17 00:00:00 2001
From: Brendan Jackman 
Date: Fri, 6 Oct 2017 18:43:51 +0100
Subject: [PATCH 08/84] tools/wltests: add example agendas

---
 .../agendas/example-exoplayer-simple.yaml |  9 ++
 tools/wltests/agendas/example-rich.yaml   | 99 +++++++++++++++++++
 2 files changed, 108 insertions(+)
 create mode 100644 tools/wltests/agendas/example-exoplayer-simple.yaml
 create mode 100644 tools/wltests/agendas/example-rich.yaml

diff --git a/tools/wltests/agendas/example-exoplayer-simple.yaml b/tools/wltests/agendas/example-exoplayer-simple.yaml
new file mode 100644
index 000000000..d2e73474e
--- /dev/null
+++ b/tools/wltests/agendas/example-exoplayer-simple.yaml
@@ -0,0 +1,9 @@
+# This is an example of a simple agenda: It simply runs the exoplayer workload 3
+# times. The workload parameters (such as video playback duration) are left to
+# the workload's default, and target and instrumentation configuration will
+# either be read from $LISA_HOME/tools/wa_user_directory/config.yaml or
+# the defaults will be used.

+workloads:
+  - name: exoplayer
+    iterations: 3

diff --git a/tools/wltests/agendas/example-rich.yaml b/tools/wltests/agendas/example-rich.yaml
new file mode 100644
index 000000000..2ac07d45f
--- /dev/null
+++ b/tools/wltests/agendas/example-rich.yaml
@@ -0,0 +1,99 @@
+# This is an example agenda which shows a possible (rather large) set of
+# workloads and data collection.
+#
+# This will run 5 iterations each of a large set of workloads, under each of
+# PELT and WALT load tracking (it assumes that the target supports both
+# mechanisms).
+#

+config:
+  # Collect energy data and ftrace files
+  # You may want to edit your config.yaml to set up the energy_measurement
+  # instrument (an example configuration is provided in this repo).
+  instrumentation: [energy_measurement, trace-cmd]

+global:
+  # Do everything 5 times
+  iterations: 5

+  # To aid reproducibility and try to reduce noise in power measurements,
+  # minimise the screen brightness and set airplane mode on.
+  # TODO: will this break workloads that require internet?
+  runtime_params:
+    brightness: 0
+    airplane_mode: true

+# "Sections" are groups of runtime configuration.
In the results analysis they +# will be mapped to "tags" using the classifiers field below +sections: + - id: pelt # Just a human-readable name + classifiers: # Will be used to map job output to 'tags' when analysing + load_tracking: pelt_cls + runtime_params: # These are the actual parameters that get set on the target + sysfile_values: + /proc/sys/kernel/sched_use_walt_cpu_util: 0 + /proc/sys/kernel/sched_use_walt_task_util: 0 + + - id: walt + classifiers: + load_tracking: walt_cls + runtime_params: + sysfile_values: + /proc/sys/kernel/sched_use_walt_cpu_util: 1 + /proc/sys/kernel/sched_use_walt_task_util: 1 + +workloads: + # Sit on the homescreen for 15 seconds + - name: homescreen + id: homescreen_15s + workload_parameters: + duration: 15 + + # Play 30 seconds of a video with Exoplayer - this is the basis for the + # YouTube app, so it's hoped that this is a decent proxy for Youtube + # performance on devices where running the real app is impractical + - name: exoplayer + id: exoplayer_30s + workload_parameters: + duration: 30 + + - name: pcmark + id: pcmark + + - name: geekbench + id: geekbench + + # We need one entry for each of the Jankbench sub-benchmarks + - name: jankbench + # 'id' and 'classifiers' are optional - just to make the output directory + # easier to read/parse + id: jb_list_view + classifiers: + test: jb_list_view + # workload_parameters are the real parameters that influence what gets run + workload_parameters: + test: list_view + - name: jankbench + id: jb_image_list_view + classifiers: + test: jb_image_list_view + workload_parameters: + test: image_list_view + - name: jankbench + id: jb_shadow_grid + classifiers: + test: jb_shadow_grid + workload_parameters: + test: shadow_grid + - name: jankbench + id: jb_low_hitrate_text + classifiers: + test: jb_low_hitrate_text + workload_parameters: + test: low_hitrate_text + - name: jankbench + id: jb_edit_text + classifiers: + test: jb_edit_text + workload_parameters: + test: edit_text -- GitLab From df5a8f042c704fc4e4bb73733e0dbabf00dc4eae Mon Sep 17 00:00:00 2001 From: Patrick Bellasi Date: Mon, 9 Oct 2017 18:36:58 +0100 Subject: [PATCH 09/84] tools/wltests: use KERNEL instead of KERNEL_SRC This should improve clarity since this is asking for a kernel source tree. Signed-off-by: Patrick Bellasi --- tools/wltests/android/create_boot_img.sh | 2 +- tools/wltests/android/create_dt_img.sh | 2 +- tools/wltests/build | 26 ++++++++++---------- tools/wltests/test_series | 30 ++++++++++++------------ 4 files changed, 30 insertions(+), 30 deletions(-) diff --git a/tools/wltests/android/create_boot_img.sh b/tools/wltests/android/create_boot_img.sh index 59b1fb9a3..951c4634d 100755 --- a/tools/wltests/android/create_boot_img.sh +++ b/tools/wltests/android/create_boot_img.sh @@ -8,7 +8,7 @@ BASE_DIR="$SCRIPT_DIR/.." source "${BASE_DIR}/helpers" source "${DEFINITIONS_PATH}" -DEFAULT_KERNEL="${KERNEL_PATH}/arch/${ARCH}/boot/${KERNEL_IMAGE}" +DEFAULT_KERNEL="${KERNEL_SRC}/arch/${ARCH}/boot/${KERNEL_IMAGE}" KERNEL="${KERNEL:-$DEFAULT_KERNEL}" DEFAULT_RAMDISK="${PLATFORM_OVERLAY_PATH}/${RAMDISK_IMAGE}" diff --git a/tools/wltests/android/create_dt_img.sh b/tools/wltests/android/create_dt_img.sh index 839b42f50..f60fb900d 100755 --- a/tools/wltests/android/create_dt_img.sh +++ b/tools/wltests/android/create_dt_img.sh @@ -8,7 +8,7 @@ BASE_DIR="$SCRIPT_DIR/.." 
source "${BASE_DIR}/helpers" source "${DEFINITIONS_PATH}" -DEFAULT_DTB="${KERNEL_PATH}/arch/${ARCH}/boot/dts/${KERNEL_DTB}" +DEFAULT_DTB="${KERNEL_SRC}/arch/${ARCH}/boot/dts/${KERNEL_DTB}" DTB="${DTB:-$DEFAULT_DTB}" DEFAULT_DTB_IMAGE="${ARTIFACTS_PATH}/${ANDROID_DTB_IMAGE}" diff --git a/tools/wltests/build b/tools/wltests/build index 64867e1a8..6fea5dfc3 100755 --- a/tools/wltests/build +++ b/tools/wltests/build @@ -220,11 +220,11 @@ NCPUS="$(( 2 * $(nproc) ))" ################################################################################ if [ "${CLEAN_KERNEL}" -eq $YES ] || [ "${CLEAN_ALL}" -eq $YES ]; then - (cd "${KERNEL_PATH}"; make clean) + (cd "${KERNEL_SRC}"; make clean) fi if [ "${DISTCLEAN_KERNEL}" -eq $YES ] || [ "${CLEAN_ALL}" -eq $YES ]; then - (cd "${KERNEL_PATH}"; make distclean) + (cd "${KERNEL_SRC}"; make distclean) fi if [ "${MAKE_KEEPCONFIG}" -eq $YES ]; then @@ -238,12 +238,12 @@ fi if [ ! -z "${CONFIG_CMD}" ] && [ "${MAKE_KEEPCONFIG}" -eq $NO ]; then c_info "Running CONFIG_CMD..." - (set -x; cd "${KERNEL_PATH}"; ${CONFIG_CMD}) + (set -x; cd "${KERNEL_SRC}"; ${CONFIG_CMD}) fi if [ ! -z "${DEFCONFIG}" ] && [ "${MAKE_KEEPCONFIG}" -eq $NO ]; then c_info "Running DEFCONFIG [$DEFCONFIG]..." - (set -x; cd "${KERNEL_PATH}"; make ${DEFCONFIG}) + (set -x; cd "${KERNEL_SRC}"; make ${DEFCONFIG}) fi if [ ! -z "${BUILD_CONFIG_LIST}" ]; then @@ -254,8 +254,8 @@ if [ ! -z "${BUILD_CONFIG_LIST}" ]; then for config in "${list_configs[@]}"; do echo ${config} >> "${tmp_file}" done - if [ -f "${KERNEL_PATH}/scripts/kconfig/merge_config.sh" ]; then - (set -x; cd "${KERNEL_PATH}"; \ + if [ -f "${KERNEL_SRC}/scripts/kconfig/merge_config.sh" ]; then + (set -x; cd "${KERNEL_SRC}"; \ ./scripts/kconfig/merge_config.sh -m \ .config ${tmp_file}) else @@ -264,7 +264,7 @@ if [ ! -z "${BUILD_CONFIG_LIST}" ]; then fi rm -f ${tmp_file} c_info "Running oldconfig after merge of configs" - (set -x; cd "${KERNEL_PATH}"; \ + (set -x; cd "${KERNEL_SRC}"; \ yes "" 2>/dev/null | make oldconfig) fi @@ -275,31 +275,31 @@ fi if [ ${BUILD_IMAGE} -eq $YES ]; then c_info "Making [Image]..." - (set -x; cd "${KERNEL_PATH}"; make -j${NCPUS} "$KERNEL_IMAGE" >${OUT}) + (set -x; cd "${KERNEL_SRC}"; make -j${NCPUS} "$KERNEL_IMAGE" >${OUT}) fi if [ ${BUILD_DTBS} -eq $YES ]; then c_info "Making [dtbs]..." - (set -x; cd "${KERNEL_PATH}"; make -j${NCPUS} dtbs >${OUT}) + (set -x; cd "${KERNEL_SRC}"; make -j${NCPUS} dtbs >${OUT}) fi if [ ${BUILD_MODULES} -eq $YES ]; then c_info "Making [modules]..." - (set -x; cd "${KERNEL_PATH}"; make -j${NCPUS} modules >${OUT}) + (set -x; cd "${KERNEL_SRC}"; make -j${NCPUS} modules >${OUT}) fi if [ ${MAKE_MENUCONFIG} -eq $YES ]; then c_info "Making [menuconfig]..." - (set -x; cd "${KERNEL_PATH}"; make menuconfig) + (set -x; cd "${KERNEL_SRC}"; make menuconfig) fi if [ ${MAKE_SAVEDEFCONFIG} -eq $YES ]; then c_info "Making [savedefconfig]..." - (set -x; cd "${KERNEL_PATH}"; make savedefconfig) + (set -x; cd "${KERNEL_SRC}"; make savedefconfig) fi if [ ${BUILD_NOW} -eq $YES ]; then c_info "Making default target..." 
- (set -x; cd "${KERNEL_PATH}" make -j${NCPUS} >${OUT}) + (set -x; cd "${KERNEL_SRC}" make -j${NCPUS} >${OUT}) fi diff --git a/tools/wltests/test_series b/tools/wltests/test_series index aa4b90ed1..5ceed629f 100755 --- a/tools/wltests/test_series +++ b/tools/wltests/test_series @@ -29,7 +29,7 @@ source "${BASE_DIR}/helpers" ################################################################################ PLATFORM=${PLATFORM:-'hikey960_android-4.4'} -KERNEL_PATH=${KERNEL_PATH:-$BASE_DIR/kernel} +KERNEL_SRC=${KERNEL_SRC:-$BASE_DIR/kernel} SERIES=${SERIES:-''} TEST_CMD=${TEST_CMD:-'echo "Test DONE!"'} @@ -58,7 +58,7 @@ DRYRUN=${DRYRUN:-0} usage() { cat </dev/null +# Prepare KERNEL_SRC +export KERNEL_SRC=$(realpath -s $KERNEL_SRC) +grep -E "mainmenu .* Kernel Configuration" $KERNEL_SRC/Kconfig &>/dev/null if [ $? -ne 0 ]; then - c_error "The KERNEL_PATH seems not pointing to a valid kernel source tree" + c_error "The KERNEL_SRC seems not to a valid kernel source tree path" exit $EINVAL fi @@ -259,7 +259,7 @@ esac box_out \ "Mandatory conf" \ " PLATFORM : $PLATFORM" \ - " KERNEL_PATH : $KERNEL_PATH" \ + " KERNEL_SRC : $KERNEL_SRC" \ " SERIES : $SERIES" \ " TEST_CMD : $TEST_CMD" \ "" \ @@ -473,7 +473,7 @@ name_sha1() { # Find a name for each possible REF mkfifo tmp_pipe &>/dev/null - git -C $KERNEL_PATH for-each-ref \ + git -C $KERNEL_SRC for-each-ref \ --sort=-committerdate \ --format='%(objectname:short) %(refname:short)' \ refs/heads/ refs/remotes/ refs/tags | @@ -509,10 +509,10 @@ build_sha1() { COMMIT_SHA1=$1 COMMIT_DESC=$2 - ### Prepare KERNEL for build - pushd $KERNEL_PATH &>/dev/null + ### Prepare KERNEL_SRC for build + pushd $KERNEL_SRC &>/dev/null echo - c_info "Checkout kernel: $KERNEL_PATH @ $COMMIT_SHA1..." + c_info "Checkout kernel: $KERNEL_SRC @ $COMMIT_SHA1..." git checkout $COMMIT_SHA1; ERROR=$? if [ $ERROR -ne 0 ]; then c_error "Failed to checkout [$COMMIT_SHA1]" -- GitLab From 6c15dbc18630bee19f1ded79c090efa2364195ce Mon Sep 17 00:00:00 2001 From: Patrick Bellasi Date: Tue, 10 Oct 2017 11:19:07 +0100 Subject: [PATCH 10/84] tools/wltests: cosmetics Signed-off-by: Patrick Bellasi --- tools/wltests/test_series | 207 +++++++++++++++++++++----------------- 1 file changed, 113 insertions(+), 94 deletions(-) diff --git a/tools/wltests/test_series b/tools/wltests/test_series index 5ceed629f..ee86a937d 100755 --- a/tools/wltests/test_series +++ b/tools/wltests/test_series @@ -63,125 +63,138 @@ Usage: [ENV]... $(basename $0) -p PLATFORM -k KERNEL_SRC -s SERIES -t TEST_CMD [ Checkout, compile, flash a predefined set of kernel COMMITS and run a custom SCRIPT for each of them. -Mandatory arguments: +Kernel to test: - -p, --platform the platform to target - available platforms are the subfolders of the - top level "platforms" folder. - default: PLATFORM=juno_android-4.4 - - -k, --kernel_src PATH The kernel source tree to use + -k, --kernel_src PATH The kernel source tree to use. Default: KERNEL_SRC=../kernel - -s, --series the series of commits to test - this is a list which can be generated with: + -s, --series PATH The series of commits to test. + This is the patch of the file containing a list + of patches to test. This list can be generated + with a command like: git log --no-color --oneline - Such a list can be enriched by adding an "ID:" - in front of each line. + The list obtained by the previous command can + be enriched by adding an "ID:" in front of each + line. 
If an "ID:" column is present, the commits will be considered in progressive "ID:" order while discarding those with ID=00: - default: SERIES='' + Default: SERIES='' + +Target device to use for kernel testing: + + -p, --platform The platform to target. + Available platforms are the subfolders of the + top level "platforms" folder. + Default: PLATFORM=juno_android-4.4 + + -d, --device The Android device to target. + Default DEVICE=Default - -t, --test_cmd PATH The path of a custom test script to run for - each kernel. - default: TEST_CMD='echo >>> Testing SHA1: ' +Tests to run for each kernel tested on target: -Optional arguments: + -t, --test_cmd The command line of A custom test script to run + for each kernel. + Default: TEST_CMD='echo "Test DONE!"' - --adb the ADB binary to use - default: ADB=[ANDROID_HOME/platform-tools/adb|\$(which adb)] - --fastboot the FASTBOOT binary to use - default: FASTBOOT=[ANDROID_HOME/platform-tools/fatboot|\$(which fatboot)] +Additional arguments: - --emeter the Energy Meter used to power-cycle the device - default: EMETER=ACME - --acme_ip the IP address of an ACME energy meter - default: ACME_IP=192.168.0.1 - --acme_usb the ACME channel used to control a USB - passthrought connetion - default ACME_USB=device1 + --adb The ADB binary to use. + Default: ADB=[ANDROID_HOME/platform-tools/adb|\$(which adb)] + --fastboot The FASTBOOT binary to use. + Default: FASTBOOT=[ANDROID_HOME/platform-tools/fatboot|\$(which fatboot)] - --device the Android device to target - default DEVICE=Default + --emeter The Energy Meter used to power-cycle the device. + Default: EMETER=ACME + --acme_ip The IP address of an ACME energy meter. + Default: ACME_IP=192.168.0.1 + --acme_usb The ACME channel used to control a USB + assthrought connetion. + Default ACME_USB=device1 - --reboot_timeout maximum number of seconds to wait for a device + --reboot_timeout Maximum number of seconds to wait for a device to complete the boot. 
- default REBOOT_TIMEOUT=180 + Default REBOOT_TIMEOUT=180 + + --dryrun Don't actually run any command + Deafult: DRYRUN=0 + +Example command: - --dryrun don't actually run any command - deafult: DRYRUN=0 EOF } while [[ $# -gt 0 ]]; do case $1 in - # Mandatory arguments - -p|--platform) - PLATFORM=$2 - shift - ;; - -k|--kernel_src) - KERNEL_SRC=$2 - shift - ;; - -s|--series) - SERIES=$2 - shift - ;; - -t|--test_cmd) - TEST_CMD=$2 - shift - ;; - # Tools - --adb) - ADB=$2 - shift - ;; - --fastboot) - FASTBOOT=$2 - shift - ;; + # Kernel options + -k|--kernel_src) + KERNEL_SRC=$2 + shift + ;; + -s|--series) + SERIES=$2 + shift + ;; - # Energy meter - --emeter) - EMETER=$2 - shift - ;; - --acme_ip) - ACME_IP=$2 - shift - ;; - --acme_usb) - ACME_USB=$2 - shift - ;; + # Platform options + -p|--platform) + PLATFORM=$2 + shift + ;; + -d|--device) + DEVICE=$2 + shift + ;; + # Tests options + -t|--test_cmd) + TEST_CMD=$2 + shift + ;; - # Target Device - --device) - DEVICE=$2 - shift - ;; - # Execution customization - --dryrun) - DRYRUN=1 - ;; + # Tools options + --adb) + ADB=$2 + shift + ;; + --fastboot) + FASTBOOT=$2 + shift + ;; - # Usage notes - -h|--help) - usage - exit $OK - ;; - *) - c_error "Unknown option: $1" - usage - exit $EAGAIN - ;; + # Energy meter options + --emeter) + EMETER=$2 + shift + ;; + --acme_ip) + ACME_IP=$2 + shift + ;; + --acme_usb) + ACME_USB=$2 + shift + ;; + + # Execution customization + --dryrun) + DRYRUN=1 + ;; + + # Usage notes + -h|--help) + usage + exit $OK + ;; + *) + c_error "Unknown option: $1" + usage + exit $EAGAIN + ;; esac shift # past argument or value done @@ -213,17 +226,23 @@ export PLATFORM_OVERLAY_PATH=$PLATFORM_PATH export KERNEL_SRC=$(realpath -s $KERNEL_SRC) grep -E "mainmenu .* Kernel Configuration" $KERNEL_SRC/Kconfig &>/dev/null if [ $? -ne 0 ]; then - c_error "The KERNEL_SRC seems not to a valid kernel source tree path" + echo + c_error "The \$KERNEL_SRC|--kernel_src seems not to a valid kernel source tree path" + echo exit $EINVAL fi # Prepare SERIES if [ -z $SERIES ]; then - c_error "A valid commit series should be defined by SERIES" + echo + c_error "A valid commit series should be defined by \$SERIES|--series" + echo exit $EINVAL fi if [ ! -f $SERIES ]; then - c_error "SERIES points to a non existing commit series" + echo + c_error "\$SERIES|--series points to a non existing commit series" + echo exit $ENOENT fi -- GitLab From e48844aa8c28c91b069da895b48159628f6b7391 Mon Sep 17 00:00:00 2001 From: Patrick Bellasi Date: Tue, 10 Oct 2017 11:20:05 +0100 Subject: [PATCH 11/84] tools/wltests: add --supported_platforms options Signed-off-by: Patrick Bellasi --- tools/wltests/test_series | 44 +++++++++++++++++++++++++++------------ 1 file changed, 31 insertions(+), 13 deletions(-) diff --git a/tools/wltests/test_series b/tools/wltests/test_series index ee86a937d..0210f0ac4 100755 --- a/tools/wltests/test_series +++ b/tools/wltests/test_series @@ -90,6 +90,9 @@ Target device to use for kernel testing: top level "platforms" folder. Default: PLATFORM=juno_android-4.4 + --supported_platforms Print a list of supported platforms which can + are valid values for the -p parameter. + -d, --device The Android device to target. Default DEVICE=Default @@ -127,6 +130,27 @@ Example command: EOF } +ASSETS_REQUIRED="definitions build_images flash_images" +list_supported_platforms() { + echo + echo "Supported platforms are:" + ls $BASE_DIR/platforms | while read PLAT; do + RESULT=$OK + for ASSET in $ASSETS_REQUIRED; do + if [ ! 
-f $BASE_DIR/platforms/$PLAT/$ASSET ]; then
+ RESULT=$ENOENT; break;
+ fi
+ done
+ [ $RESULT == $OK ] || continue
+ echo " - $PLAT"
+ done
+ echo
+ echo "New platforms can be added by copying and adapting one of the folders available"
+ echo "under this base path:"
+ echo " $BASE_DIR/platforms"
+ echo
+}

 while [[ $# -gt 0 ]]; do
 case $1 in

@@ -149,6 +173,11 @@
 DEVICE=$2
 shift
 ;;
+ --supported_platforms)
+ list_supported_platforms
+ exit $OK
+ ;;
+
 # Tests options
 -t|--test_cmd)
 TEST_CMD=$2
@@ -201,22 +230,11 @@
 
 # Prepare PLATFORM
 export PLATFORM_PATH=$BASE_DIR/platforms/$PLATFORM
-ASSETS_REQUIRED="definitions build_images flash_images"
 for ASSET in $ASSETS_REQUIRED; do
 if [ ! -f $PLATFORM_PATH/$ASSET ]; then
+ echo
 c_error "The specified PLATFORM=$PLATFORM is not supported, or it does not provide a [$ASSET] file"
- # List all platforms which provides all the required assets
- c_warning "Supported platforms are:"
- ls $BASE_DIR/platforms | while read PLAT; do
- RESULT=$OK
- for ASSET in $ASSETS_REQUIRED; do
- if [ ! -f $BASE_DIR/platforms/$PLAT/$ASSET ]; then
- RESULT=$ENOENT; break;
- fi
- done
- [ $RESULT == $OK ] || continue
- c_warning " - $PLAT"
- done
+ list_supported_platforms
 exit $EINVAL
 fi
 done
-- 
GitLab


From aca88ec8b42b0ec95fad48f3d483c817964d60ba Mon Sep 17 00:00:00 2001
From: Patrick Bellasi 
Date: Tue, 10 Oct 2017 11:21:14 +0100
Subject: [PATCH 12/84] tools/wltests: add --results and --force options

Signed-off-by: Patrick Bellasi 
---
 tools/wltests/test_series | 30 +++++++++++++++++++++++++++++-
 1 file changed, 29 insertions(+), 1 deletion(-)

diff --git a/tools/wltests/test_series b/tools/wltests/test_series
index 0210f0ac4..5a74b778c 100755
--- a/tools/wltests/test_series
+++ b/tools/wltests/test_series
@@ -45,6 +45,8 @@ EMETER=${EMETER:-'ACME'}
 ACME_IP=${ACME_IP:-'192.168.0.1'}
 ACME_USB=${ACME_USB:-'device1'}
 
+RESULTS=${RESULTS:-$LISA_HOME/results/wltests}
+FORCE=${FORCE:-0}
 DEVICE=${DEVICE:-''}
 REBOOT_TIMEOUT=${REBOOT_TIMEOUT:-180}
 
@@ -117,6 +119,11 @@ Additional arguments:
 passthrough connection.
 Default: ACME_USB=device1
 
+ --results PATH The base path for all the generated result folders.
+ Default: RESULTS='$RESULTS'
+
+ --force Force execution with a non-empty RESULTS folder.
+
 --reboot_timeout Maximum number of seconds to wait for a device
 to complete the boot.
 Default REBOOT_TIMEOUT=180
 
 # Execution customization
+ --results)
+ RESULTS=$2
+ shift
+ ;;
+ --force)
+ FORCE=1
+ ;;
 --dryrun)
 DRYRUN=1
 ;;
 
+# Prepare RESULTS
+if [ $FORCE -eq 0 -a \
+ -d $RESULTS -a \
+ "$(ls -A $RESULTS)" ]; then
+ echo
+ c_warning "The results folder:"
+ c_warning " $RESULTS"
+ c_warning "already exists and is not empty."
+ echo
+ c_info "Use \$FORCE|--force if you want to overwrite results in that folder"
+ echo
+ exit $EINVAL
+fi
+
 # Prepare ADB and FASTBOOT commands to target the specified device
 if [ !
-f $ADB ]; then @@ -298,7 +326,7 @@ box_out \ " PLATFORM : $PLATFORM" \ " KERNEL_SRC : $KERNEL_SRC" \ " SERIES : $SERIES" \ - " TEST_CMD : $TEST_CMD" \ + " RESULTS : $TEST_CMD_RESULTS" \ "" \ "Tools" \ " ADB : $ADB" \ -- GitLab From d4432301106cb5bd69a6bfa2a8bbe80573086147 Mon Sep 17 00:00:00 2001 From: Patrick Bellasi Date: Tue, 10 Oct 2017 11:22:13 +0100 Subject: [PATCH 13/84] tools/wltests: add --wa_agenda and -available_agendas options Signed-off-by: Patrick Bellasi --- tools/wltests/test_series | 43 ++++++++++++++++++++++++++++++++++++++- 1 file changed, 42 insertions(+), 1 deletion(-) diff --git a/tools/wltests/test_series b/tools/wltests/test_series index 5a74b778c..1859b23d2 100755 --- a/tools/wltests/test_series +++ b/tools/wltests/test_series @@ -31,6 +31,7 @@ source "${BASE_DIR}/helpers" PLATFORM=${PLATFORM:-'hikey960_android-4.4'} KERNEL_SRC=${KERNEL_SRC:-$BASE_DIR/kernel} SERIES=${SERIES:-''} +WA_AGENDA=${WA_AGENDA:-''} TEST_CMD=${TEST_CMD:-'echo "Test DONE!"'} if [ -z $ANDROID_HOME ]; then @@ -60,7 +61,7 @@ DRYRUN=${DRYRUN:-0} usage() { cat < Date: Tue, 10 Oct 2017 11:22:46 +0100 Subject: [PATCH 14/84] tools/wltests: lets --help report a usable example command line Signed-off-by: Patrick Bellasi --- tools/wltests/test_series | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tools/wltests/test_series b/tools/wltests/test_series index 1859b23d2..c0cfbcd98 100755 --- a/tools/wltests/test_series +++ b/tools/wltests/test_series @@ -140,6 +140,9 @@ Additional arguments: Example command: + $> lisa-wltest-series --kernel_src /path/to/your/kernel/hikey-linaro \\ + --series /path/to/your/series.sha1 --platform hikey960_android-4.4 \\ + --wa_agenda /path/to/your/agenda.yaml EOF } -- GitLab From 975b73e5716462cc5019f763b24ba350a29658fa Mon Sep 17 00:00:00 2001 From: Patrick Bellasi Date: Tue, 10 Oct 2017 12:36:29 +0100 Subject: [PATCH 15/84] tools/wltests: add --acme_channels and WA ACME's fragment configuration Signed-off-by: Patrick Bellasi --- tools/wltests/test_series | 30 ++++++++++++++++++++++++++++-- 1 file changed, 28 insertions(+), 2 deletions(-) diff --git a/tools/wltests/test_series b/tools/wltests/test_series index c0cfbcd98..29284bee1 100755 --- a/tools/wltests/test_series +++ b/tools/wltests/test_series @@ -45,6 +45,7 @@ fi EMETER=${EMETER:-'ACME'} ACME_IP=${ACME_IP:-'192.168.0.1'} ACME_USB=${ACME_USB:-'device1'} +ACME_CHANNELS=${ACME_CHANNELS:-'0'} RESULTS=${RESULTS:-$LISA_HOME/results/wltests} FORCE=${FORCE:-0} @@ -125,6 +126,10 @@ Additional arguments: --acme_usb The ACME channel used to control a USB assthrought connetion. Default ACME_USB=device1 + --acme_channels A space separated list for channels ID to sample. + For example, "0 2" will enabled sampling on: + iio:device0 and iio:device2 + Default: ACME_CHANNELS="0" --results PATH The base path for all the generated result folders. Default: RESULTS='$RESULTS' @@ -246,6 +251,10 @@ while [[ $# -gt 0 ]]; do ACME_USB=$2 shift ;; + --acme_channels) + ACME_CHANNELS=$2 + shift + ;; # Execution customization --results) @@ -331,8 +340,25 @@ if [ ! 
 	list_available_agendas
 	exit $EINVAL
 fi
+
+	# Generate a WA configuration fragment for the ACME cape
+	IIO_DEVICES=""
+	for CH in $ACME_CHANNELS; do
+		IIO_DEVICES="$IIO_DEVICES \"iio:device$CH\", "
+	done
+
+	ACME_CONF=$RESULTS/config_acme.yaml
+	mkdir -p $RESULTS &>/dev/null
+	cat >$ACME_CONF <<EOF
-- GitLab

From: Patrick Bellasi
Date: Tue, 10 Oct 2017 16:30:44 +0100
Subject: [PATCH 16/84] tools/wltests: make --device|$DEVICE a mandatory option

Signed-off-by: Patrick Bellasi
---
 tools/wltests/test_series | 23 +++++++++++++++++++++--
 1 file changed, 21 insertions(+), 2 deletions(-)

diff --git a/tools/wltests/test_series b/tools/wltests/test_series
index 29284bee1..a21debbee 100755
--- a/tools/wltests/test_series
+++ b/tools/wltests/test_series
@@ -49,7 +49,7 @@ ACME_CHANNELS=${ACME_CHANNELS:-'0'}
 RESULTS=${RESULTS:-$LISA_HOME/results/wltests}
 FORCE=${FORCE:-0}
 
-DEVICE=${DEVICE:-''}
+DEVICE=${DEVICE:-'C00010FFBAADA555'}
 REBOOT_TIMEOUT=${REBOOT_TIMEOUT:-180}
 DRYRUN=${DRYRUN:-0}
 
@@ -98,7 +98,9 @@
 	are valid values for the -p parameter.
 
 -d, --device	The Android device to target.
+		If ANDROID_DEVICE is set, its value will be used
+		as a default.
+		Default DEVICE=$ANDROID_DEVICE
 
 Tests to run for each kernel tested on target:
 
@@ -282,6 +284,23 @@ while [[ $# -gt 0 ]]; do
 	shift # past argument or value
 done
 
+# Prepare DEVICE
+if [ "x$DEVICE" == "xC00010FFBAADA555" ]; then
+	if [ -z $ANDROID_DEVICE ]; then
+		echo
+		c_error "Target device not set"
+		echo
+		c_info "A target device must be specified by either"
+		c_info "  - setting a value for \$ANDROID_DEVICE"
+		c_info "  - setting a value for \$DEVICE"
+		c_info "  - passing a --device option"
+		echo
+		exit $EINVAL
+	else
+		DEVICE=$ANDROID_DEVICE
+	fi
+fi
+
 # Prepare PLATFORM
 export PLATFORM_PATH=$BASE_DIR/platforms/$PLATFORM
 for ASSET in $ASSETS_REQUIRED; do
-- GitLab

From fa358b5b3c467da020acb642cc773748561cd23e Mon Sep 17 00:00:00 2001
From: Brendan Jackman
Date: Mon, 9 Oct 2017 18:16:39 +0100
Subject: [PATCH 17/84] tools/wltests: add simple example agenda for jankbench

---
 tools/wltests/agendas/example-jankbench.yaml | 9 +++++++++
 1 file changed, 9 insertions(+)
 create mode 100644 tools/wltests/agendas/example-jankbench.yaml

diff --git a/tools/wltests/agendas/example-jankbench.yaml b/tools/wltests/agendas/example-jankbench.yaml
new file mode 100644
index 000000000..3a0332e2e
--- /dev/null
+++ b/tools/wltests/agendas/example-jankbench.yaml
@@ -0,0 +1,9 @@
+# This is an example of a simple agenda: It simply runs the jankbench workload 3
+# times. The workload parameters are left to the workload's default, and target
+# and instrumentation configuration will either be read from
+# $LISA_HOME/tools/wltests/wa_user_directory/config.yaml or the defaults will be
+# used.
+
+workloads:
+  - name: jankbench
+    iterations: 3
-- GitLab

From fe97e3396d37ea65b3402c4ed4c40595689c412f Mon Sep 17 00:00:00 2001
From: Brendan Jackman
Date: Tue, 10 Oct 2017 11:26:00 +0100
Subject: [PATCH 18/84] tools/wltests: More readme tweaks

---
 tools/wltests/README.md | 62 ++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 61 insertions(+), 1 deletion(-)

diff --git a/tools/wltests/README.md b/tools/wltests/README.md
index 0a369411e..200bb0ead 100644
--- a/tools/wltests/README.md
+++ b/tools/wltests/README.md
@@ -7,6 +7,7 @@ on a remote Android target, and runs the test command for each of them.
 An IPython notebook is provided for analysing the results.
+
 ## Initialization
 
 ```bash
@@ -22,6 +23,7 @@ $> source init_env
 # Ensure your cross-compiler is in your PATH
 [LISAShell lisa] \> export PATH=/path/to/your/cross-compiler/bin:$PATH
 ```
+
 ## Prepare the target device
 
 In general your device should be pre-configured and flashed with an updated and
@@ -78,11 +80,69 @@ tools/wa_user_directory/dependencies/
 Note that the leaf filename of the .apk files is not important - the files'
 content will be inspected using Android's packaging tools.
 
+If the tool finds that an .apk file is installed on the device, but not present
+on the host, it will be pulled into your dependencies/ directory.
+
 #### Exoplayer
 
+Exoplayer is the underlying tech used by the YouTube Android app. The hope is
+that it can be used as a proxy for Youtube performance on devices where running
+Youtube itself is not practical.
+
 Exoplayer can be built from source code. Clone
 https://github.com/google/ExoPlayer, open the source tree in Android Studio, and
-compile. This should result in a file named 'demo-noExtensions-debug.apk'
+compile. This should result in a file named 'demo-noExtensions-debug.apk'.
+
+#### Jankbench
+
+You'll need to get the Jankbench .apk from Google.
+
+#### YouTube
+
+By its nature, YouTube needs to be pre-installed on the device for the
+automation to work. Note that WA3 has two YouTube workloads: The "youtube"
+workload simulates UI interactions, while the "youtube_playback" simply plays a
+video from a URL. The former workload appears to be susceptible to
+reproducibility issues as the content that is rendered (such as advertisements
+and video recommendations) can change between invocations.
+
+#### Geekbench
+
+The Geekbench automation should be pretty robust. The easiest way to get hold of
+it is probably just to install it from the Play Store. Note that as Geekbench
+poses a threat of 'phoning home', the tool marks it as dangerous. The WA3
+configuration file provided with this tool in
+$LISA_HOME/tools/wa_user_directory/config.yaml sets "allow_phone_home: false" -
+this is intended to prevent accidentally running Geekbench on a confidential
+device. Therefore you will need to override that setting. If you don't have any
+confidential devices you can simply edit that config file. Otherwise, it is best
+to create a separate per-device config file that overrides it, for example:
+
+```
+$ cat hikey960-config.yaml
+device_config:
+   device: 4669290103000000
+
+allow_phone_home: true
+```
+
+Adding `-c /path/to/hikey960-config.yaml` to the `wa` command will apply this
+configuration.
+
+#### PCMark
+
+The PCMark automation support in this tool is very limited. You'll need to
+manually install the .apk from
+http://www.futuremark.com/downloads/pcmark-android.apk, open it on the device
+and hit the 'install' button to install the 'Work' benchmark.
+Note that an Internet connection is required to complete the installation.
+Furthermore, the robustness of the UI automation is not up to the standards of
+the other workloads in WA, so there may be issues running it on untested
+devices.
+A proper solution would require writing UiAutomator code in the vein of WA's
+[Vellamo workload](https://github.com/ARM-software/workload-automation/blob/next/wa/workloads/vellamo/uiauto/app/src/main/java/com/arm/wa/uiauto/vellamo/UiAutomation.java).
+Part of the reason this hasn't been done is that PCMark displays its content in
+a WebView, which poses a challenge for automation with Android's API.
 ## Using the tool
-- GitLab

From a6fccfc584f7b1acaf96dbedba383586f12de017 Mon Sep 17 00:00:00 2001
From: Patrick Bellasi
Date: Fri, 13 Oct 2017 11:14:51 +0100
Subject: [PATCH 19/84] tools/wltests: add initial support for reports generation

This adds just a simple placeholder for the generation of reports at the
end of a test series.

For the time being we just list the ERROR warnings reported by WA at the
end of each test. These are still useful since they point directly to the
logfiles of interest.

Signed-off-by: Patrick Bellasi
---
 tools/wltests/test_series | 22 ++++++++++++++++++++++
 1 file changed, 22 insertions(+)

diff --git a/tools/wltests/test_series b/tools/wltests/test_series
index a21debbee..79f8f82bd 100755
--- a/tools/wltests/test_series
+++ b/tools/wltests/test_series
@@ -801,6 +801,25 @@ test_sha1() {
 	[ $DRYRUN -eq 1 ] || (set -x; eval $TEST_CMD; set +x)
 }
 
+report() {
+
+if [[ $TEST_CMD = *"wa run"* ]]; then
+	# Check for ERRORs reported in WorkloadAutomation log files
+	c_info "Looking for WA errors..."
+	ERRORS=$(find $RESULTS -name run.log \
+		-exec grep "WARNING executor: Please see" \{\} \;)
+	if [ -z $ERRORS ]; then
+		c_info "No Errors reported in WA logfiles"
+	else
+		c_warning "WA reported these errors:"
+		echo "$ERRORS" | while read ERR; do
+			c_warning "  $(echo $ERR | awk '{print $7}')";
+		done
+	fi
+fi
+
+}
+
 # Prepare list of commits to test
 SELECTED_LIST="$(realpath $SERIES).selected"
 FMT=$(grep -v "^#" $SERIES | grep -v "00: " | head -n1 | awk '{print $1}')
@@ -856,5 +875,8 @@
 
 done 10<$SELECTED_LIST
 
+# Generate reports
+report
+
 d_notify "Tests completed!" face-cool
-- GitLab
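The `report()` hook above just shells out to find/grep. Purely as an illustration of what it scans for, here is a rough host-side equivalent in Python; this is a minimal sketch, not part of the series: the `run.log` name and the "WARNING executor: Please see" marker are taken from the script above, while the function name and the default results path are hypothetical.

```python
# Sketch: collect the per-run WA error pointers that report() greps for.
# Marker string and run.log name come from test_series above; everything
# else (function name, default path) is illustrative only.
import os

MARKER = 'WARNING executor: Please see'

def wa_error_logs(results_dir):
    """Yield (run.log path, matching line) for every WA error notice."""
    for root, _dirs, files in os.walk(os.path.expanduser(results_dir)):
        if 'run.log' not in files:
            continue
        log = os.path.join(root, 'run.log')
        with open(log) as f:
            for line in f:
                if MARKER in line:
                    yield log, line.strip()

for log, line in wa_error_logs('~/lisa/results/wltests'):
    print '{}: {}'.format(log, line)
```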
From 958a367f7c4afbd4de2eb869a547d6514c344059 Mon Sep 17 00:00:00 2001
From: Patrick Bellasi
Date: Fri, 13 Oct 2017 11:15:45 +0100
Subject: [PATCH 20/84] lisa_shell: wltests: add devlib to the local virtualenv

Signed-off-by: Patrick Bellasi
---
 src/shell/lisa_shell | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/src/shell/lisa_shell b/src/shell/lisa_shell
index 944807a53..3a2bdcfc0 100755
--- a/src/shell/lisa_shell
+++ b/src/shell/lisa_shell
@@ -384,6 +384,7 @@ function lisa-wltest-init {
 export WLTEST_HOME="$LISA_HOME/tools/wltests"
 export WLTEST_VENV="$LISA_HOME/tools/wa_venv"
 export WLTEST_WA="$LISA_HOME/tools/workload-automation"
+export WLTEST_DL="$LISA_HOME/libs/devlib"
 export WA_USER_DIRECTORY="$LISA_HOME/tools/wa_user_directory"
 
 # If the python virtual env exists:
@@ -416,6 +417,7 @@
 # Clone Workload Automation and install it in the virtual environment
 _lisa-update-submodules
 pip install -e $WLTEST_WA
+pip install -e $WLTEST_DL
 }
 
 function lisa-wltest-series {
-- GitLab

From 9333fdc1194fe3190e36692799640552915ee053 Mon Sep 17 00:00:00 2001
From: Patrick Bellasi
Date: Fri, 13 Oct 2017 11:22:53 +0100
Subject: [PATCH 21/84] lisa_shell: wltests: add a simple cleanup function

This can be useful in case a user experiences issues with the wltests
setup and would like to start with a fresh installation.

Signed-off-by: Patrick Bellasi
---
 src/shell/lisa_shell | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/src/shell/lisa_shell b/src/shell/lisa_shell
index 3a2bdcfc0..82f00de12 100755
--- a/src/shell/lisa_shell
+++ b/src/shell/lisa_shell
@@ -380,11 +380,17 @@ echo
 # LISA Workloads utility functions
 ################################################################################
 
+export WLTEST_VENV="$LISA_HOME/tools/wa_venv"
+
+function lisa-wltests-cleanup {
+[ -d $WLTEST_VENV ] || return 0
+rm -rf $WLTEST_VENV
+}
+
 function lisa-wltest-init {
 export WLTEST_HOME="$LISA_HOME/tools/wltests"
-export WLTEST_VENV="$LISA_HOME/tools/wa_venv"
-export WLTEST_WA="$LISA_HOME/tools/workload-automation"
 export WLTEST_DL="$LISA_HOME/libs/devlib"
+export WLTEST_WA="$LISA_HOME/tools/workload-automation"
 export WA_USER_DIRECTORY="$LISA_HOME/tools/wa_user_directory"
 
 # If the python virtual env exists:
-- GitLab

From 576ed0c4023381a5f7accc2504ea18f5a10e4313 Mon Sep 17 00:00:00 2001
From: Patrick Bellasi
Date: Fri, 13 Oct 2017 11:56:20 +0100
Subject: [PATCH 22/84] tools/wltests: tune ftrace global configuration

By default we do not want WA to spend time reporting traces, which also
helps reduce the amount of data collected by the experiments.

The trace buffer size is also conservatively increased to 100M (102400
KB) per CPU, which should be a good starting point for many workloads.

Signed-off-by: Patrick Bellasi
---
 tools/wa_user_directory/config.yaml | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/tools/wa_user_directory/config.yaml b/tools/wa_user_directory/config.yaml
index 1d756215a..a56448a9a 100644
--- a/tools/wa_user_directory/config.yaml
+++ b/tools/wa_user_directory/config.yaml
@@ -19,5 +19,9 @@ energy_measurement:
     # If iio-capture is not in your $PATH, uncomment this and set the path.
     # iio-capture: /path/to/iio-capture
 
+trace_cmd:
+    buffer_size: 102400
+    report: false
+
 # Disable re-trying things that go wrong
 max_retries: 0
-- GitLab

From e7603e92e924b783e349a75fd713434ccf66ed96 Mon Sep 17 00:00:00 2001
From: Patrick Bellasi
Date: Fri, 13 Oct 2017 11:58:25 +0100
Subject: [PATCH 23/84] lisa_shell: wltests: use virtualenv only when required

We likely need the virtualenv only while WA3 is not yet mainline. Let's
ensure the LISA Shell keeps using pre-configured Python modules.

Signed-off-by: Patrick Bellasi
---
 src/shell/lisa_shell | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/src/shell/lisa_shell b/src/shell/lisa_shell
index 82f00de12..b96f2434a 100755
--- a/src/shell/lisa_shell
+++ b/src/shell/lisa_shell
@@ -424,6 +424,9 @@
 _lisa-update-submodules
 pip install -e $WLTEST_WA
 pip install -e $WLTEST_DL
+
+# Leave the virtualenv once setup completed
+deactivate
 }
 
 function lisa-wltest-series {
@@ -465,6 +468,9 @@
 echo $PATH | grep wa_venv &>/dev/null
 
 # Run the build's provided test_series
 $WLTEST_HOME/test_series "$@"
+
+# Leave the virtualenv once tests completed
+deactivate
 }
 
 ################################################################################
-- GitLab

From d1e888d9b00651395aea4530058b1e3936389291 Mon Sep 17 00:00:00 2001
From: Patrick Bellasi
Date: Mon, 16 Oct 2017 14:22:28 +0100
Subject: [PATCH 24/84] tools/wltests: make empty tests more robust

Signed-off-by: Patrick Bellasi
---
 tools/wltests/test_series | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/tools/wltests/test_series b/tools/wltests/test_series
index 79f8f82bd..6cc612965 100755
--- a/tools/wltests/test_series
+++ b/tools/wltests/test_series
@@ -34,7 +34,7 @@
 SERIES=${SERIES:-''}
 WA_AGENDA=${WA_AGENDA:-''}
 TEST_CMD=${TEST_CMD:-'echo "Test DONE!"'}
 
-if [ -z $ANDROID_HOME ]; then
+if [ -z "$ANDROID_HOME" ]; then
 	ADB=${ADB:-$(which adb)}
 	FASTBOOT=${FASTBOOT:-$(which fastboot)}
 else
@@ -286,7 +286,7 @@ done
 
 # Prepare DEVICE
 if [ "x$DEVICE" == "xC00010FFBAADA555" ]; then
-	if [ -z $ANDROID_DEVICE ]; then
+	if [ -z "$ANDROID_DEVICE" ]; then
 		echo
 		c_error "Target device not set"
 		echo
@@ -324,7 +324,7 @@
 if [ $? 
-ne 0 ]; then fi # Prepare SERIES -if [ -z $SERIES ]; then +if [ -z "$SERIES" ]; then echo c_error "A valid commit series should be defined by \$SERIES|--series" echo @@ -808,7 +808,7 @@ if [[ $TEST_CMD = *"wa run"* ]]; then c_info "Looking for WA errors..." ERRORS=$(find $RESULTS -name run.log \ -exec grep "WARNING executor: Please see" \{\} \;) - if [ -z $ERRORS ]; then + if [ -z "$ERRORS" ]; then c_info "No Errors reported in WA logfiles" else c_warning "WA reported these errors:" -- GitLab From 222190430a48c8bbaefe95c9061e871324dab582 Mon Sep 17 00:00:00 2001 From: Brendan Jackman Date: Wed, 18 Oct 2017 14:30:52 +0100 Subject: [PATCH 25/84] tools/wltests: disable airplane mode for geekbench --- tools/wltests/agendas/example-rich.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tools/wltests/agendas/example-rich.yaml b/tools/wltests/agendas/example-rich.yaml index 2ac07d45f..8a069112c 100644 --- a/tools/wltests/agendas/example-rich.yaml +++ b/tools/wltests/agendas/example-rich.yaml @@ -62,6 +62,8 @@ workloads: - name: geekbench id: geekbench + runtime_parameters: + airplane_mode: false # We need one entry for each of the Jankbench sub-benchmarks - name: jankbench -- GitLab From 746543596aeb687ee1977ac96df40e4accaa48f3 Mon Sep 17 00:00:00 2001 From: Patrick Bellasi Date: Thu, 19 Oct 2017 10:59:12 +0100 Subject: [PATCH 26/84] tools/wltests: simplify code considering that $DEVICE is always defined Device is a mandatory parameter now, let's remove the cases for when it was not defined. Signed-off-by: Patrick Bellasi --- tools/wltests/test_series | 20 ++++---------------- 1 file changed, 4 insertions(+), 16 deletions(-) diff --git a/tools/wltests/test_series b/tools/wltests/test_series index 6cc612965..75c68bb94 100755 --- a/tools/wltests/test_series +++ b/tools/wltests/test_series @@ -389,12 +389,8 @@ if [ ! -f $FASTBOOT ]; then c_error "FASTBOOT command [$FASTBOOT] not found" exit $EINVAL fi -if [ "x$DEVICE" != "x" ]; then - ADB="$ADB -s $DEVICE" - FASTBOOT="$FASTBOOT -s $DEVICE" -fi -export ADB -export FASTBOOT +export ADB="$ADB -s $DEVICE" +export FASTBOOT="$FASTBOOT -s $DEVICE" # Prepare Energy Meter device case $EMETER in @@ -467,11 +463,7 @@ usb_connect() { # FASTBOOT mode checking ################################################################################ device_in_fastboot() { - if [ "x$DEVICE" != "x" ]; then - [[ $($FASTBOOT devices | grep $DEVICE | wc -l) -gt 0 ]] || return $ENODEV - else - [[ $($FASTBOOT devices | wc -l) -gt 0 ]] || return $ENODEV - fi + [[ $($FASTBOOT devices | grep $DEVICE | wc -l) -gt 0 ]] || return $ENODEV return $OK } device_not_in_fastboot() { @@ -483,11 +475,7 @@ device_not_in_fastboot() { # ADB mode checking ################################################################################ device_in_adb() { - if [ "x$DEVICE" != "x" ]; then - [[ $(adb devices | grep $DEVICE | wc -l) -gt 0 ]] || return $ENODEV - else - [[ $(adb devices | wc -l) -gt 2 ]] || return $ENODEV - fi + [[ $(adb devices | grep -e $DEVICE | wc -l) -gt 0 ]] || return $ENODEV return $OK } device_not_in_adb() { -- GitLab From 60796d6b8980341f7c8b42453d433df6ea65ef76 Mon Sep 17 00:00:00 2001 From: Patrick Bellasi Date: Thu, 19 Oct 2017 11:04:31 +0100 Subject: [PATCH 27/84] tools/wltests: better check for device being online Adb can report a device as being 'offline', let's refine the devices report for the expected 'device' string when they are actually available. Use the same cure for fastboot. 
Signed-off-by: Patrick Bellasi
---
 tools/wltests/test_series | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tools/wltests/test_series b/tools/wltests/test_series
index 75c68bb94..5c89e0f88 100755
--- a/tools/wltests/test_series
+++ b/tools/wltests/test_series
@@ -463,7 +463,7 @@ usb_connect() {
 # FASTBOOT mode checking
 ################################################################################
 device_in_fastboot() {
-	[[ $($FASTBOOT devices | grep $DEVICE | wc -l) -gt 0 ]] || return $ENODEV
+	[[ $(fastboot devices | grep -e "$DEVICE.*fastboot" | wc -l) -gt 0 ]] || return $ENODEV
 	return $OK
 }
 device_not_in_fastboot() {
@@ -475,7 +475,7 @@
 # ADB mode checking
 ################################################################################
 device_in_adb() {
-	[[ $(adb devices | grep -e $DEVICE | wc -l) -gt 0 ]] || return $ENODEV
+	[[ $(adb devices | grep -e "$DEVICE.*device" | wc -l) -gt 0 ]] || return $ENODEV
 	return $OK
 }
 device_not_in_adb() {
-- GitLab

From e2dc6ab5726874b6b2bff59bb6e1a942595d18c0 Mon Sep 17 00:00:00 2001
From: Brendan Jackman
Date: Tue, 5 Sep 2017 18:03:33 +0100
Subject: [PATCH 28/84] cpus_analysis: Add DataFrame getter for CPU wakeup events

---
 libs/utils/analysis/cpus_analysis.py | 26 +++++++++++++++++++++++++
 tests/lisa/test_trace.py             | 29 ++++++++++++++++++++++++++++
 2 files changed, 55 insertions(+)

diff --git a/libs/utils/analysis/cpus_analysis.py b/libs/utils/analysis/cpus_analysis.py
index d28f5b20c..416be843c 100644
--- a/libs/utils/analysis/cpus_analysis.py
+++ b/libs/utils/analysis/cpus_analysis.py
@@ -61,6 +61,32 @@ class CpusAnalysis(AnalysisModule):
         ctx_sw_df.index.name = 'cpu'
         return ctx_sw_df
 
+    def _dfg_cpu_wakeups(self, cpus=None):
+        """
+        Get a DataFrame showing when a CPU was woken from idle
+
+        :param cpus: List of CPUs to find wakeups for. If None, all CPUs.
+        :type cpus: list(int) or None
+
+        :returns: :mod:`pandas.DataFrame` with one column ``cpu``, where each
+                  row shows a time when the given ``cpu`` was woken up from
+                  idle.
+        """
+        if not self._trace.hasEvents('cpu_idle'):
+            self._log.warning('Events [cpu_idle] not found, cannot '
+                              'get CPU wakeup events.')
+            return None
+
+        cpus = cpus or range(self._trace.platform['cpus_count'])
+
+        sr = pd.Series()
+        for cpu in cpus:
+            cpu_sr = self._trace.getCPUActiveSignal(cpu)
+            cpu_sr = cpu_sr[cpu_sr == 1]
+            cpu_sr = cpu_sr.replace(1, cpu)
+            sr = sr.append(cpu_sr)
+
+        return pd.DataFrame({'cpu': sr}).sort_index()
+
 ###############################################################################
 # Plotting Methods

diff --git a/tests/lisa/test_trace.py b/tests/lisa/test_trace.py
index 25ccba24d..400f0f243 100644
--- a/tests/lisa/test_trace.py
+++ b/tests/lisa/test_trace.py
@@ -162,6 +162,35 @@ class TestTrace(TestCase):
 
         self.assertEqual(trace.platform['cpus_count'], 3)
 
+    def test_dfg_cpu_wakeups(self):
+        """
+        Test the cpu_wakeups DataFrame getter
+        """
+        trace = self.make_trace("""
+          <idle>-0  [004]  519.021928: cpu_idle: state=4294967295 cpu_id=4
+          <idle>-0  [004]  519.022147: cpu_idle: state=0 cpu_id=4
+          <idle>-0  [004]  519.022641: cpu_idle: state=4294967295 cpu_id=4
+          <idle>-0  [001]  519.022642: cpu_idle: state=4294967295 cpu_id=1
+          <idle>-0  [002]  519.022643: cpu_idle: state=4294967295 cpu_id=2
+          <idle>-0  [001]  519.022788: cpu_idle: state=0 cpu_id=1
+          <idle>-0  [002]  519.022831: cpu_idle: state=2 cpu_id=2
+          <idle>-0  [003]  519.022867: cpu_idle: state=4294967295 cpu_id=3
+          <idle>-0  [003]  519.023045: cpu_idle: state=2 cpu_id=3
+          <idle>-0  [004]  519.023080: cpu_idle: state=1 cpu_id=4
+        """)
+
+        df = trace.data_frame.cpu_wakeups()
+
+        exp_index = [519.021928, 519.022641, 519.022642, 519.022643, 519.022867]
+        exp_cpus  = [         4,          4,          1,          2,          3]
+
+        self.assertListEqual(df.index.tolist(), exp_index)
+        self.assertListEqual(df.cpu.tolist(), exp_cpus)
+
+        df = trace.data_frame.cpu_wakeups([2])
+
+        self.assertListEqual(df.index.tolist(), [519.022643])
+        self.assertListEqual(df.cpu.tolist(), [2])
+
 class TestTraceNoClusterData(TestTrace):
     """
     Test Trace without cluster data
-- GitLab
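Since the getter is exposed through the `Trace.data_frame` accessor (the `_dfg_` prefix is dropped, as the test above shows), a minimal usage sketch looks like this. It is not part of the patch: the trace path and the platform dict are made up, and `Trace` is instantiated the same way as elsewhere in this series.

```python
# Sketch: count and inspect CPU wakeups in a trace using the new getter.
# The trace path and platform description below are hypothetical.
from trace import Trace

platform = {'cpus_count': 8}  # minimal platform description, assumed
trace = Trace(platform, '/path/to/trace.dat', ['cpu_idle'])

# All wakeups, as a DataFrame indexed by time with a 'cpu' column
df = trace.data_frame.cpu_wakeups()
print 'Total wakeups: {}'.format(len(df))

# Wakeups on CPU2 only, mirroring the unit test above
df2 = trace.data_frame.cpu_wakeups(cpus=[2])
print df2.head()
```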
From be2203092eb43238d1ef95e1fe15417b41042b51 Mon Sep 17 00:00:00 2001
From: Brendan Jackman
Date: Wed, 27 Sep 2017 11:21:54 +0100
Subject: [PATCH 29/84] trace: Drop NaNs from CPU active signal in getClusterActiveSignal

Some CPUs' active signals may begin earlier than others. In that case,
despite the fillna, we may still have NaNs, which break the reduce()
call. Therefore just drop rows with NaNs.

---
 libs/utils/trace.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/libs/utils/trace.py b/libs/utils/trace.py
index 93a379e88..c7adabdb3 100644
--- a/libs/utils/trace.py
+++ b/libs/utils/trace.py
@@ -847,6 +847,10 @@ class Trace(object):
         )
         active.fillna(method='ffill', inplace=True)
 
+        # There might be NaNs in the signal where we got data from some CPUs
+        # before others. That will break the .astype(int) below, so drop rows
+        # with NaN in them.
+        active.dropna(inplace=True)
 
         # Cluster active is the OR between the actives on each CPU
         # belonging to that specific cluster
-- GitLab
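To illustrate the failure mode this patch guards against, here is a self-contained sketch (not part of the patch; the CPU numbers and timestamps are made up, and max() stands in for the OR-reduce used in the real code):

```python
# Sketch: per-CPU active signals can start at different timestamps, so
# the combined cluster signal contains leading NaNs that ffill has
# nothing to fill with; .astype(int) would then raise.
import pandas as pd

cpu0 = pd.Series([0, 1, 0], index=[1.0, 2.0, 3.0])  # data from t=1.0
cpu1 = pd.Series([1, 0], index=[2.5, 3.5])          # data only from t=2.5

active = pd.concat([cpu0, cpu1], axis=1, keys=[0, 1])
active.fillna(method='ffill', inplace=True)
# CPU1's column is still NaN for t < 2.5; drop those rows, as the patch does
active.dropna(inplace=True)

cluster_active = active.max(axis=1).astype(int)  # OR across the cluster
print cluster_active
```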
From 06347ac6254b0f302020798ed1bb54888e745f3f Mon Sep 17 00:00:00 2001
From: Brendan Jackman
Date: Fri, 29 Sep 2017 14:43:00 +0100
Subject: [PATCH 30/84] tools/wa_results_collector: Add WA3 results collector

---
 libs/utils/wa_results_collector.py | 775 +++++++++++++++++++++++++++++
 1 file changed, 775 insertions(+)
 create mode 100644 libs/utils/wa_results_collector.py

diff --git a/libs/utils/wa_results_collector.py b/libs/utils/wa_results_collector.py
new file mode 100644
index 000000000..eb7922511
--- /dev/null
+++ b/libs/utils/wa_results_collector.py
@@ -0,0 +1,775 @@
+# Copyright 2017 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from collections import namedtuple, defaultdict
+import csv
+import json
+import numpy as np
+import os
+import pandas as pd
+import subprocess
+
+from scipy.stats import ttest_ind
+import matplotlib.cm as cm
+import matplotlib.pyplot as plt
+
+from bart.common.Utils import area_under_curve
+from devlib.target import KernelVersion
+from trappy.utils import handle_duplicate_index
+
+from trace import Trace
+
+def git_find_shortest_symref(repo_path, sha1_in):
+    """
+    Find the shortest symbolic reference (branch/tag) to a Git SHA1
+
+    Returns None if nothing points to the requested SHA1
+    """
+    repo_path = os.path.expanduser(repo_path)
+    possibles = []
+    # Can't use git for-each-ref --points-at because it only came in in Git 2.7
+    # which is not in Ubuntu 14.04 - check by hand instead.
+    branches = subprocess.check_output(
+        "git for-each-ref --sort=-committerdate "
+        "--format='%(objectname:short) %(refname:short)' "
+        "refs/heads/ refs/remotes/ refs/tags",
+        cwd=repo_path, shell=True)
+    for line in branches.splitlines():
+        try:
+            sha1_out, name = line.strip().split()
+        except:
+            continue
+        if sha1_out[:7] == sha1_in[:7]:
+            possibles.append(name)
+    if not possibles:
+        return None
+
+    return min(possibles, key=len)
+
+class WaResultsCollector(object):
+    """
+    Collects, analyses and visualises results from multiple WA3 directories
+
+    Takes a list of output directories from Workload Automation 3 and parses
+    them. Finds metrics reported by WA itself, and extends those metrics with
+    extra detail extracted from ftrace files, energy instrumentation output, and
+    workload-specific artifacts that are found in the output.
+
+    Results can be grouped according to the following terms:
+
+    - 'metric' is a specific measurable quantity such as a single frame's
+      rendering time or the average energy consumed during a workload run.
+
+    - 'workload' is the general name of a workload such as 'jankbench' or
+      'youtube'.
+
+    - 'test' is a more specific identification for a workload - for example this
+      might identify one of Jankbench's sub-benchmarks, or specifically playing
+      a certain video on Youtube for 30s.
+
+      WaResultsCollector ultimately derives 'test' names from the
+      'classifiers'::'test' field of the WA3 agenda file's 'workloads' entries.
+
+    - 'tag' is an identifier for a set of run-time target configurations that
+      the target was run under. For example there might exist one 'tag'
+      identifying running under the schedutil governor and another for the
+      performance governor.
+
+      WaResultsCollector ultimately derives 'tag' names from the 'classifiers'
+      field of the WA3 agenda file's 'sections' entries.
+
+    - 'kernel' identifies the kernel that was running when the metric was
+      collected. This may be a SHA1 or a symbolic ref (branch/tag) derived from
+      a provided Git repository. To try to keep identifiers readable, common
+      prefixes of refs are removed: if the raw refs are 'test/foo/bar' and
+      'test/foo/baz', they will be referred to just as 'bar' and 'baz'.
+
+    Aside from the provided helper attributes, all metrics are exposed in a
+    DataFrame as the ``results_df`` attribute.
+
+    :param wa_dirs: List of paths to WA3 output directories
+
+    :param platform: Optional LISA platform description. If provided, used to
+                     enrich extra metrics gleaned from trace analysis.
+
+    :param kernel_repo_path: Optional path to kernel repository. WA3 reports the
+                     SHA1 of the kernel that workloads were run against. If this
+                     param is provided, the repository is searched for symbolic
+                     references to replace SHA1s in data representation. This is
+                     purely to make the output more manageable for humans.
+
+    :param use_cached_trace_metrics: This class uses LISA to parse and analyse
+                     ftrace files for extra metrics. With multiple/large traces
+                     this can take some time, so the extracted metrics are
+                     cached in the provided output directories. Set this param
+                     to False to disable this caching.
+    """
+    def __init__(self, wa_dirs, platform=None, kernel_repo_path=None,
+                 use_cached_trace_metrics=True):
+        self.platform = platform
+        self.use_cached_trace_metrics = use_cached_trace_metrics
+
+        wa_dirs = [os.path.expanduser(p) for p in wa_dirs]
+
+        df = pd.DataFrame()
+        for wa_dir in wa_dirs:
+            df = df.append(self._read_wa_dir(wa_dir))
+
+        kernel_refs = {}
+        if kernel_repo_path:
+            for sha1 in df['kernel_sha1'].unique():
+                ref = git_find_shortest_symref(kernel_repo_path, sha1)
+                if ref:
+                    kernel_refs[sha1] = ref
+
+            common_prefix = os.path.commonprefix(kernel_refs.values())
+            for sha1, ref in kernel_refs.iteritems():
+                kernel_refs[sha1] = ref[len(common_prefix):]
+
+        df['kernel'] = df['kernel_sha1'].replace(kernel_refs)
+
+        self.results_df = df
+
+    def _read_wa_dir(self, wa_dir):
+        """
+        Get a DataFrame of metrics from a single WA3 output directory.
+
+        Includes the extra metrics derived from workload-specific artifacts and
+        ftrace files.
+
+        Columns returned:
+
+        kernel_sha1,kernel,id,workload,tag,test,iteration,metric,value,units
+        """
+        # A WA output directory looks something like:
+        #
+        # wa_output/
+        # |- __meta/
+        # |  | - jobs.json
+        # |  |   (some other bits)
+        # |- results.csv
+        # |- pelt-wk1-jankbench-1/
+        # |  | - result.json
+        # |  |   (other results from iteration 1 of pelt-wk1, which is a
+        # |  |    jankbench job)
+        # |- pelt-wk1-jankbench-2/
+        #    [etc]
+
+        # results.csv contains all the metrics reported by WA for all jobs.
+        df = pd.read_csv(os.path.join(wa_dir, 'results.csv'))
+
+        # __meta/jobs.json describes the jobs that were run - we can use this to
+        # find extra artifacts (like traces and detailed energy measurement
+        # data) from the jobs, which we'll use to add additional metrics that WA
+        # didn't report itself.
+ with open(os.path.join(wa_dir, '__meta', 'jobs.json')) as f: + jobs = json.load(f)['jobs'] + + subdirs_done = [] + + # Keep track of how many times we've seen each job id so we know which + # iteration to look at (If we use the proper WA3 API this awkwardness + # isn't necessary). + next_iteration = defaultdict(lambda: 1) + + # Dicts mapping job IDs to things determined about the job - this will + # be used to add extra columns to the DataFrame (that aren't reported + # directly in WA's results.csv) + tag_map = {} + test_map = {} + job_dir_map = {} + + for job in jobs: + workload = job['workload_name'] + + job_id = job['id'] + + # If there's a 'tag' in the 'classifiers' object, use that to + # identify the runtime configuration. If not, use a representation + # of the full key=value pairs. + classifiers = job['classifiers'] + rich_tag = ';'.join('{}={}'.format(k, v) for k, v in classifiers.iteritems()) + tag = classifiers.get('tag', rich_tag) + + if job_id in tag_map: + # Double check I didn't do a stupid + if tag_map[job_id] != tag: + raise RuntimeError('Multiple tags ({}, {}) found for job ID {}' + .format(tag, tag_map[job_id], job_id)) + + tag_map[job_id] = tag + + if 'test' in job['classifiers']: + # If the workload spec has a 'test' classifier, use that to + # identify it. + test = job['classifiers']['test'] + elif 'test' in job['workload_parameters']: + # If not, some workloads have a 'test' workload_parameter, try + # using that + test = job['workload_parameters']['test'] + else: + # Otherwise just use the workload name. + # This isn't ideal because it means the results from jobs with + # different workload parameters will be amalgamated. + test = workload + + if job_id in test_map: + # Double check I didn't do a stupid + if test_map[job_id] != test: + raise RuntimeError('Multiple tests ({}, {}) found for job ID {}' + .format(test, test_map[job_id], job_id)) + + test_map[job_id] = test + + iteration = next_iteration[job_id] + next_iteration[job_id] += 1 + + job_dir = os.path.join(wa_dir, + '-'.join([job_id, workload, str(iteration)])) + + job_dir_map[job_id] = job_dir + + # Jobs can fail due to target misconfiguration or other problems, + # without preventing us from collecting the results for the jobs + # that ran OK. + with open(os.path.join(job_dir, 'result.json')) as f: + job_result = json.load(f) + if job_result['status'] == 'FAILED': + print 'Skipping failed iteration {} of job {}'.format( + iteration, job_id) + continue + + extra_df = self._get_extra_job_metrics(job_dir, workload) + + extra_df.loc[:, 'workload'] = workload + extra_df.loc[:, 'iteration'] = iteration + extra_df.loc[:, 'id'] = job_id + extra_df.loc[:, 'tag'] = tag + extra_df.loc[:, 'test'] = test + + df = df.append(extra_df) + + df['tag'] = df['id'].replace(tag_map) + df['test'] = df['id'].replace(test_map) + # TODO: This is a bit lazy: we're storing the directory that every + # single metric came from in a DataFrame column. That's redundant really + # - instead, to get from a row in results_df to a job output directory, + # we should just store a mapping from kernel identifiers to wa_output + # directories, then derive at the job dir from that mapping plus the + # job_id+workload+iteration in the results_df row. This works fine for + # now, though - that refactoring would probably belong alongside a + # refactoring to use WA's own API for reading output directories. 
+ df['_job_dir'] = df['id'].replace(job_dir_map) + df.loc[:, 'kernel_sha1'] = self._wa_get_kernel_sha1(wa_dir) + + return df + + def _get_trace_metrics(self, trace_path): + """ + Parse a trace (or used cached results) and extract extra metrics from it + + Returns a DataFrame with columns: + + metric,value,units + """ + cache_path = os.path.join(os.path.dirname(trace_path), 'lisa_trace_metrics.csv') + if self.use_cached_trace_metrics and os.path.exists(cache_path): + return pd.read_csv(cache_path) + + # I wonder if this should go in LISA itself? Probably. + + metrics = [] + events = ['irq_handler_entry', 'cpu_frequency', 'nohz_kick', 'sched_switch', + 'sched_load_cfs_rq', 'sched_load_avg_task'] + trace = Trace(self.platform, trace_path, events) + + if hasattr(trace.data_frame, 'cpu_wakeups'): # Not merged in LISA yet + metrics.append(('cpu_wakeup_count', len(trace.data_frame.cpu_wakeups()), None)) + + # Helper to get area under curve of multiple CPU active signals + def get_cpu_time(trace, cpus): + df = pd.DataFrame([trace.getCPUActiveSignal(cpu) for cpu in cpus]) + return df.sum(axis=1).sum(axis=0) + + clusters = trace.platform.get('clusters') + if clusters: + for cluster in clusters.values(): + name = '-'.join(str(c) for c in cluster) + + df = trace.data_frame.cluster_frequency_residency(cluster) + if df is None or df.empty: + print "Can't get cluster freq residency from {}".format(trace.data_dir) + else: + df = df.reset_index() + avg_freq = (df.frequency * df.time).sum() / df.time.sum() + metric = 'avg_freq_cluster_{}'.format(name) + metrics.append((metric, avg_freq, 'MHz')) + + df = trace.data_frame.trace_event('cpu_frequency') + df = df[df.cpu == cluster[0]] + metrics.append(('freq_transition_count_{}'.format(name), len(df), None)) + + active_time = area_under_curve(trace.getClusterActiveSignal(cluster)) + metrics.append(('active_time_cluster_{}'.format(name), + active_time, 'seconds')) + + metrics.append(('cpu_time_cluster_{}'.format(name), + get_cpu_time(trace, cluster), 'cpu-seconds')) + + metrics.append(('cpu_time_total', + get_cpu_time(trace, range(trace.platform['cpus_count'])), + 'cpu-seconds')) + + event = None + if trace.hasEvents('sched_load_cfs_rq'): + event = 'sched_load_cfs_rq' + row_filter = lambda r: r.path == '/' + column = 'util' + elif trace.hasEvents('sched_load_avg_cpu'): + event = 'sched_load_avg_cpu' + row_filter = lambda r: True + column = 'util_avg' + if event: + df = trace.data_frame.trace_event(event) + util_sum = (handle_duplicate_index(df)[row_filter] + .pivot(columns='cpu')[column].ffill().sum(axis=1)) + avg_util_sum = area_under_curve(util_sum) / (util_sum.index[-1] - util_sum.index[0]) + metrics.append(('avg_util_sum', avg_util_sum, None)) + + ret = pd.DataFrame(metrics, columns=['metric', 'value', 'units']) + ret.to_csv(cache_path, index=False) + + return ret + + def _get_extra_job_metrics(self, job_dir, workload): + """ + Get extra metrics (not reported directly by WA) from a WA job output dir + + Returns a DataFrame with columns: + + metric,value,units + """ + # return + # value,metric,units + metrics_df = pd.DataFrame() + + artifacts = self._read_artifacts(job_dir) + if 'trace-cmd-bin' in artifacts: + metrics_df = metrics_df.append( + self._get_trace_metrics(artifacts['trace-cmd-bin'])) + + # The "proper" way to do this would be to use WA's 'artifacts' + # mechanism, but for now just expecting a fixed filename is fine. 
+
+        if workload == 'jankbench':
+            df = pd.read_csv(os.path.join(job_dir, 'jankbench_frames.csv'))
+            df = pd.DataFrame({'value': df['total_duration']})
+            df.loc[:, 'metric'] = 'frame_total_duration'
+            df.loc[:, 'units'] = 'ms'
+
+            metrics_df = metrics_df.append(df)
+
+        if 'energy_instrument_output' in artifacts:
+            df = pd.read_csv(artifacts['energy_instrument_output'])
+            df = pd.DataFrame({'value': df['device_power']})
+            df.loc[:, 'metric'] = 'device_power_sample'
+            df.loc[:, 'units'] = 'watts'
+
+            metrics_df = metrics_df.append(df)
+
+        return metrics_df
+
+    def _wa_get_kernel_sha1(self, wa_dir):
+        """
+        Find the SHA1 of the kernel that a WA3 run was run against
+        """
+        with open(os.path.join(wa_dir, '__meta', 'target_info.json')) as f:
+            target_info = json.load(f)
+        return KernelVersion(target_info['kernel_release']).sha1
+
+    def _select(self, tag='.*', kernel='.*', test='.*'):
+        _df = self.results_df
+        _df = _df[_df.tag.str.match(tag)]
+        _df = _df[_df.kernel.str.match(kernel)]
+        _df = _df[_df.test.str.match(test)]
+        return _df
+
+    @property
+    def kernels(self):
+        return self.results_df['kernel'].unique()
+
+    @property
+    def workloads(self):
+        return self.results_df['workload'].unique()
+
+    @property
+    def tags(self):
+        return self.results_df['tag'].unique()
+
+    def tests(self, workload=None):
+        df = self.results_df
+        if workload:
+            df = df[df['workload'] == workload]
+        return df['test'].unique()
+
+    def workload_available_metrics(self, workload):
+        return (self.results_df
+                .groupby('workload').get_group(workload)
+                ['metric'].unique())
+
+    def do_boxplots(self, workload, metric,
+                    tag='.*', kernel='.*', test='.*',
+                    by=['test', 'tag', 'kernel'], xlim=None):
+        """
+        Display boxplots of a certain metric
+
+        Creates horizontal boxplots of metrics in the results. Check
+        ``workloads`` and ``workload_available_metrics`` to find the available
+        workloads and metrics. Check ``tags``, ``tests`` and ``kernels``
+        to find the names that results can be filtered against.
+
+        :param workload: Name of workload to display metrics for
+        :param metric: Name of metric to display
+
+        :param tag: regular expression to filter tags that should be plotted
+        :param kernel: regular expression to filter kernels that should be plotted
+        :param test: regular expression to filter tests that should be plotted
+
+        :param by: List of identifiers to group output as in DataFrame.groupby.
+        """
+
+        df = (self._select(tag, kernel, test)
+              .groupby(['workload', 'metric'])
+              .get_group((workload, metric)))
+
+        units = df['units'].unique()
+        if len(units) > 1:
+            raise RuntimeError('Found different units for workload "{}" metric "{}": {}'
+                               .format(workload, metric, units))
+        [units] = units
+
+        # Sort groups by mean duration - this will be the order of the plots
+        gb = df.groupby(by)
+
+        # Convert the groupby into a DataFrame with a column for each group
+        max_group_size = max(len(group) for group in gb.groups.itervalues())
+        _df = pd.DataFrame()
+        for group_name, group in gb:
+            # Need to pad the group's column so that they all have the same
+            # length
+            padding_length = max_group_size - len(group)
+            padding = pd.Series(np.nan, index=np.arange(padding_length))
+            col = group['value'].append(padding)
+            col.index = np.arange(max_group_size)
+            _df[group_name] = col
+
+        # Sort the columns so that the groups with the lowest mean get plotted
+        # at the top
+        avgs = _df.mean()
+        avgs = avgs.sort_values(ascending=False)
+        _df = _df[avgs.index]
+
+        # Plot boxes sorted by mean
+        fig, axes = plt.subplots(figsize=(16,8))
+        _df.boxplot(ax=axes, vert=False, showmeans=True)
+        fig.suptitle('"{}" for workload "{}"'.format(metric, workload))
+        if xlim:
+            axes.set_xlim(xlim)
+        axes.set_xlabel('{} [{}]'.format(metric, units))
+        plt.show()
+
+    CDF = namedtuple('CDF', ['df', 'threshold', 'above', 'below'])
+
+    def _get_cdf(self, data, threshold):
+        """
+        Build the "Cumulative Distribution Function" (CDF) for the given data
+        """
+        # Build the series of sorted values
+        ser = data.sort_values()
+        if len(ser) < 1000:
+            # Append again the last (and largest) value.
+            # This step is important especially for small sample sizes
+            # in order to get an unbiased CDF
+            ser = ser.append(pd.Series(ser.iloc[-1]))
+        df = pd.Series(np.linspace(0., 1., len(ser)), index=ser)
+
+        # Compute percentage of samples above/below the specified threshold
+        below = float(max(df[:threshold]))
+        above = 1 - below
+        return self.CDF(df, threshold, above, below)
+
+    def plot_cdf(self, workload='jankbench', metric='frame_total_duration',
+                 threshold=16, tag='.*', kernel='.*', test='.*'):
+        """
+        Display cumulative distribution functions of a certain metric
+
+        Draws CDFs of metrics in the results. Check ``workloads`` and
+        ``workload_available_metrics`` to find the available workloads and
+        metrics. Check ``tags``, ``tests`` and ``kernels`` to find the
+        names that results can be filtered against.
+
+        The most likely use-case for this is plotting frame rendering times
+        under Jankbench, so default parameters are provided to make this easy.
+
+        :param workload: Name of workload to display metrics for
+        :param metric: Name of metric to display
+
+        :param threshold: Value to highlight in the plot - the likely use for
+                          this is highlighting the maximum acceptable
+                          frame-rendering time in order to see at a glance the
+                          rough proportion of frames that were rendered in time.
+
+        :param tag: regular expression to filter tags that should be plotted
+        :param kernel: regular expression to filter kernels that should be plotted
+        :param test: regular expression to filter tests that should be plotted
+
+        :param by: List of identifiers to group output as in DataFrame.groupby.
+        """
+
+        df = (self._select(tag, kernel, test)
+              .groupby(['workload', 'metric'])
+              .get_group((workload, metric)))
+
+        units = df['units'].unique()
+        if len(units) > 1:
+            raise RuntimeError('Found different units for workload "{}" metric "{}": {}'
+                               .format(workload, metric, units))
+        [units] = units
+
+        test_cnt = len(df.groupby(['test', 'tag', 'kernel']))
+        colors = iter(cm.rainbow(np.linspace(0, 1, test_cnt+1)))
+
+        fig, axes = plt.subplots()
+        axes.axvspan(0, threshold, facecolor='g', alpha=0.1);
+
+        labels = []
+        lines = []
+        for keys, df in df.groupby(['test', 'tag', 'kernel']):
+            labels.append("{:16s}: {:32s}".format(keys[2], keys[1]))
+            color = next(colors)
+            cdf = self._get_cdf(df['value'], threshold)
+            ax = cdf.df.plot(ax=axes, legend=False, xlim=(0,None), figsize=(16, 6),
+                             title='Total duration CDF ({:.1f}% within {} [{}] threshold)'\
+                             .format(100. * cdf.below, threshold, units),
+                             label=test, color=color)
+            lines.append(ax.lines[-1])
+            axes.axhline(y=cdf.below, linewidth=1,
+                         linestyle='--', color=color)
+            print "%-32s: %-32s: %.1f" % (keys[2], keys[1], 100.*cdf.below)
+
+        axes.grid(True)
+        axes.legend(lines, labels)
+        plt.show()
+
+    def find_comparisons(self, base_id=None, by='kernel'):
+        """
+        Find metrics that changed between a baseline and variants
+
+        The notion of 'variant' and 'baseline' is defined by the `by` param. If
+        by='kernel', then `base_id` should be a kernel SHA (or whatever key the
+        'kernel' column in the results_df uses). If by='tag' then `base_id`
+        should be a WA 'tag id' (as named in the WA agenda).
+        """
+        comparisons = []
+
+        # I dunno why I wrote this with a namedtuple instead of just a dict or
+        # whatever, but it works fine
+        Comparison = namedtuple('Comparison', ['metric', 'test',
+                                               'base_id', 'base_mean', 'base_std',
+                                               'new_id', 'new_mean', 'new_std',
+                                               'diff', 'diff_pct', 'pvalue'])
+
+        # If comparing by kernel, only check comparisons where the 'tag' is the same
+        # If comparing by tag, only check where kernel is same
+        if by == 'kernel':
+            invariant = 'tag'
+        elif by == 'tag':
+            invariant = 'kernel'
+        else:
+            raise ValueError('`by` must be "kernel" or "tag"')
+
+        available_baselines = self.results_df[by].unique()
+        if base_id is None:
+            base_id = available_baselines[0]
+        if base_id not in available_baselines:
+            raise ValueError('base_id "{}" not a valid "{}" (available: {}). '
+                             'Did you mean to set by="{}"?'.format(
+                                 base_id, by, available_baselines, invariant))
+
+        for metric, metric_results in self.results_df.groupby('metric'):
+            # inv_id will either be the id of the kernel or of the tag,
+            # depending on the `by` param.
+            # So wl_inv_results will be the results entries for that workload on
+            # that kernel/tag
+            for (workload, inv_id), wl_inv_results in metric_results.groupby(['test', invariant]):
+                gb = wl_inv_results.groupby(by)['value']
+
+                if base_id not in gb.groups:
+                    print 'Skipping - No baseline results for workload [{}] {} [{}] metric [{}]'.format(
+                        workload, invariant, inv_id, metric)
+                    continue
+
+                base_results = gb.get_group(base_id)
+                base_mean = base_results.mean()
+
+                for group_id, group_results in gb:
+                    if group_id == base_id:
+                        continue
+
+                    # group_id is now a kernel id or a tag (depending on
+                    # `by`). group_results is a slice of all the rows of self.results_df
+                    # for a given metric, workload, tag/workload tuple. We
+                    # create comparison object to show how that metric changed
+                    # wrt. to the base tag/workload.
+
+                    group_mean = group_results.mean()
+                    mean_diff = group_mean - base_mean
+                    if base_mean:
+                        mean_diff_pct = mean_diff * 100. / base_mean
+                    else:
+                        # umm..?
+                        mean_diff_pct = 100
+
+                    if len(group_results) <= 1 or len(base_results) <= 1:
+                        # Can't do ttest_ind if we only have one sample. There
+                        # are proper t-tests for this, but let's just assume the
+                        # worst.
+                        pvalue = 1.0
+                    elif mean_diff == 0:
+                        # ttest_ind also gives a warning if the two data sets
+                        # are the same and have no variance. I don't know why
+                        # that is to be honest, but anyway if there's no
+                        # difference in the mean, we don't care about the
+                        # p-value.
+                        pvalue = 1.0
+                    else:
+                        # Find a p-value which hopefully represents the
+                        # (complement of the) certainty that any difference in
+                        # the mean represents something real.
+                        pvalue = ttest_ind(group_results, base_results, equal_var=False).pvalue
+
+                    comparisons.append(Comparison(
+                        metric, '_'.join([workload, str(inv_id)]),
+                        base_id, base_mean, base_results.std(),
+                        group_id, group_mean, group_results.std(),
+                        mean_diff, mean_diff_pct, pvalue))
+
+        return pd.DataFrame(comparisons)
+
+    def plot_comparisons(self, base_id=None, by='kernel'):
+        """
+        Visualise metrics that changed between a baseline and variants
+
+        The notion of 'variant' and 'baseline' is defined by the `by` param. If
+        by='kernel', then `base_id` should be a kernel SHA (or whatever key the
+        'kernel' column in the results_df uses). If by='tag' then `base_id`
+        should be a WA 'tag id' (as named in the WA agenda).
+        """
+        df = self.find_comparisons(base_id=base_id, by=by)
+
+        if df.empty:
+            print 'No comparisons by {} found'.format(by)
+            if len(self.results_df[by].unique()) == 1:
+                print '... because there is only one {} in the results'.format(by)
+            return
+
+        for test, workload_comparisons in df.groupby('test'):
+            fig, ax = plt.subplots(figsize=(15, len(workload_comparisons) / 2.))
+
+            thickness=0.3
+            pos = np.arange(len(workload_comparisons['metric'].unique()))
+            colors = ['r', 'g', 'b']
+            for i, (group, gdf) in enumerate(workload_comparisons.groupby('new_id')):
+
+                bars = ax.barh(bottom=pos + (i * thickness), width=gdf['diff_pct'],
+                               height=thickness, label=group,
+                               color=colors[i % len(colors)], align='center')
+                for bar, pvalue in zip(bars, gdf['pvalue']):
+                    bar.set_alpha(1 - (min(pvalue * 10, 0.95)))
+
+            # add some text for labels, title and axes ticks
+            ax.set_xlabel('Percent difference')
+            [baseline] = workload_comparisons['base_id'].unique()
+            ax.set_title('{}: Percent difference compared to {} \nopacity depicts p-value'
+                         .format(test, baseline))
+            ax.set_yticklabels(gdf['metric'])
+            ax.set_yticks(pos + thickness / 2)
+            # ax.set_xlim((-50, 50))
+            ax.legend(loc='best')
+
+            ax.grid(True)
+            # ax.set_xticklabels(('G1', 'G2', 'G3', 'G4', 'G5'))
+
+            plt.show()
+
+    def _read_artifacts(self, job_dir):
+        with open(os.path.join(job_dir, 'result.json')) as f:
+            ret = {a['name']: os.path.join(job_dir, a['path'])
+                   for a in json.load(f)['artifacts']}
+
+        # TODO: This is a workaround for a WA3 bug that is now
+        # fixed (a65cffb705B1 - trace-cmd: add trace files as artifacts)
+        # I'm just leaving it here so I can keep comparing data from runs before
+        # that fix went in. Once I don't need it any more, I'll remove it.
+        if 'trace.dat' in os.listdir(job_dir):
+            ret['trace-cmd-bin'] = os.path.join(job_dir, 'trace.dat')
+        return ret
+
+    def get_artifacts(self, workload='.*', tag='.*', kernel='.*', test='.*',
+                      iteration=1):
+        """
+        Get a dict mapping artifact names to file paths for a specific job.
+
+        artifact_name specifies the name of an artifact, e.g. 'trace_bin' to
+        find the ftrace file from the specific job run. The other parameters
+        should be used to uniquely identify a run of a job.
+        """
+        df = self._select(tag, kernel, test)
+        df = df[df['workload'].str.match(workload)]
+
+        job_dirs = df['_job_dir'].unique()
+
+        if len(job_dirs) > 1:
+            raise ValueError(
+                "Params for get_artifacts don't uniquely identify a job. "
+                "for workload='{}' tag='{}' kernel='{}' test='{}' iteration={}, "
+                "found:\n{}" .format(
+                    workload, tag, kernel, test, iteration, '\n'.join(job_dirs)))
+        if not job_dirs:
+            raise ValueError(
+                "No job found for "
+                "workload='{}' tag='{}' kernel='{}' test='{}' iteration={}"
+                .format(workload, tag, kernel, test, iteration))
+
+        [job_dir] = job_dirs
+        return self._read_artifacts(job_dir)
+
+    def get_artifact(self, artifact_name, workload='.*',
+                     tag='.*', kernel='.*', test='.*',
+                     iteration=1):
+        """
+        Get the path of an artifact attached to a job output.
+
+        artifact_name specifies the name of an artifact, e.g. 'trace_bin' to
+        find the ftrace file from the specific job run. The other parameters
+        should be used to uniquely identify a run of a job.
+        """
+        artifacts = self.get_artifacts(workload, tag, kernel, test, iteration)
+
+        if not artifact_name in artifacts:
+            raise ValueError("No '{}' artifact found in {}".format(
+                artifact_name, job_dir))
+
+        return artifacts[artifact_name]
-- GitLab
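A usage sketch for the collector added above. This is not part of the series: the output directory, kernel repo path and baseline id below are made up, while the class, methods and signatures are the ones defined in the patch.

```python
# Sketch: explore a set of WA3 runs with WaResultsCollector.
# All paths and the 'base' baseline id are hypothetical.
from wa_results_collector import WaResultsCollector

collector = WaResultsCollector(
    wa_dirs=['~/results/wltests/wa_output'],
    kernel_repo_path='~/kernel/hikey-linaro',  # optional, for readable refs
)

print collector.workloads           # e.g. ['jankbench', 'geekbench']
print collector.tests('jankbench')  # per-workload test names

# Boxplots of Jankbench frame durations, grouped by kernel
collector.do_boxplots('jankbench', 'frame_total_duration', by=['kernel'])

# CDF of the same metric against a 16ms rendering deadline
collector.plot_cdf(workload='jankbench', metric='frame_total_duration',
                   threshold=16)

# Tabulate and plot what changed relative to a baseline kernel
comparisons = collector.find_comparisons(base_id='base', by='kernel')
collector.plot_comparisons(base_id='base', by='kernel')
```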
From bf5ebf992d08f98f8155a06704ac9bebbb310228 Mon Sep 17 00:00:00 2001
From: Brendan Jackman
Date: Mon, 2 Oct 2017 14:57:07 +0100
Subject: [PATCH 31/84] tools/wa_results_collector: Add thermal metrics from trace

---
 libs/utils/wa_results_collector.py | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/libs/utils/wa_results_collector.py b/libs/utils/wa_results_collector.py
index eb7922511..8ea22b47f 100644
--- a/libs/utils/wa_results_collector.py
+++ b/libs/utils/wa_results_collector.py
@@ -292,7 +292,7 @@ class WaResultsCollector(object):
 
         metrics = []
         events = ['irq_handler_entry', 'cpu_frequency', 'nohz_kick', 'sched_switch',
-                  'sched_load_cfs_rq', 'sched_load_avg_task']
+                  'sched_load_cfs_rq', 'sched_load_avg_task', 'thermal_temperature']
         trace = Trace(self.platform, trace_path, events)
 
         if hasattr(trace.data_frame, 'cpu_wakeups'): # Not merged in LISA yet
@@ -348,6 +348,15 @@
             avg_util_sum = area_under_curve(util_sum) / (util_sum.index[-1] - util_sum.index[0])
             metrics.append(('avg_util_sum', avg_util_sum, None))
 
+        if trace.hasEvents('thermal_temperature'):
+            df = trace.data_frame.trace_event('thermal_temperature')
+            for zone, zone_df in df.groupby('thermal_zone'):
+                metrics.append('tz_{}_start_temp'.format(
+                    thermal_zone, zone_df.iloc[0]['temp_prev'], 'milliCelcius'))
+
+                avg_tmp = (area_under_curve(zone_df.temperature['temperature'])
+                           / (zone_df.index[-1] - zone_df.index[0]))
+
         ret = pd.DataFrame(metrics, columns=['metric', 'value', 'units'])
         ret.to_csv(cache_path, index=False)
-- GitLab

From b9b017503f7fadd1c98c92b9057bbf2f8c36e236 Mon Sep 17 00:00:00 2001
From: Brendan Jackman
Date: Tue, 10 Oct 2017 15:07:48 +0100
Subject: [PATCH 32/84] tools/wa_results_collector: Error sensibly if given no dirs

---
 libs/utils/wa_results_collector.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/libs/utils/wa_results_collector.py b/libs/utils/wa_results_collector.py
index 8ea22b47f..2df05ff32 100644
--- a/libs/utils/wa_results_collector.py
+++ b/libs/utils/wa_results_collector.py
@@ -121,6 +121,9 @@ class WaResultsCollector(object):
         self.platform = platform
         self.use_cached_trace_metrics = use_cached_trace_metrics
 
+        if not wa_dirs:
+            raise ValueError('Invalid wa_dirs ({})'.format(wa_dirs))
+
         wa_dirs = [os.path.expanduser(p) for p in wa_dirs]
 
         df = pd.DataFrame()
-- GitLab

From 9e629467a945a12b70a4427c1d1ece6aa0a04ab7 Mon Sep 17 00:00:00 2001
From: Brendan Jackman
Date: Tue, 10 Oct 2017 15:08:04 +0100
Subject: [PATCH 33/84] tools/wa_results_collector: Fix up temperature trace metrics

---
 libs/utils/wa_results_collector.py | 16 ++++++++++++----
 1 file changed, 12 insertions(+), 4 deletions(-)

diff --git a/libs/utils/wa_results_collector.py b/libs/utils/wa_results_collector.py
index 2df05ff32..5966468fa 100644
--- a/libs/utils/wa_results_collector.py
+++ b/libs/utils/wa_results_collector.py
@@ -354,11 +354,19 @@ class WaResultsCollector(object):
         if trace.hasEvents('thermal_temperature'):
             df = trace.data_frame.trace_event('thermal_temperature')
             for zone, zone_df in df.groupby('thermal_zone'):
-                metrics.append('tz_{}_start_temp'.format(
-                    thermal_zone, zone_df.iloc[0]['temp_prev'], 'milliCelcius'))
+                metrics.append(('tz_{}_start_temp'.format(zone),
+                                zone_df.iloc[0]['temp_prev'],
+                                'milliCelcius'))
 
-                avg_tmp = (area_under_curve(zone_df.temperature['temperature'])
-                           / (zone_df.index[-1] - zone_df.index[0]))
+                if len(zone_df) == 1: # Avoid division by 0
+                    avg_tmp = zone_df['temp'].iloc[0]
+                else:
+                    avg_tmp = (area_under_curve(zone_df['temp'])
+                               / (zone_df.index[-1] - zone_df.index[0]))
+
+                metrics.append(('tz_{}_avg_temp'.format(zone),
+                                avg_tmp,
+                                'milliCelcius'))
 
         ret = pd.DataFrame(metrics, columns=['metric', 'value', 'units'])
         ret.to_csv(cache_path, index=False)
-- GitLab

From dca07929d2414ba95703c4b4c0b25b6cc1b18972 Mon Sep 17 00:00:00 2001
From: Brendan Jackman
Date: Thu, 12 Oct 2017 19:13:16 +0100
Subject: [PATCH 34/84] tools/wa_results_collector: Better handle lack of data

---
 libs/utils/wa_results_collector.py | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/libs/utils/wa_results_collector.py b/libs/utils/wa_results_collector.py
index 5966468fa..f2b15cb5c 100644
--- a/libs/utils/wa_results_collector.py
+++ b/libs/utils/wa_results_collector.py
@@ -202,7 +202,7 @@ class WaResultsCollector(object):
         # If there's a 'tag' in the 'classifiers' object, use that to
         # identify the runtime configuration. If not, use a representation
         # of the full key=value pairs.
-        classifiers = job['classifiers']
+        classifiers = job['classifiers'] or {}
         rich_tag = ';'.join('{}={}'.format(k, v) for k, v in classifiers.iteritems())
         tag = classifiers.get('tag', rich_tag)
 
@@ -214,10 +214,10 @@
 
         tag_map[job_id] = tag
 
-        if 'test' in job['classifiers']:
+        if 'test' in classifiers:
             # If the workload spec has a 'test' classifier, use that to
             # identify it.
- test = job['classifiers']['test'] + test = classifiers['test'] elif 'test' in job['workload_parameters']: # If not, some workloads have a 'test' workload_parameter, try # using that @@ -255,6 +255,8 @@ class WaResultsCollector(object): continue extra_df = self._get_extra_job_metrics(job_dir, workload) + if extra_df.empty: + continue extra_df.loc[:, 'workload'] = workload extra_df.loc[:, 'iteration'] = iteration -- GitLab From 252b9fc547e63b557fabaab46d95b96bd37cd3ea Mon Sep 17 00:00:00 2001 From: Brendan Jackman Date: Fri, 13 Oct 2017 14:20:25 +0100 Subject: [PATCH 35/84] tools/wa_results_collector: Improve error message for failing to find artifact --- libs/utils/wa_results_collector.py | 28 +++++++++++++++++----------- 1 file changed, 17 insertions(+), 11 deletions(-) diff --git a/libs/utils/wa_results_collector.py b/libs/utils/wa_results_collector.py index f2b15cb5c..8197864cb 100644 --- a/libs/utils/wa_results_collector.py +++ b/libs/utils/wa_results_collector.py @@ -749,15 +749,8 @@ class WaResultsCollector(object): ret['trace-cmd-bin'] = os.path.join(job_dir, 'trace.dat') return ret - def get_artifacts(self, workload='.*', tag='.*', kernel='.*', test='.*', + def _find_job_dir(self, workload='.*', tag='.*', kernel='.*', test='.*', iteration=1): - """ - Get a dict mapping artifact names to file paths for a specific job. - - artifact_name specifies the name of an artifact, e.g. 'trace_bin' to - find the ftrace file from the specific job run. The other parameters - should be used to uniquely identify a run of a job. - """ df = self._select(tag, kernel, test) df = df[df['workload'].str.match(workload)] @@ -776,6 +769,18 @@ class WaResultsCollector(object): .format(workload, tag, kernel, test, iteration)) [job_dir] = job_dirs + return job_dir + + def get_artifacts(self, workload='.*', tag='.*', kernel='.*', test='.*', + iteration=1): + """ + Get a dict mapping artifact names to file paths for a specific job. + + artifact_name specifies the name of an artifact, e.g. 'trace_bin' to + find the ftrace file from the specific job run. The other parameters + should be used to uniquely identify a run of a job. + """ + job_dir = self._find_job_dir(workload, tag, kernel, test, iteration) return self._read_artifacts(job_dir) def get_artifact(self, artifact_name, workload='.*', @@ -788,10 +793,11 @@ class WaResultsCollector(object): find the ftrace file from the specific job run. The other parameters should be used to uniquely identify a run of a job. 
""" - artifacts = self.get_artifacts(workload, tag, kernel, test, iteration) + job_dir = self._find_job_dir(workload, tag, kernel, test, iteration) + artifacts = self._read_artifacts(job_dir) if not artifact_name in artifacts: - raise ValueError("No '{}' artifact found in {}".format( - artifact_name, job_dir)) + raise ValueError("No '{}' artifact found in {} (have {})".format( + artifact_name, job_dir, artifacts.keys())) return artifacts[artifact_name] -- GitLab From 574f10c65af68d590982ed512d130282ad6afeef Mon Sep 17 00:00:00 2001 From: Brendan Jackman Date: Fri, 13 Oct 2017 16:20:22 +0100 Subject: [PATCH 36/84] tools/wa_results_collector: Fix mean_diff_pct when metric is 0 everywhere --- libs/utils/wa_results_collector.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/libs/utils/wa_results_collector.py b/libs/utils/wa_results_collector.py index 8197864cb..53c807dcc 100644 --- a/libs/utils/wa_results_collector.py +++ b/libs/utils/wa_results_collector.py @@ -658,11 +658,18 @@ class WaResultsCollector(object): group_mean = group_results.mean() mean_diff = group_mean - base_mean - if base_mean: + # Calculate percentage difference in mean metric value + if base_mean != 0: mean_diff_pct = mean_diff * 100. / base_mean else: - # umm..? - mean_diff_pct = 100 + # base mean is 0, can't divide by that. + if group_mean == 0: + # Both are 0 so diff_pct is 0 + mean_diff_pct =0 + else: + # Tricky one - base value was 0, new value isn't. + # Let's just call it a 100% difference. + mean_diff_pct = 100 if len(group_results) <= 1 or len(base_results) <= 1: # Can't do ttest_ind if we only have one sample. There -- GitLab From ca2d09283c264b10a4de754783728f8973a1d6dc Mon Sep 17 00:00:00 2001 From: Brendan Jackman Date: Fri, 13 Oct 2017 18:14:14 +0100 Subject: [PATCH 37/84] tools/wa_results_collector: Fix bar thickness in comparisons plot --- libs/utils/wa_results_collector.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libs/utils/wa_results_collector.py b/libs/utils/wa_results_collector.py index 53c807dcc..a206dbb3e 100644 --- a/libs/utils/wa_results_collector.py +++ b/libs/utils/wa_results_collector.py @@ -717,7 +717,7 @@ class WaResultsCollector(object): for test, workload_comparisons in df.groupby('test'): fig, ax = plt.subplots(figsize=(15, len(workload_comparisons) / 2.)) - thickness=0.3 + thickness= 0.6 / len(workload_comparisons.groupby('new_id')) pos = np.arange(len(workload_comparisons['metric'].unique())) colors = ['r', 'g', 'b'] for i, (group, gdf) in enumerate(workload_comparisons.groupby('new_id')): -- GitLab From c1ad7cb3911f6483ce0314295786135279d98ba4 Mon Sep 17 00:00:00 2001 From: Brendan Jackman Date: Fri, 13 Oct 2017 18:24:19 +0100 Subject: [PATCH 38/84] tools/wa_results_collector: Try to clarify plot_comparisons That is some really incomprehensible code, so add some comments and try to improve the naming. --- libs/utils/wa_results_collector.py | 40 +++++++++++++++++++++++------- 1 file changed, 31 insertions(+), 9 deletions(-) diff --git a/libs/utils/wa_results_collector.py b/libs/utils/wa_results_collector.py index a206dbb3e..d5dbef079 100644 --- a/libs/utils/wa_results_collector.py +++ b/libs/utils/wa_results_collector.py @@ -714,23 +714,46 @@ class WaResultsCollector(object): print '... because there is only one {} in the results'.format(by) return - for test, workload_comparisons in df.groupby('test'): - fig, ax = plt.subplots(figsize=(15, len(workload_comparisons) / 2.)) + # Separate plot for each test (e.g. 
one plot for Jankbench list_view) + for test, test_comparisons in df.groupby('test'): + # Vertical size of plot depends on how many metrics we're comparing + # and how many things (kernels/tags) we're comparing metrics for. + # a.k.a the total length of the comparisons df. + fig, ax = plt.subplots(figsize=(15, len(test_comparisons) / 2.)) + + # pos is used as the Y-axis. The y-axis is a descrete axis with a + # point for each of the metrics we're comparing + num_metrics = len(test_comparisons['metric'].unique()) + pos = np.arange(num_metrics) + + # At each point on the discrete y-axis we'll have one bar for each + # comparison: one per kernel/tag (depending on the `by` param), minus + # one for the baseline. + # If there are more bars we'll need to make them thinner so they + # fit. The sum of the bars' thicknesses should be 60% of a tick on + # the 'y-axis'. + thickness= 0.6 / len(test_comparisons.groupby('new_id')) + + # TODO: something is up with the calculations above, because there's + # always a bit of empty space at the bottom of the axes. - thickness= 0.6 / len(workload_comparisons.groupby('new_id')) - pos = np.arange(len(workload_comparisons['metric'].unique())) colors = ['r', 'g', 'b'] - for i, (group, gdf) in enumerate(workload_comparisons.groupby('new_id')): - + for i, (group, gdf) in enumerate(test_comparisons.groupby('new_id')): + # For each of the things we're comparing we'll plot a bar chart + # but slightly shifted. That's how we get multiple bars on each + # y-axis point. bars = ax.barh(bottom=pos + (i * thickness), width=gdf['diff_pct'], height=thickness, label=group, color=colors[i % len(colors)], align='center') + # Decrease the opacity for comparisons with a high p-value + # TODO: This also decreases the opacity on the legend. I don't + # really know what to do about that. 
for bar, pvalue in zip(bars, gdf['pvalue']): bar.set_alpha(1 - (min(pvalue * 10, 0.95))) - # add some text for labels, title and axes ticks + # Add some text for labels, title and axes ticks ax.set_xlabel('Percent difference') - [baseline] = workload_comparisons['base_id'].unique() + [baseline] = test_comparisons['base_id'].unique() ax.set_title('{}: Percent difference compared to {} \nopacity depicts p-value' .format(test, baseline)) ax.set_yticklabels(gdf['metric']) @@ -739,7 +762,6 @@ class WaResultsCollector(object): ax.legend(loc='best') ax.grid(True) - # ax.set_xticklabels(('G1', 'G2', 'G3', 'G4', 'G5')) plt.show() -- GitLab From 28859423d3db1464ddfda0949d8e3cf22eda2102 Mon Sep 17 00:00:00 2001 From: Patrick Bellasi Date: Fri, 13 Oct 2017 16:16:08 +0100 Subject: [PATCH 39/84] tools/wa_results_collector: use LisaLogger for reporting Signed-off-by: Patrick Bellasi --- libs/utils/wa_results_collector.py | 23 +++++++++++++++-------- logging.conf | 8 +++++++- 2 files changed, 22 insertions(+), 9 deletions(-) diff --git a/libs/utils/wa_results_collector.py b/libs/utils/wa_results_collector.py index d5dbef079..88b22d154 100644 --- a/libs/utils/wa_results_collector.py +++ b/libs/utils/wa_results_collector.py @@ -20,11 +20,14 @@ import numpy as np import os import pandas as pd import subprocess +import logging from scipy.stats import ttest_ind import matplotlib.cm as cm import matplotlib.pyplot as plt +from conf import LisaLogging + from bart.common.Utils import area_under_curve from devlib.target import KernelVersion from trappy.utils import handle_duplicate_index @@ -118,6 +121,8 @@ class WaResultsCollector(object): """ def __init__(self, wa_dirs, platform=None, kernel_repo_path=None, use_cached_trace_metrics=True): + self._log = logging.getLogger('WaResultsCollector') + self.platform = platform self.use_cached_trace_metrics = use_cached_trace_metrics @@ -250,8 +255,8 @@ class WaResultsCollector(object): with open(os.path.join(job_dir, 'result.json')) as f: job_result = json.load(f) if job_result['status'] == 'FAILED': - print 'Skipping failed iteration {} of job {}'.format( - iteration, job_id) + self._log.warning('Skipping failed iteration %s of job %s', + iteration, job_id) continue extra_df = self._get_extra_job_metrics(job_dir, workload) @@ -315,7 +320,8 @@ class WaResultsCollector(object): df = trace.data_frame.cluster_frequency_residency(cluster) if df is None or df.empty: - print "Can't get cluster freq residency from {}".format(trace.data_dir) + self._log.warning("Can't get cluster freq residency from %s", + trace.data_dir) else: df = df.reset_index() avg_freq = (df.frequency * df.time).sum() / df.time.sum() @@ -589,7 +595,7 @@ class WaResultsCollector(object): lines.append(ax.lines[-1]) axes.axhline(y=cdf.below, linewidth=1, linestyle='--', color=color) - print "%-32s: %-32s: %.1f" % (keys[2], keys[1], 100.*cdf.below) + self._log.info("%-32s: %-32s: %.1f", keys[2], keys[1], 100.*cdf.below) axes.grid(True) axes.legend(lines, labels) @@ -639,8 +645,9 @@ class WaResultsCollector(object): gb = wl_inv_results.groupby(by)['value'] if base_id not in gb.groups: - print 'Skipping - No baseline results for workload [{}] {} [{}] metric [{}]'.format( - workload, invariant, inv_id, metric) + self._log.warning('Skipping - No baseline results for workload ' + '[%s] %s [%s] metric [%s]', + workload, invariant, inv_id, metric) continue base_results = gb.get_group(base_id) @@ -709,9 +716,9 @@ class WaResultsCollector(object): df = self.find_comparisons(base_id=base_id, by=by) if df.empty: - print 
'No comparisons by {} found'.format(by) + self._log.error('No comparisons by %s found', by) if len(self.results_df[by].unique()) == 1: - print '... because there is only one {} in the results'.format(by) + self._log.warning('There is only one %s in the results', by) return # Separate plot for each test (e.g. one plot for Jankbench list_view) diff --git a/logging.conf b/logging.conf index f0bca7426..4a1a85e77 100644 --- a/logging.conf +++ b/logging.conf @@ -20,7 +20,7 @@ propagate=0 # For example, to enable debugging just for the TestEnv module, you need to # uncomment the lovver_TestEnv section and set: [loggers] -keys=root,Target,AndroidTarget,android,LinuxTarget,ssh,TestEnv,LisaTest,Executor,Workload,RTApp,Benchmark,local_connection,EnergyModel +keys=root,Target,AndroidTarget,android,LinuxTarget,ssh,TestEnv,LisaTest,Executor,Workload,RTApp,Benchmark,local_connection,EnergyModel,WaResultsCollector [logger_Target] qualname=Target @@ -100,6 +100,12 @@ level=INFO handlers=consoleHandler,fileHandler propagate=0 +[logger_WaResultsCollector] +qualname=WaResultsCollector +level=INFO +handlers=consoleHandler,fileHandler +propagate=0 + ################################################################################ ### Handlers -- GitLab From 74b7ebf3b56c7f941fe83e20f23d5b032635f9a9 Mon Sep 17 00:00:00 2001 From: Patrick Bellasi Date: Fri, 13 Oct 2017 16:30:20 +0100 Subject: [PATCH 40/84] utils/git: make Git a top level API Signed-off-by: Patrick Bellasi --- libs/utils/__init__.py | 2 ++ libs/utils/git.py | 58 ++++++++++++++++++++++++++++++ libs/utils/wa_results_collector.py | 30 ++-------------- 3 files changed, 62 insertions(+), 28 deletions(-) create mode 100644 libs/utils/git.py diff --git a/libs/utils/__init__.py b/libs/utils/__init__.py index 294c6a063..7b34afc9c 100644 --- a/libs/utils/__init__.py +++ b/libs/utils/__init__.py @@ -31,4 +31,6 @@ from report import Report from analysis_register import AnalysisRegister from analysis_module import AnalysisModule +from git import Git + import android diff --git a/libs/utils/git.py b/libs/utils/git.py new file mode 100644 index 000000000..6a9e07c33 --- /dev/null +++ b/libs/utils/git.py @@ -0,0 +1,58 @@ +# SPDX-License-Identifier: Apache-2.0 +# +# Copyright (C) 2015, ARM Limited and contributors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import logging +import os +import subprocess + +class Git(object): + + @staticmethod + def find_shortest_symref(repo_path, sha1): + """ + Find the shortest symbolic reference (branch/tag) to a Git SHA1 + + :param repo_path: the path of a valid git repository + :type repo_path: str + + :param sha1: the SAH1 of a commit to lookup the reference for + :type sha1: str + + Returns None if nothing points to the requested SHA1 + """ + repo_path = os.path.expanduser(repo_path) + possibles = [] + # Can't use git for-each-ref --points-at because it only came in in Git 2.7 + # which is not in Ubuntu 14.04 - check by hand instead. 
+ branches = subprocess.check_output( + "git for-each-ref --sort=-committerdate " + "--format='%(objectname:short) %(refname:short)' " + "refs/heads/ refs/remotes/ refs/tags", + cwd=repo_path, shell=True) + for line in branches.splitlines(): + try: + sha1_out, name = line.strip().split() + except: + continue + if sha1_out[:7] == sha1[:7]: + possibles.append(name) + if not possibles: + return None + + return min(possibles, key=len) + +# vim :set tabstop=4 shiftwidth=4 expandtab diff --git a/libs/utils/wa_results_collector.py b/libs/utils/wa_results_collector.py index 88b22d154..c9b05f684 100644 --- a/libs/utils/wa_results_collector.py +++ b/libs/utils/wa_results_collector.py @@ -33,33 +33,7 @@ from devlib.target import KernelVersion from trappy.utils import handle_duplicate_index from trace import Trace - -def git_find_shortest_symref(repo_path, sha1_in): - """ - Find the shortest symbolic reference (branch/tag) to a Git SHA1 - - Returns None if nothing points to the requested SHA1 - """ - repo_path = os.path.expanduser(repo_path) - possibles = [] - # Can't use git for-each-ref --points-at because it only came in in Git 2.7 - # which is not in Ubuntu 14.04 - check by hand instead. - branches = subprocess.check_output( - "git for-each-ref --sort=-committerdate " - "--format='%(objectname:short) %(refname:short)' " - "refs/heads/ refs/remotes/ refs/tags", - cwd=repo_path, shell=True) - for line in branches.splitlines(): - try: - sha1_out, name = line.strip().split() - except: - continue - if sha1_out[:7] == sha1_in[:7]: - possibles.append(name) - if not possibles: - return None - - return min(possibles, key=len) +from git import Git class WaResultsCollector(object): """ @@ -138,7 +112,7 @@ class WaResultsCollector(object): kernel_refs = {} if kernel_repo_path: for sha1 in df['kernel_sha1'].unique(): - ref = git_find_shortest_symref(kernel_repo_path, sha1) + ref = Git.find_shortest_symref(kernel_repo_path, sha1) if ref: kernel_refs[sha1] = ref -- GitLab From 0578e11aea6fc7d5062b8ba57cbac2f2a425e228 Mon Sep 17 00:00:00 2001 From: Patrick Bellasi Date: Fri, 13 Oct 2017 17:01:48 +0100 Subject: [PATCH 41/84] utils/wa_results_collector: allow to specify WA folders by regexp Signed-off-by: Patrick Bellasi Changes by Brendan: - Changed wa_dirs default to .* - Re-expressed checks and extra init for wa_dirs and base_dir params - Reworded 'folder' -> 'directory' - Replaced use of 'os.walk' with manual 'os.listdir', because 'os.walk' doesn't provide a simple way to match on just the subdirectory (so if the wa_dirs regex matches base_dir, then we pick every directory). --- libs/utils/wa_results_collector.py | 56 ++++++++++++++++++++++++++---- 1 file changed, 49 insertions(+), 7 deletions(-) diff --git a/libs/utils/wa_results_collector.py b/libs/utils/wa_results_collector.py index c9b05f684..2ca942645 100644 --- a/libs/utils/wa_results_collector.py +++ b/libs/utils/wa_results_collector.py @@ -17,6 +17,7 @@ from collections import namedtuple, defaultdict import csv import json import numpy as np +import re import os import pandas as pd import subprocess @@ -76,7 +77,14 @@ class WaResultsCollector(object): Aside from the provided helper attributes, all metrics are exposed in a DataFrame as the ``results_df`` attribute. 
-    :param wa_dirs: List of paths to WA3 output directories
+    :param wa_dirs: List of paths to WA3 output directories, or a regexp of WA3
+                    output directory names to consider, starting from the
+                    specified base_dir
+    :type wa_dirs: str
+
+    :param base_dir: The path of a directory containing a collection of WA3
+                     output directories
+    :type base_dir: str
 
     :param platform: Optional LISA platform description. If provided, used
                      to enrich extra metrics gleaned from trace analysis.
@@ -93,18 +101,32 @@ class WaResultsCollector(object):
                                      cached in the provided output directories. Set this param
                                      to False to disable this caching.
     """
-    def __init__(self, wa_dirs, platform=None, kernel_repo_path=None,
-                 use_cached_trace_metrics=True):
+    def __init__(self, base_dir=None, wa_dirs=".*", platform=None,
+                 kernel_repo_path=None, use_cached_trace_metrics=True):
+
         self._log = logging.getLogger('WaResultsCollector')
 
-        self.platform = platform
-        self.use_cached_trace_metrics = use_cached_trace_metrics
+        if base_dir:
+            base_dir = os.path.expanduser(base_dir)
+            if not isinstance(wa_dirs, basestring):
+                raise ValueError(
+                    'If base_dir is provided, wa_dirs should be a regexp')
+            regex = wa_dirs
+            wa_dirs = self._list_wa_dirs(base_dir, regex)
+            if not wa_dirs:
+                raise ValueError("Couldn't find any WA results matching '{}' in {}"
+                                 .format(regex, base_dir))
+        else:
+            if not hasattr(wa_dirs, '__iter__'):
+                raise ValueError(
+                    'if base_dir is not provided, wa_dirs should be a list of paths')
 
-        if not wa_dirs:
-            raise ValueError('Invalid wa_dirs ({})'.format(wa_dirs))
         wa_dirs = [os.path.expanduser(p) for p in wa_dirs]
 
+        self.platform = platform
+        self.use_cached_trace_metrics = use_cached_trace_metrics
+
         df = pd.DataFrame()
         for wa_dir in wa_dirs:
             df = df.append(self._read_wa_dir(wa_dir))
@@ -124,6 +146,26 @@ class WaResultsCollector(object):
 
         self.results_df = df
 
+    def _list_wa_dirs(self, base_dir, wa_dirs_re):
+        dirs = []
+        self._log.info("Processing WA3 dirs matching [%s], rooted at %s",
+                       wa_dirs_re, base_dir)
+        wa_dirs_re = re.compile(wa_dirs_re)
+
+        for subdir in os.listdir(base_dir):
+            dir = os.path.join(base_dir, subdir)
+            if not os.path.isdir(dir) or not wa_dirs_re.search(subdir):
+                continue
+
+            # WA3 results dirs contain a __meta directory at the top level.
+            if '__meta' not in os.listdir(dir):
+                self._log.warning('Ignoring {}, does not contain __meta directory'.format(dir))
+                continue
+
+            dirs.append(dir)
+
+        return dirs
+
     def _read_wa_dir(self, wa_dir):
         """
         Get a DataFrame of metrics from a single WA3 output directory.
-- 
GitLab


From 9a458edf200ce56f708a13651dce6e390b51b9bd Mon Sep 17 00:00:00 2001
From: Patrick Bellasi
Date: Fri, 13 Oct 2017 18:26:16 +0100
Subject: [PATCH 42/84] tools/wa_results_collector: nicely complain for not
 valid filter

Let's warn the user when filters are not valid, instead of raising a
confusing KeyError exception.

Signed-off-by: Patrick Bellasi
---
 libs/utils/wa_results_collector.py | 23 +++++++++++++++++------
 1 file changed, 17 insertions(+), 6 deletions(-)

diff --git a/libs/utils/wa_results_collector.py b/libs/utils/wa_results_collector.py
index 2ca942645..ccc6d7211 100644
--- a/libs/utils/wa_results_collector.py
+++ b/libs/utils/wa_results_collector.py
@@ -22,6 +22,7 @@ import os
 import pandas as pd
 import subprocess
 import logging
+import warnings
 from scipy.stats import ttest_ind
 import matplotlib.cm as cm
 import matplotlib.pyplot as plt
@@ -494,9 +495,14 @@ class WaResultsCollector(object):
 
         :param by: List of identifiers to group output as in DataFrame.groupby.
""" - df = (self._select(tag, kernel, test) - .groupby(['workload', 'metric']) - .get_group((workload, metric))) + df = self._select(tag, kernel, test) + if df.empty: + self._log.warn("No data to plot for (tag: %s, kernel: %s, test: %s)", + tag, kernel, test) + return None + + df = (df.groupby(['workload', 'metric']) + .get_group((workload, metric))) units = df['units'].unique() if len(units) > 1: @@ -582,9 +588,14 @@ class WaResultsCollector(object): :param by: List of identifiers to group output as in DataFrame.groupby. """ - df = (self._select(tag, kernel, test) - .groupby(['workload', 'metric']) - .get_group((workload, metric))) + df = self._select(tag, kernel, test) + if df.empty: + self._log.warning("No data to plot for (tag: %s, kernel: %s, test: %s)", + tag, kernel, test) + return None + + df = (df.groupby(['workload', 'metric']) + .get_group((workload, metric))) units = df['units'].unique() if len(units) > 1: -- GitLab From 5c35aebc534831f03ff2cc517ed4d71b48afddce Mon Sep 17 00:00:00 2001 From: Brendan Jackman Date: Mon, 16 Oct 2017 14:39:12 +0100 Subject: [PATCH 43/84] tools/wa_results_collector: Don't spam CDF data --- libs/utils/wa_results_collector.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libs/utils/wa_results_collector.py b/libs/utils/wa_results_collector.py index ccc6d7211..c093ea084 100644 --- a/libs/utils/wa_results_collector.py +++ b/libs/utils/wa_results_collector.py @@ -622,7 +622,7 @@ class WaResultsCollector(object): lines.append(ax.lines[-1]) axes.axhline(y=cdf.below, linewidth=1, linestyle='--', color=color) - self._log.info("%-32s: %-32s: %.1f", keys[2], keys[1], 100.*cdf.below) + self._log.debug("%-32s: %-32s: %.1f", keys[2], keys[1], 100.*cdf.below) axes.grid(True) axes.legend(lines, labels) -- GitLab From 07b8050bc6f92199ff5ca0eea518e566f375d2d4 Mon Sep 17 00:00:00 2001 From: Patrick Bellasi Date: Fri, 13 Oct 2017 18:41:13 +0100 Subject: [PATCH 44/84] tools/wa_results_collector: let's be more friendly with filters definition This allows to specify "pelt" instead of ".*pelt.*" to focus for example on experiments which have been executed on a PELT enabled kernel. We can still use RE, thus "list_view.*pelt" is still a valid filter. Signed-off-by: Patrick Bellasi --- libs/utils/wa_results_collector.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/libs/utils/wa_results_collector.py b/libs/utils/wa_results_collector.py index c093ea084..4b85869f7 100644 --- a/libs/utils/wa_results_collector.py +++ b/libs/utils/wa_results_collector.py @@ -446,9 +446,9 @@ class WaResultsCollector(object): def _select(self, tag='.*', kernel='.*', test='.*'): _df = self.results_df - _df = _df[_df.tag.str.match(tag)] - _df = _df[_df.kernel.str.match(kernel)] - _df = _df[_df.test.str.match(test)] + _df = _df[_df.tag.str.contains(tag)] + _df = _df[_df.kernel.str.contains(kernel)] + _df = _df[_df.test.str.contains(test)] return _df @property -- GitLab From 401dcd0ba8346b5ecebb6bea76a3d998de160992 Mon Sep 17 00:00:00 2001 From: Patrick Bellasi Date: Fri, 13 Oct 2017 18:57:38 +0100 Subject: [PATCH 45/84] tools/wa_results_collector: nicely complain for not valid parameters Let's warn the users about the workloads/metrics which are available for plotting once the results DF has been filtered. 
Signed-off-by: Patrick Bellasi --- libs/utils/wa_results_collector.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/libs/utils/wa_results_collector.py b/libs/utils/wa_results_collector.py index 4b85869f7..3a172315f 100644 --- a/libs/utils/wa_results_collector.py +++ b/libs/utils/wa_results_collector.py @@ -501,6 +501,21 @@ class WaResultsCollector(object): tag, kernel, test) return None + valid_workloads = df.workload.unique() + if workload not in valid_workloads: + self._log.warning("No data for [%s] workload", workload) + self._log.info("Workloads with data, for the specified filters, are:") + self._log.info(" %s", ','.join(valid_workloads)) + return None + + valid_metrics = df.metric.unique() + if metric not in valid_metrics: + self._log.warning("No metric [%s] collected for workload [%s]", + metric, workload) + self._log.info("Metrics with data, for the specified filters, are:") + self._log.info(" %s", ', '.join(valid_metrics)) + return None + df = (df.groupby(['workload', 'metric']) .get_group((workload, metric))) -- GitLab From d94e0dc46bf4b509aefe3ae69fbe13384a54eb66 Mon Sep 17 00:00:00 2001 From: Patrick Bellasi Date: Fri, 13 Oct 2017 19:50:46 +0100 Subject: [PATCH 46/84] tools/wa_results_collector: use a more compact plot title Signed-off-by: Patrick Bellasi --- libs/utils/wa_results_collector.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/libs/utils/wa_results_collector.py b/libs/utils/wa_results_collector.py index 3a172315f..7c398ee23 100644 --- a/libs/utils/wa_results_collector.py +++ b/libs/utils/wa_results_collector.py @@ -549,10 +549,11 @@ class WaResultsCollector(object): # Plot boxes sorted by mean fig, axes = plt.subplots(figsize=(16,8)) _df.boxplot(ax=axes, vert=False, showmeans=True) - fig.suptitle('"{}" for workload "{}"'.format(metric, workload)) + fig.suptitle('') if xlim: axes.set_xlim(xlim) axes.set_xlabel('{} [{}]'.format(metric, units)) + axes.set_title('{}:{}'.format(workload, metric)) plt.show() CDF = namedtuple('CDF', ['df', 'threshold', 'above', 'below']) -- GitLab From 19e50859b32183d4271a70195aadd92debac836b Mon Sep 17 00:00:00 2001 From: Brendan Jackman Date: Mon, 16 Oct 2017 14:47:59 +0100 Subject: [PATCH 47/84] tools/wa_results_collector: Factor out _get_metric_df helper --- libs/utils/wa_results_collector.py | 77 ++++++++++++++---------------- 1 file changed, 37 insertions(+), 40 deletions(-) diff --git a/libs/utils/wa_results_collector.py b/libs/utils/wa_results_collector.py index 7c398ee23..b83fc26f3 100644 --- a/libs/utils/wa_results_collector.py +++ b/libs/utils/wa_results_collector.py @@ -474,27 +474,10 @@ class WaResultsCollector(object): .groupby('workload').get_group(workload) ['metric'].unique()) - def do_boxplots(self, workload, metric, - tag='.*', kernel='.*', test='.*', - by=['test', 'tag', 'kernel'], xlim=None): + def _get_metric_df(self, workload, metric, tag, kernel, test): """ - Display boxplots of a certain metric - - Creates horizontal boxplots of metrics in the results. Check - ``workloads`` and ``workload_available_metrics`` to find the available - workloads and metrics. Check ``tags``, ``tests`` and ``kernels`` - to find the names that results can be filtered against. 
- - :param workload: Name of workload to display metrics for - :param metric: Name of metric to display - - :param tag: regular expression to filter tags that should be plotted - :param kernel: regular expression to filter kernels that should be plotted - :param tag: regular expression to filter tags that should be plotted - - :param by: List of identifiers to group output as in DataFrame.groupby. + Common helper for getting results to plot for a given metric """ - df = self._select(tag, kernel, test) if df.empty: self._log.warn("No data to plot for (tag: %s, kernel: %s, test: %s)", @@ -507,23 +490,47 @@ class WaResultsCollector(object): self._log.info("Workloads with data, for the specified filters, are:") self._log.info(" %s", ','.join(valid_workloads)) return None + df = df[df['workload'] == workload] valid_metrics = df.metric.unique() if metric not in valid_metrics: - self._log.warning("No metric [%s] collected for workload [%s]", + self._log.warning("No metric [%s] collected for workoad [%s]", metric, workload) - self._log.info("Metrics with data, for the specified filters, are:") + self._log.info("Metrics with data, for the specied filters, are:") self._log.info(" %s", ', '.join(valid_metrics)) return None - - df = (df.groupby(['workload', 'metric']) - .get_group((workload, metric))) + df = df[df['metric'] == metric] units = df['units'].unique() if len(units) > 1: raise RuntimError('Found different units for workload "{}" metric "{}": {}' .format(workload, metric, units)) - [units] = units + + return df + + def do_boxplots(self, workload, metric, + tag='.*', kernel='.*', test='.*', + by=['test', 'tag', 'kernel'], xlim=None): + """ + Display boxplots of a certain metric + + Creates horizontal boxplots of metrics in the results. Check + ``workloads`` and ``workload_available_metrics`` to find the available + workloads and metrics. Check ``tags``, ``tests`` and ``kernels`` + to find the names that results can be filtered against. + + :param workload: Name of workload to display metrics for + :param metric: Name of metric to display + + :param tag: regular expression to filter tags that should be plotted + :param kernel: regular expression to filter kernels that should be plotted + :param tag: regular expression to filter tags that should be plotted + + :param by: List of identifiers to group output as in DataFrame.groupby. + """ + df = self._get_metric_df(workload, metric, tag, kernel, test) + if df is None: + return # Sort groups by mean duration - this will be the order of the plots gb = df.groupby(by) @@ -552,6 +559,7 @@ class WaResultsCollector(object): fig.suptitle('') if xlim: axes.set_xlim(xlim) + [units] = df['units'].unique() axes.set_xlabel('{} [{}]'.format(metric, units)) axes.set_title('{}:{}'.format(workload, metric)) plt.show() @@ -603,21 +611,9 @@ class WaResultsCollector(object): :param by: List of identifiers to group output as in DataFrame.groupby. 
""" - - df = self._select(tag, kernel, test) - if df.empty: - self._log.warning("No data to plot for (tag: %s, kernel: %s, test: %s)", - tag, kernel, test) - return None - - df = (df.groupby(['workload', 'metric']) - .get_group((workload, metric))) - - units = df['units'].unique() - if len(units) > 1: - raise RuntimError('Found different units for workload "{}" metric "{}": {}' - .format(workload, metric, units)) - [units] = units + df = self._get_metric_df(workload, metric, tag, kernel, test) + if df is None: + return test_cnt = len(df.groupby(['test', 'tag', 'kernel'])) colors = iter(cm.rainbow(np.linspace(0, 1, test_cnt+1))) @@ -631,6 +627,7 @@ class WaResultsCollector(object): labels.append("{:16s}: {:32s}".format(keys[2], keys[1])) color = next(colors) cdf = self._get_cdf(df['value'], threshold) + [units] = df['units'].unique() ax = cdf.df.plot(ax=axes, legend=False, xlim=(0,None), figsize=(16, 6), title='Total duration CDF ({:.1f}% within {} [{}] threshold)'\ .format(100. * cdf.below, threshold, units), -- GitLab From a8b8ea3f2850f72050527d83405afa87796c32c2 Mon Sep 17 00:00:00 2001 From: Brendan Jackman Date: Mon, 16 Oct 2017 14:54:17 +0100 Subject: [PATCH 48/84] tools/wa_results_collector: Remove outdated comment --- libs/utils/wa_results_collector.py | 1 - 1 file changed, 1 deletion(-) diff --git a/libs/utils/wa_results_collector.py b/libs/utils/wa_results_collector.py index b83fc26f3..9fbe348bf 100644 --- a/libs/utils/wa_results_collector.py +++ b/libs/utils/wa_results_collector.py @@ -532,7 +532,6 @@ class WaResultsCollector(object): if df is None: return - # Sort groups by mean duration - this will be the order of the plots gb = df.groupby(by) # Convert the groupby into a DataFrame with a column for each group -- GitLab From f89e13468860abc6d318a560c2af59794ac19812 Mon Sep 17 00:00:00 2001 From: Patrick Bellasi Date: Fri, 13 Oct 2017 19:52:20 +0100 Subject: [PATCH 49/84] tools/wa_results_collector: compute and report stats This allows to plot and report a corresponding table with something like: for test in collector.tests(workload='jankbench'): logging.info("Test: %s", test) stats_df = collector.do_boxplots(workload='jankbench', metric='device_total_energy', test=test) display(stats_df) Signed-off-by: Patrick Bellasi --- libs/utils/wa_results_collector.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/libs/utils/wa_results_collector.py b/libs/utils/wa_results_collector.py index 9fbe348bf..124a3b49b 100644 --- a/libs/utils/wa_results_collector.py +++ b/libs/utils/wa_results_collector.py @@ -563,6 +563,10 @@ class WaResultsCollector(object): axes.set_title('{}:{}'.format(workload, metric)) plt.show() + stats_df = _df.describe(percentiles=[0.75, 0.95, 0.99])\ + .T.sort_values(['mean']) + return stats_df + CDF = namedtuple('CDF', ['df', 'threshold', 'above', 'below']) def _get_cdf(self, data, threshold): -- GitLab From 813466cfefb07d0b2e7f35325c38a4a3321a0f59 Mon Sep 17 00:00:00 2001 From: Brendan Jackman Date: Mon, 16 Oct 2017 16:00:50 +0100 Subject: [PATCH 50/84] tools/wa_results_collector: Fixup test/inv_id column in comparisons DF --- libs/utils/wa_results_collector.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/libs/utils/wa_results_collector.py b/libs/utils/wa_results_collector.py index 124a3b49b..8d4c77462 100644 --- a/libs/utils/wa_results_collector.py +++ b/libs/utils/wa_results_collector.py @@ -657,7 +657,7 @@ class WaResultsCollector(object): # I dunno why I wrote this with a namedtuple instead of just a dict or # whatever, 
but it works fine - Comparison = namedtuple('Comparison', ['metric', 'test', + Comparison = namedtuple('Comparison', ['metric', 'test', 'inv_id', 'base_id', 'base_mean', 'base_std', 'new_id', 'new_mean', 'new_std', 'diff', 'diff_pct', 'pvalue']) @@ -684,13 +684,13 @@ class WaResultsCollector(object): # depending on the `by` param. # So wl_inv_results will be the results entries for that workload on # that kernel/tag - for (workload, inv_id), wl_inv_results in metric_results.groupby(['test', invariant]): + for (test, inv_id), wl_inv_results in metric_results.groupby(['test', invariant]): gb = wl_inv_results.groupby(by)['value'] if base_id not in gb.groups: - self._log.warning('Skipping - No baseline results for workload ' + self._log.warning('Skipping - No baseline results for test ' '[%s] %s [%s] metric [%s]', - workload, invariant, inv_id, metric) + test, invariant, inv_id, metric) continue base_results = gb.get_group(base_id) @@ -702,9 +702,9 @@ class WaResultsCollector(object): # group_id is now a kernel id or a tag (depending on # `by`). group_results is a slice of all the rows of self.results_df - # for a given metric, workload, tag/workload tuple. We + # for a given metric, test, tag/test tuple. We # create comparison object to show how that metric changed - # wrt. to the base tag/workload. + # wrt. to the base tag/test. group_mean = group_results.mean() mean_diff = group_mean - base_mean @@ -740,7 +740,7 @@ class WaResultsCollector(object): pvalue = ttest_ind(group_results, base_results, equal_var=False).pvalue comparisons.append(Comparison( - metric, '_'.join([workload, str(inv_id)]), + metric, test, inv_id, base_id, base_mean, base_results.std(), group_id, group_mean, group_results.std(), mean_diff, mean_diff_pct, pvalue)) -- GitLab From 0737da5d27b1ca04ea1b517b46cdf0fa46564833 Mon Sep 17 00:00:00 2001 From: Brendan Jackman Date: Mon, 16 Oct 2017 16:06:22 +0100 Subject: [PATCH 51/84] tools/wa_results_collector: Separate invariant in comparison plots This means that if you are comparing by kernel, you get separate plots for different tags (e.g. you got one plot for Jankbench list view PELT and one for Jankbench list view WALT). --- libs/utils/wa_results_collector.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/libs/utils/wa_results_collector.py b/libs/utils/wa_results_collector.py index 8d4c77462..ec9bb8de5 100644 --- a/libs/utils/wa_results_collector.py +++ b/libs/utils/wa_results_collector.py @@ -765,7 +765,7 @@ class WaResultsCollector(object): return # Separate plot for each test (e.g. one plot for Jankbench list_view) - for test, test_comparisons in df.groupby('test'): + for (test, inv_id), test_comparisons in df.groupby(('test', 'inv_id')): # Vertical size of plot depends on how many metrics we're comparing # and how many things (kernels/tags) we're comparing metrics for. # a.k.a the total length of the comparisons df. 
@@ -804,8 +804,8 @@ class WaResultsCollector(object): # Add some text for labels, title and axes ticks ax.set_xlabel('Percent difference') [baseline] = test_comparisons['base_id'].unique() - ax.set_title('{}: Percent difference compared to {} \nopacity depicts p-value' - .format(test, baseline)) + ax.set_title('{} ({}): Percent difference compared to {} \nopacity depicts p-value' + .format(test, inv_id, baseline)) ax.set_yticklabels(gdf['metric']) ax.set_yticks(pos + thickness / 2) # ax.set_xlim((-50, 50)) -- GitLab From abc0d44a8912412a1c95897999335031464b6eb2 Mon Sep 17 00:00:00 2001 From: Brendan Jackman Date: Mon, 16 Oct 2017 16:34:14 +0100 Subject: [PATCH 52/84] tools/wa_results_collector: Improve colours in comparisons plot --- libs/utils/wa_results_collector.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/libs/utils/wa_results_collector.py b/libs/utils/wa_results_collector.py index ec9bb8de5..0942498ca 100644 --- a/libs/utils/wa_results_collector.py +++ b/libs/utils/wa_results_collector.py @@ -787,8 +787,10 @@ class WaResultsCollector(object): # TODO: something is up with the calculations above, because there's # always a bit of empty space at the bottom of the axes. - colors = ['r', 'g', 'b'] - for i, (group, gdf) in enumerate(test_comparisons.groupby('new_id')): + + gb = test_comparisons.groupby('new_id') + colors = cm.rainbow(np.linspace(0, 1, len(gb))) + for i, (group, gdf) in enumerate(gb): # For each of the things we're comparing we'll plot a bar chart # but slightly shifted. That's how we get multiple bars on each # y-axis point. -- GitLab From 810b3b390531d7f5b9b9f53cc61d5fbec8c2da74 Mon Sep 17 00:00:00 2001 From: Brendan Jackman Date: Mon, 16 Oct 2017 16:34:18 +0100 Subject: [PATCH 53/84] tools/wa_results_collector: Cosmetics --- libs/utils/wa_results_collector.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/libs/utils/wa_results_collector.py b/libs/utils/wa_results_collector.py index 0942498ca..3218f28b1 100644 --- a/libs/utils/wa_results_collector.py +++ b/libs/utils/wa_results_collector.py @@ -795,8 +795,8 @@ class WaResultsCollector(object): # but slightly shifted. That's how we get multiple bars on each # y-axis point. bars = ax.barh(bottom=pos + (i * thickness), width=gdf['diff_pct'], - height=thickness, label=group, - color=colors[i % len(colors)], align='center') + height=thickness, label=group, + color=colors[i % len(colors)], align='center') # Decrease the opacity for comparisons with a high p-value # TODO: This also decreases the opacity on the legend. I don't # really know what to do about that. -- GitLab From 4510ca69698418fa433608d1d74c6bd970c47ef5 Mon Sep 17 00:00:00 2001 From: Brendan Jackman Date: Mon, 16 Oct 2017 16:57:39 +0100 Subject: [PATCH 54/84] tools/wa_results_collector: Fix opacity of legend colours The trick in plot_comparisons which sets opacity on bars to represent p-value has an issue in that the opacity of the first (lowest) bar sets the opacity of the colours in the legend, which makes it very hard to read. I don't think there's a nice way around this, so I've added a dummy set of bars at the bottom of the plot, you can't see them because their value is 0, but their opacity is 1, so the legend colours are fixed. Sorry, this is one of the worst hacks I've ever committed, and it creates extra empty space on the plot which I can't figure out how to get rid of. 
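
To illustrate the trick in isolation (a minimal sketch, assuming the
pre-2.0 matplotlib barh(bottom=...) signature that this module already
uses; the label and values are made up):

    import numpy as np
    import matplotlib.pyplot as plt

    fig, ax = plt.subplots()
    # One extra y position for the dummy bar that anchors the legend
    pos = np.arange(-1, 3)
    widths = [0, 10, -5, 20]        # the dummy bar has width 0, so it's invisible
    pvalues = [0, 0.08, 0.01, 0.5]  # the dummy bar's pvalue of 0 gives alpha 1.0
    bars = ax.barh(bottom=pos, width=widths, height=0.3,
                   label='new_kernel', color='b', align='center')
    for bar, pvalue in zip(bars, pvalues):
        bar.set_alpha(1 - min(pvalue * 10, 0.95))
    # The legend entry inherits the first (dummy) bar's full opacity
    ax.legend(loc='best')
    plt.show()
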
--- libs/utils/wa_results_collector.py | 26 +++++++++++++++++++------- 1 file changed, 19 insertions(+), 7 deletions(-) diff --git a/libs/utils/wa_results_collector.py b/libs/utils/wa_results_collector.py index 3218f28b1..75f6de93a 100644 --- a/libs/utils/wa_results_collector.py +++ b/libs/utils/wa_results_collector.py @@ -772,9 +772,14 @@ class WaResultsCollector(object): fig, ax = plt.subplots(figsize=(15, len(test_comparisons) / 2.)) # pos is used as the Y-axis. The y-axis is a descrete axis with a - # point for each of the metrics we're comparing + # point for each of the metrics we're comparing. matplotlib needs + # that in numerical form. + # We also have one more tick on the Y-axis than we actually need - + # this is a terrible hack which is necessary because when we set the + # opacity of the first bar, it sets the opacity of the legend. So we + # introduce a dummy bar with a value of 0 and an opacity of 1. num_metrics = len(test_comparisons['metric'].unique()) - pos = np.arange(num_metrics) + pos = np.arange(-1, num_metrics) # At each point on the discrete y-axis we'll have one bar for each # comparison: one per kernel/tag (depending on the `by` param), minus @@ -794,13 +799,18 @@ class WaResultsCollector(object): # For each of the things we're comparing we'll plot a bar chart # but slightly shifted. That's how we get multiple bars on each # y-axis point. - bars = ax.barh(bottom=pos + (i * thickness), width=gdf['diff_pct'], + bars = ax.barh(bottom=pos + (i * thickness), + # Add a dummy [0] entry so we can fix the opacity + # of the legend + width=[0] + gdf['diff_pct'].tolist(), height=thickness, label=group, color=colors[i % len(colors)], align='center') # Decrease the opacity for comparisons with a high p-value - # TODO: This also decreases the opacity on the legend. I don't - # really know what to do about that. - for bar, pvalue in zip(bars, gdf['pvalue']): + # We add a dummy [0] (which means opacity=1.0) as a terrible + # workaround for the fact that the first bar's opacity also sets + # the opacity for that bar in the legend, which makes it hard to + # read. + for bar, pvalue in zip(bars, [0] + gdf['pvalue'].tolist()): bar.set_alpha(1 - (min(pvalue * 10, 0.95))) # Add some text for labels, title and axes ticks @@ -808,7 +818,9 @@ class WaResultsCollector(object): [baseline] = test_comparisons['base_id'].unique() ax.set_title('{} ({}): Percent difference compared to {} \nopacity depicts p-value' .format(test, inv_id, baseline)) - ax.set_yticklabels(gdf['metric']) + # The '' label is for the dummy first bar, which we used as a + # workaround for setting the opacity of the legend + ax.set_yticklabels([''] + gdf['metric'].tolist()) ax.set_yticks(pos + thickness / 2) # ax.set_xlim((-50, 50)) ax.legend(loc='best') -- GitLab From ce548ce4d0acaf77a5d74f29aef15da1f28cd272 Mon Sep 17 00:00:00 2001 From: Brendan Jackman Date: Mon, 16 Oct 2017 17:18:09 +0100 Subject: [PATCH 55/84] tools/wa_results_collector: Pop 'test' classifier to shorten tag --- libs/utils/wa_results_collector.py | 23 +++++++++++------------ 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/libs/utils/wa_results_collector.py b/libs/utils/wa_results_collector.py index 75f6de93a..068bca3d9 100644 --- a/libs/utils/wa_results_collector.py +++ b/libs/utils/wa_results_collector.py @@ -225,21 +225,11 @@ class WaResultsCollector(object): # identify the runtime configuration. If not, use a representation # of the full key=value pairs. 
             classifiers = job['classifiers'] or {}
-            rich_tag = ';'.join('{}={}'.format(k, v) for k, v in classifiers.iteritems())
-            tag = classifiers.get('tag', rich_tag)
-
-            if job_id in tag_map:
-                # Double check I didn't do a stupid
-                if tag_map[job_id] != tag:
-                    raise RuntimeError('Multiple tags ({}, {}) found for job ID {}'
-                                       .format(tag, tag_map[job_id], job_id))
-
-            tag_map[job_id] = tag
 
             if 'test' in classifiers:
                 # If the workload spec has a 'test' classifier, use that to
                 # identify it.
-                test = classifiers['test']
+                test = classifiers.pop('test')
             elif 'test' in job['workload_parameters']:
                 # If not, some workloads have a 'test' workload_parameter, try
                 # using that
@@ -250,12 +240,21 @@ class WaResultsCollector(object):
                 # different workload parameters will be amalgamated.
                 test = workload
 
+            rich_tag = ';'.join('{}={}'.format(k, v) for k, v in classifiers.iteritems())
+            tag = classifiers.get('tag', rich_tag)
+
+            if job_id in tag_map:
+                # Double check I didn't do a stupid
+                if tag_map[job_id] != tag:
+                    raise RuntimeError('Multiple tags ({}, {}) found for job ID {}'
+                                       .format(tag, tag_map[job_id], job_id))
+            tag_map[job_id] = tag
+
             if job_id in test_map:
                 # Double check I didn't do a stupid
                 if test_map[job_id] != test:
                     raise RuntimeError('Multiple tests ({}, {}) found for job ID {}'
                                        .format(test, test_map[job_id], job_id))
-
             test_map[job_id] = test
 
             iteration = next_iteration[job_id]
-- 
GitLab


From bca9b81500144f1de1e56498d4d40159c7d4104d Mon Sep 17 00:00:00 2001
From: Patrick Bellasi
Date: Mon, 16 Oct 2017 17:08:33 +0100
Subject: [PATCH 56/84] tools/wa_results_collector: move stats generation into
 a dedicated function

This moves the generation of statistics for a given metric into a
dedicated function: describe().

The additional convenience method report() is added to wrap boxplot and
statistics generation in a single call.

The original do_boxplots() method has been renamed to just boxplot(), to
better match the API concepts already exposed by pandas.

Signed-off-by: Patrick Bellasi
---
 libs/utils/wa_results_collector.py | 78 +++++++++++++++++++++++++++---
 1 file changed, 72 insertions(+), 6 deletions(-)

diff --git a/libs/utils/wa_results_collector.py b/libs/utils/wa_results_collector.py
index 068bca3d9..2e7af6a1f 100644
--- a/libs/utils/wa_results_collector.py
+++ b/libs/utils/wa_results_collector.py
@@ -34,6 +34,8 @@ import matplotlib.pyplot as plt
 from bart.common.Utils import area_under_curve
 from devlib.target import KernelVersion
 from trappy.utils import handle_duplicate_index
 
+from IPython.display import display
+
 from trace import Trace
 from git import Git
@@ -507,9 +509,9 @@ class WaResultsCollector(object):
 
         return df
 
-    def do_boxplots(self, workload, metric,
-                    tag='.*', kernel='.*', test='.*',
-                    by=['test', 'tag', 'kernel'], xlim=None):
+    def boxplot(self, workload, metric,
+                tag='.*', kernel='.*', test='.*',
+                by=['test', 'tag', 'kernel'], xlim=None):
         """
         Display boxplots of a certain metric
 
@@ -562,9 +564,73 @@ class WaResultsCollector(object):
             axes.set_title('{}:{}'.format(workload, metric))
         plt.show()
 
-        stats_df = _df.describe(percentiles=[0.75, 0.95, 0.99])\
-                      .T.sort_values(['mean'])
-        return stats_df
+        return axes
+
+    def describe(self, workload, metric,
+                 tag='.*', kernel='.*', test='.*',
+                 by=['test', 'tag', 'kernel']):
+        """
+        Return a DataFrame of statistics for a certain metric
+
+        Compute mean, std, min, max and [50, 75, 95, 99] percentiles for
+        the values collected on each iteration of the specified metric.
+
+        Check ``workloads`` and ``workload_available_metrics`` to find the
+        available workloads and metrics.
+        Check ``tags``, ``tests`` and ``kernels`` to find the names that
+        results can be filtered against.
+
+        :param workload: Name of workload to display metrics for
+        :param metric: Name of metric to display
+
+        :param tag: regular expression to filter tags that should be plotted
+        :param kernel: regular expression to filter kernels that should be plotted
+        :param tag: regular expression to filter tags that should be plotted
+
+        :param by: List of identifiers to group output as in DataFrame.groupby.
+        """
+        df = self._get_metric_df(workload, metric, tag, kernel, test)
+        if df is None:
+            return
+
+        grouped = df.groupby(by)['value']
+        stats_df = pd.DataFrame(
+            grouped.describe(percentiles=[0.75, 0.95, 0.99]))
+        stats_df.rename(columns={'value': metric}, inplace=True)
+        stats_df = stats_df.unstack()
+
+        return stats_df.sort_values(by=[(metric, 'mean')], ascending=True)
+
+    def report(self, workload, metric,
+               tag='.*', kernel='.*', test='.*',
+               by=['test', 'tag', 'kernel'], xlim=None):
+        """
+        Report a boxplot and a set of statistics for a certain metric
+
+        This is a convenience method to call both ``boxplot`` and ``describe``
+        at the same time to get a consistent graphical and numerical
+        representation of the values for the specified metric.
+
+        Check ``workloads`` and ``workload_available_metrics`` to find the
+        available workloads and metrics.
+        Check ``tags``, ``tests`` and ``kernels`` to find the names that
+        results can be filtered against.
+
+        :param workload: Name of workload to display metrics for
+        :param metric: Name of metric to display
+
+        :param tag: regular expression to filter tags that should be plotted
+        :param kernel: regular expression to filter kernels that should be plotted
+        :param tag: regular expression to filter tags that should be plotted
+
+        :param by: List of identifiers to group output as in DataFrame.groupby.
+        """
+        axes = self.boxplot(workload, metric, tag, kernel, test, by, xlim)
+        stats_df = self.describe(workload, metric, tag, kernel, test, by)
+        display(stats_df)
+
+        return (axes, stats_df)
+
     CDF = namedtuple('CDF', ['df', 'threshold', 'above', 'below'])
-- 
GitLab


From b53f122c181087fce596fdda744b6efb4349e026 Mon Sep 17 00:00:00 2001
From: Brendan Jackman
Date: Mon, 16 Oct 2017 17:30:14 +0100
Subject: [PATCH 57/84] tools/wa_results_collector: typo

---
 libs/utils/wa_results_collector.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/libs/utils/wa_results_collector.py b/libs/utils/wa_results_collector.py
index 2e7af6a1f..7ad3167af 100644
--- a/libs/utils/wa_results_collector.py
+++ b/libs/utils/wa_results_collector.py
@@ -56,7 +56,7 @@ class WaResultsCollector(object):
       - 'workload' is the general name of a workload such as 'jankbench' or
         'youtube'.
 
-    - 'test' is as more specific identification for workload - for example this
+    - 'test' is a more specific identification for workload - for example this
       might identify one of Jankbench's sub-benchmarks, or specifically playing
      a certain video on Youtube for 30s.
-- GitLab From f9233820083a1410775e4c2183016f9bb0128438 Mon Sep 17 00:00:00 2001 From: Brendan Jackman Date: Tue, 17 Oct 2017 10:42:46 +0100 Subject: [PATCH 58/84] tools/wa_results_collector: Remove bug workaround no longer needed --- libs/utils/wa_results_collector.py | 7 ------- 1 file changed, 7 deletions(-) diff --git a/libs/utils/wa_results_collector.py b/libs/utils/wa_results_collector.py index 7ad3167af..8f9256cfd 100644 --- a/libs/utils/wa_results_collector.py +++ b/libs/utils/wa_results_collector.py @@ -898,13 +898,6 @@ class WaResultsCollector(object): with open(os.path.join(job_dir, 'result.json')) as f: ret = {a['name']: os.path.join(job_dir, a['path']) for a in json.load(f)['artifacts']} - - # TODO: This is a workaround for a WA3 but that is now - # fixed (a65cffb705B1 - trace-cmd: add trace files as artificats) - # I'm just leaving it here so I can keep comparing data from runs before - # that fix went in. Once I don't need it any more, I'll remove it. - if 'trace.dat' in os.listdir(job_dir): - ret['trace-cmd-bin'] = os.path.join(job_dir, 'trace.dat') return ret def _find_job_dir(self, workload='.*', tag='.*', kernel='.*', test='.*', -- GitLab From 3e47b82c742a455da6b1f97005ea6e4b59a458b7 Mon Sep 17 00:00:00 2001 From: Brendan Jackman Date: Tue, 17 Oct 2017 11:08:14 +0100 Subject: [PATCH 59/84] tools/wa_results_collector: typo --- libs/utils/wa_results_collector.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libs/utils/wa_results_collector.py b/libs/utils/wa_results_collector.py index 8f9256cfd..b5510cfa4 100644 --- a/libs/utils/wa_results_collector.py +++ b/libs/utils/wa_results_collector.py @@ -836,7 +836,7 @@ class WaResultsCollector(object): # a.k.a the total length of the comparisons df. fig, ax = plt.subplots(figsize=(15, len(test_comparisons) / 2.)) - # pos is used as the Y-axis. The y-axis is a descrete axis with a + # pos is used as the Y-axis. The y-axis is a discrete axis with a # point for each of the metrics we're comparing. matplotlib needs # that in numerical form. # We also have one more tick on the Y-axis than we actually need - -- GitLab From 1b20d4cbd4116d2402e372c96957667b43725e4a Mon Sep 17 00:00:00 2001 From: Brendan Jackman Date: Tue, 17 Oct 2017 11:08:37 +0100 Subject: [PATCH 60/84] tools/wa_results_collector: Fix comparison plot for missing data --- libs/utils/wa_results_collector.py | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/libs/utils/wa_results_collector.py b/libs/utils/wa_results_collector.py index b5510cfa4..970ac1006 100644 --- a/libs/utils/wa_results_collector.py +++ b/libs/utils/wa_results_collector.py @@ -843,8 +843,8 @@ class WaResultsCollector(object): # this is a terrible hack which is necessary because when we set the # opacity of the first bar, it sets the opacity of the legend. So we # introduce a dummy bar with a value of 0 and an opacity of 1. 
- num_metrics = len(test_comparisons['metric'].unique()) - pos = np.arange(-1, num_metrics) + all_metrics = test_comparisons['metric'].unique() + pos = np.arange(-1, len(all_metrics)) # At each point on the discrete y-axis we'll have one bar for each # comparison: one per kernel/tag (depending on the `by` param), minus @@ -861,6 +861,20 @@ class WaResultsCollector(object): gb = test_comparisons.groupby('new_id') colors = cm.rainbow(np.linspace(0, 1, len(gb))) for i, (group, gdf) in enumerate(gb): + def get_dummy_row(metric): + return pd.DataFrame({col: 0 for col in gdf.columns}, index=[metric]) + + missing_metrics = set(all_metrics) - set(gdf['metric'].unique()) + gdf = gdf.set_index('metric') + for missing_metric in missing_metrics: + self._log.warning( + "Data missing, can't compare metric [{}] for {} [{}]" + .format(missing_metric, by, group)) + gdf = gdf.append(get_dummy_row(missing_metric)) + + # Ensure the comparisons are in the same order for each group + gdf = gdf.reindex(all_metrics) + # For each of the things we're comparing we'll plot a bar chart # but slightly shifted. That's how we get multiple bars on each # y-axis point. @@ -885,7 +899,7 @@ class WaResultsCollector(object): .format(test, inv_id, baseline)) # The '' label is for the dummy first bar, which we used as a # workaround for setting the opacity of the legend - ax.set_yticklabels([''] + gdf['metric'].tolist()) + ax.set_yticklabels([''] + gdf.index.tolist()) ax.set_yticks(pos + thickness / 2) # ax.set_xlim((-50, 50)) ax.legend(loc='best') -- GitLab From 783e99a40acb0a12ee5fda82f5e6852f7c08835c Mon Sep 17 00:00:00 2001 From: Brendan Jackman Date: Tue, 17 Oct 2017 11:18:57 +0100 Subject: [PATCH 61/84] tools/wa_results_collector: Clean up legend opacity hack --- libs/utils/wa_results_collector.py | 17 ++++++----------- 1 file changed, 6 insertions(+), 11 deletions(-) diff --git a/libs/utils/wa_results_collector.py b/libs/utils/wa_results_collector.py index 970ac1006..15ffb71b3 100644 --- a/libs/utils/wa_results_collector.py +++ b/libs/utils/wa_results_collector.py @@ -875,21 +875,18 @@ class WaResultsCollector(object): # Ensure the comparisons are in the same order for each group gdf = gdf.reindex(all_metrics) + # Append the dummy row we're using to fix the legend opacity + gdf = get_dummy_row('').append(gdf) + # For each of the things we're comparing we'll plot a bar chart # but slightly shifted. That's how we get multiple bars on each # y-axis point. bars = ax.barh(bottom=pos + (i * thickness), - # Add a dummy [0] entry so we can fix the opacity - # of the legend - width=[0] + gdf['diff_pct'].tolist(), + width=gdf['diff_pct'], height=thickness, label=group, color=colors[i % len(colors)], align='center') # Decrease the opacity for comparisons with a high p-value - # We add a dummy [0] (which means opacity=1.0) as a terrible - # workaround for the fact that the first bar's opacity also sets - # the opacity for that bar in the legend, which makes it hard to - # read. 
-            for bar, pvalue in zip(bars, [0] + gdf['pvalue'].tolist()):
+            for bar, pvalue in zip(bars, gdf['pvalue']):
                 bar.set_alpha(1 - (min(pvalue * 10, 0.95)))
 
             # Add some text for labels, title and axes ticks
             ax.set_xlabel('Percent difference')
             [baseline] = test_comparisons['base_id'].unique()
             ax.set_title('{} ({}): Percent difference compared to {} \nopacity depicts p-value'
                          .format(test, inv_id, baseline))
-            # The '' label is for the dummy first bar, which we used as a
-            # workaround for setting the opacity of the legend
-            ax.set_yticklabels([''] + gdf.index.tolist())
+            ax.set_yticklabels(gdf.index.tolist())
             ax.set_yticks(pos + thickness / 2)
             # ax.set_xlim((-50, 50))
             ax.legend(loc='best')
-- 
GitLab


From 6f5ccd614dca41f4f149295b42bb5134e6b94a8e Mon Sep 17 00:00:00 2001
From: Patrick Bellasi
Date: Tue, 17 Oct 2017 11:34:16 +0100
Subject: [PATCH 62/84] utils/wa_results_collector: allow user to skip trace
 parsing

Signed-off-by: Patrick Bellasi
---
 libs/utils/wa_results_collector.py | 13 +++++++++++--
 1 file changed, 11 insertions(+), 2 deletions(-)

diff --git a/libs/utils/wa_results_collector.py b/libs/utils/wa_results_collector.py
index 15ffb71b3..fcc37e989 100644
--- a/libs/utils/wa_results_collector.py
+++ b/libs/utils/wa_results_collector.py
@@ -98,6 +98,11 @@ class WaResultsCollector(object):
                              references to replace SHA1s in data representation. This is
                              purely to make the output more manageable for humans.
 
+    :param parse_traces: This class uses LISA to parse and analyse ftrace files
+                         for extra metrics. With multiple/large traces this
+                         can take some time. Set this param to False to disable
+                         trace parsing.
+
     :param use_cached_trace_metrics: This class uses LISA to parse and analyse
                                      ftrace files for extra metrics. With multiple/large
                                      traces this can take some time, so the extracted metrics are
@@ -110,7 +115,8 @@ class WaResultsCollector(object):
                                      cached in the provided output directories. Set this param
                                      to False to disable this caching.
     """
     def __init__(self, base_dir=None, wa_dirs=".*", platform=None,
-                 kernel_repo_path=None, use_cached_trace_metrics=True):
+                 kernel_repo_path=None, parse_traces=True,
+                 use_cached_trace_metrics=True):
 
         self._log = logging.getLogger('WaResultsCollector')
@@ -128,6 +134,9 @@ class WaResultsCollector(object):
         wa_dirs = [os.path.expanduser(p) for p in wa_dirs]
 
         self.platform = platform
+        self.parse_traces = parse_traces
+        if not self.parse_traces:
+            self._log.warning("Trace parsing disabled")
         self.use_cached_trace_metrics = use_cached_trace_metrics
 
         df = pd.DataFrame()
@@ -412,7 +421,7 @@ class WaResultsCollector(object):
         metrics_df = pd.DataFrame()
 
         artifacts = self._read_artifacts(job_dir)
-        if 'trace-cmd-bin' in artifacts:
+        if self.parse_traces and 'trace-cmd-bin' in artifacts:
             metrics_df = metrics_df.append(
                 self._get_trace_metrics(artifacts['trace-cmd-bin']))
-- 
GitLab


From 579da30427c970ac600aebbbe4441a62c48a2c3f Mon Sep 17 00:00:00 2001
From: Patrick Bellasi
Date: Tue, 17 Oct 2017 12:19:09 +0100
Subject: [PATCH 63/84] utils/wa_results_collector: be less verbose on
 reporting failed jobs

This condenses the information about skipped jobs into fewer log lines.

Signed-off-by: Patrick Bellasi
---
 libs/utils/wa_results_collector.py | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/libs/utils/wa_results_collector.py b/libs/utils/wa_results_collector.py
index fcc37e989..4344ba796 100644
--- a/libs/utils/wa_results_collector.py
+++ b/libs/utils/wa_results_collector.py
@@ -220,6 +220,9 @@ class WaResultsCollector(object):
         # isn't necessary).
next_iteration = defaultdict(lambda: 1)

+ # Keep track of which jobs we skipped for each iteration
+ skipped_jobs = defaultdict(lambda: [])
+
 # Dicts mapping job IDs to things determined about the job - this will
 # be used to add extra columns to the DataFrame (that aren't reported
 # directly in WA's results.csv)
@@ -282,8 +285,7 @@
 with open(os.path.join(job_dir, 'result.json')) as f:
 job_result = json.load(f)
 if job_result['status'] == 'FAILED':
- self._log.warning('Skipping failed iteration %s of job %s',
- iteration, job_id)
+ skipped_jobs[iteration].append(job_id)
 continue

 extra_df = self._get_extra_job_metrics(job_dir, workload)
@@ -298,6 +300,10 @@

 df = df.append(extra_df)

+ for iteration, job_ids in skipped_jobs.iteritems():
+ self._log.warning("Skipped failed iteration %d for jobs:", iteration)
+ self._log.warning(" %s", ', '.join(job_ids))
+
 df['tag'] = df['id'].replace(tag_map)
 df['test'] = df['id'].replace(test_map)
 # TODO: This is a bit lazy: we're storing the directory that every
-- GitLab
From f1d5ae5c9d88ea1321c9cce717ca7813818e0c6b Mon Sep 17 00:00:00 2001
From: Patrick Bellasi
Date: Tue, 17 Oct 2017 15:10:26 +0100
Subject: [PATCH 64/84] utils/wa_results_collector: allow user to select the
 sorting criteria

This allows the user to boxplot and report experiments ordered on a
specified statistic ('mean' by default), in either ascending or
descending order. The ordering is kept in sync across a report() call
so that the boxplot ordering always matches the tabular data.

If a non-standard percentile is specified, an additional column is
generated in the reported statistics.

Signed-off-by: Patrick Bellasi
---
 libs/utils/wa_results_collector.py | 101 ++++++++++++++++++++++++-----
 1 file changed, 84 insertions(+), 17 deletions(-)

diff --git a/libs/utils/wa_results_collector.py b/libs/utils/wa_results_collector.py
index 4344ba796..bcc6d77bf 100644
--- a/libs/utils/wa_results_collector.py
+++ b/libs/utils/wa_results_collector.py
@@ -524,9 +524,37 @@ class WaResultsCollector(object):

 return df

+
+ SortBy = namedtuple('SortBy', ['key', 'params', 'column'])
+
+ def _get_sort_params(self, sort_on):
+ """
+ Validate a sort criteria and return the parameters required by the
+ boxplot and report methods.
+ """
+ valid_sort = ['count', 'mean', 'std', 'min', 'max']
+
+ # Check whether a valid percentile string has been requested
+ match = re.match('^(?P<quantile>\d{1,3})\%$', sort_on)
+ if match:
+ quantile = int(match.group('quantile'))
+ if quantile < 1 or quantile > 100:
+ raise ValueError("Error sorting data: Quantile value out of range [1..100]")
+ return self.SortBy('quantile', {'q': quantile/100.}, sort_on)
+
+ # Otherwise, verify if it's a valid Pandas::describe()'s column name
+ if sort_on in valid_sort:
+ return self.SortBy(sort_on, {}, sort_on)
+
+ raise ValueError(
+ "sort_on={} not supported, allowed values are percentile or {}"
+ .format(sort_on, valid_sort))
+
 def boxplot(self, workload, metric,
 tag='.*', kernel='.*', test='.*',
- by=['test', 'tag', 'kernel'], xlim=None):
+ by=['test', 'tag', 'kernel'],
+ sort_on='mean', ascending=False,
+ xlim=None):
 """
 Display boxplots of a certain metric

@@ -535,6 +563,9 @@
 workloads and metrics. Check ``tags``, ``tests`` and ``kernels``
 to find the names that results can be filtered against.

+ By default, the box with the lowest mean value is plotted at the top of
+ the graph; this can be customized with ``sort_on`` and ``ascending``.
+
 :param workload: Name of workload to display metrics for

 :param metric: Name of metric to display
@@ -543,11 +574,21 @@
 :param tag: regular expression to filter tags that should be plotted

 :param by: List of identifiers to group output as in DataFrame.groupby.
+
+ :param sort_on: Name of the statistic to order data for.
+ Supported values are: count, mean, std, min, max.
+ You may alternatively specify a percentile to sort on;
+ this should be an integer in the range [1..100],
+ formatted as a percentage, e.g. 95% is the 95th
+ percentile.
+ :param ascending: When True, boxplots are plotted by increasing values
+ (lowest-valued boxplot at the top of the graph) of the
+ specified `sort_on` statistic.
 """
+ sp = self._get_sort_params(sort_on)
 df = self._get_metric_df(workload, metric, tag, kernel, test)
 if df is None:
 return
-
 gb = df.groupby(by)

 # Convert the groupby into a DataFrame with a column for each group
@@ -562,11 +603,15 @@
 col.index = np.arange(max_group_size)
 _df[group_name] = col

- # Sort the columns so that the groups with the lowest mean get plotted
- # at the top
- avgs = _df.mean()
- avgs = avgs.sort_values(ascending=False)
- _df = _df[avgs.index]
+ # Sort the columns
+ # With default params this puts the box with the lowest mean at the
+ # bottom.
+ # NOTE: the not(ascending) condition is required to keep these plots
+ # aligned with the way describe() reports the stats corresponding to
+ # each boxplot
+ sorted_df = getattr(_df, sp.key)(**sp.params)
+ sorted_df = sorted_df.sort_values(ascending=not(ascending))
+ _df = _df[sorted_df.index]

 # Plot boxes sorted by mean
 fig, axes = plt.subplots(figsize=(16,8))
@@ -583,7 +628,8 @@

 def describe(self, workload, metric,
 tag='.*', kernel='.*', test='.*',
- by=['test', 'tag', 'kernel']):
+ by=['test', 'tag', 'kernel'],
+ sort_on='mean', ascending=False):
 """
 Return a DataFrame of statistics for a certain metric
@@ -603,24 +649,44 @@
 :param tag: regular expression to filter tags that should be plotted

 :param by: List of identifiers to group output as in DataFrame.groupby.
+
+ :param sort_on: Name of the statistic to order data for.
+ Supported values are: count, mean, std, min, max.
+ You may alternatively specify a percentile to sort on;
+ this has to be an integer in the range [1..100],
+ formatted as a percentage,
+ e.g. 95% is the 95th percentile.
+ :param ascending: When True, the statistics are reported by increasing values
+ of the specified `sort_on` column
 """
+ sp = self._get_sort_params(sort_on)
 df = self._get_metric_df(workload, metric, tag, kernel, test)
 if df is None:
 return

+ # Add the requested percentile to the default set, if needed
+ percentiles = [0.75, 0.95, 0.99]
+ if sp.params and 'q' in sp.params:
+ percentiles.append(sp.params['q'])
+ percentiles = sorted(list(set(percentiles)))
+
 grouped = df.groupby(by)['value']
 stats_df = pd.DataFrame(
- grouped.describe(percentiles=[0.75, 0.95, 0.99]))
- stats_df.rename(columns={'value': metric}, inplace=True)
+ grouped.describe(percentiles=percentiles))
 stats_df = stats_df.unstack()
+ stats_df.sort_values(by=[('value', sp.column)],
+ ascending=ascending, inplace=True)
+ stats_df.rename(columns={'value': metric}, inplace=True)

- return stats_df.sort_values(by=[(metric, 'mean')], ascending=True)
+ return stats_df

 def report(self, workload, metric,
 tag='.*', kernel='.*', test='.*',
- by=['test', 'tag', 'kernel'], xlim=None):
+ by=['test', 'tag', 'kernel'],
+ sort_on='mean', ascending=False,
+ xlim=None):
 """
- Report a boxplot and a set of statistics for a certain metrick
+ Report a boxplot and a set of statistics for a certain metric

 This is a convenience method to call both ``boxplot`` and ``describe``
 at the same time to get a consistent graphical and numerical
@@ -640,8 +706,10 @@
 :param by: List of identifiers to group output as in DataFrame.groupby.
 """
- axes = self.boxplot(workload, metric, tag, kernel, test, by, xlim)
- stats_df = self.describe(workload, metric, tag, kernel, test, by)
+ axes = self.boxplot(workload, metric, tag, kernel, test,
+ by, sort_on, ascending, xlim)
+ stats_df = self.describe(workload, metric, tag, kernel, test,
+ by, sort_on, ascending)
 display(stats_df)

 return (axes, stats_df)
@@ -932,8 +1000,7 @@
 job_dirs = df['_job_dir'].unique()

 if len(job_dirs) > 1:
- raise ValueError(
- "Params for get_artifacts don't uniquely identify a job. 
" "for workload='{}' tag='{}' kernel='{}' test='{}' iteration={}, " "found:\n{}" .format( workload, tag, kernel, test, iteration, '\n'.join(job_dirs))) -- GitLab From bbedf51495393f023533bac10baa7a28464f53b6 Mon Sep 17 00:00:00 2001 From: Brendan Jackman Date: Wed, 18 Oct 2017 13:33:32 +0100 Subject: [PATCH 65/84] tools/wa_results_collector: Fix for new pandas GroupBy.describe output --- libs/utils/wa_results_collector.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/libs/utils/wa_results_collector.py b/libs/utils/wa_results_collector.py index bcc6d77bf..f39062545 100644 --- a/libs/utils/wa_results_collector.py +++ b/libs/utils/wa_results_collector.py @@ -671,10 +671,8 @@ class WaResultsCollector(object): percentiles = sorted(list(set(percentiles))) grouped = df.groupby(by)['value'] - stats_df = pd.DataFrame( - grouped.describe(percentiles=percentiles)) - stats_df = stats_df.unstack() - stats_df.sort_values(by=[('value', sp.column)], + stats_df = grouped.describe(percentiles=percentiles) + stats_df.sort_values(by=sp.column, ascending=ascending, inplace=True) stats_df.rename(columns={'value': metric}, inplace=True) -- GitLab From c82755289e9b8b4a14de4aebbad273d48448a404 Mon Sep 17 00:00:00 2001 From: Patrick Bellasi Date: Wed, 18 Oct 2017 14:45:04 +0100 Subject: [PATCH 66/84] tools/wa_results_collector: fix describe for PANDAs >v0.20.3 This is to ensure we always get the same output format with a column which represents the metrics being described. Signed-off-by: Patrick Bellasi --- libs/utils/wa_results_collector.py | 21 +++++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-) diff --git a/libs/utils/wa_results_collector.py b/libs/utils/wa_results_collector.py index f39062545..aff9b2ee0 100644 --- a/libs/utils/wa_results_collector.py +++ b/libs/utils/wa_results_collector.py @@ -671,8 +671,25 @@ class WaResultsCollector(object): percentiles = sorted(list(set(percentiles))) grouped = df.groupby(by)['value'] - stats_df = grouped.describe(percentiles=percentiles) - stats_df.sort_values(by=sp.column, + stats_df = pd.DataFrame( + grouped.describe(percentiles=percentiles)) + + # Use a consistent formatting independently from the PANDAs version + if 'value' in stats_df.columns: + # We must be running on a pre-0.20.0 version of pandas. + # unstack will convert the old output format to the new. 
+ # http://pandas.pydata.org/pandas-docs/version/0.20/whatsnew.html#groupby-describe-formatting + # Main difference is that here we have a top-level column + # named 'value' + stats_df = stats_df.unstack() + else: + # Let's add a top-level column named 'value' which will be replaced + # by the actual metric name by the following code + stats_df.columns = pd.MultiIndex.from_product( + [['value'], stats_df.columns]) + + # Sort entries by the required metric and order value + stats_df.sort_values(by=[('value', sp.column)], ascending=ascending, inplace=True) stats_df.rename(columns={'value': metric}, inplace=True) -- GitLab From 1bada1e2d7a839fa6e03342e3ce58264376939a8 Mon Sep 17 00:00:00 2001 From: Patrick Bellasi Date: Wed, 18 Oct 2017 18:12:26 +0100 Subject: [PATCH 67/84] tools/wa_results_collector: fix colors conversion Signed-off-by: Patrick Bellasi --- libs/utils/wa_results_collector.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/libs/utils/wa_results_collector.py b/libs/utils/wa_results_collector.py index aff9b2ee0..86d76e396 100644 --- a/libs/utils/wa_results_collector.py +++ b/libs/utils/wa_results_collector.py @@ -27,6 +27,7 @@ import warnings from scipy.stats import ttest_ind import matplotlib.cm as cm import matplotlib.pyplot as plt +from matplotlib.colors import to_hex from conf import LisaLogging @@ -797,10 +798,11 @@ class WaResultsCollector(object): ax = cdf.df.plot(ax=axes, legend=False, xlim=(0,None), figsize=(16, 6), title='Total duration CDF ({:.1f}% within {} [{}] threshold)'\ .format(100. * cdf.below, threshold, units), - label=test, color=color) + label=test, + color=to_hex(color)) lines.append(ax.lines[-1]) axes.axhline(y=cdf.below, linewidth=1, - linestyle='--', color=color) + linestyle='--', color=to_hex(color)) self._log.debug("%-32s: %-32s: %.1f", keys[2], keys[1], 100.*cdf.below) axes.grid(True) -- GitLab From c2718e946eb822a98a37799925864c74f8d7be9e Mon Sep 17 00:00:00 2001 From: Brendan Jackman Date: Wed, 18 Oct 2017 15:10:47 +0100 Subject: [PATCH 68/84] tools/wa_results_collector: Use Jankbench artifact --- libs/utils/wa_results_collector.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/libs/utils/wa_results_collector.py b/libs/utils/wa_results_collector.py index 86d76e396..ce116be35 100644 --- a/libs/utils/wa_results_collector.py +++ b/libs/utils/wa_results_collector.py @@ -432,11 +432,8 @@ class WaResultsCollector(object): metrics_df = metrics_df.append( self._get_trace_metrics(artifacts['trace-cmd-bin'])) - # The "proper" way to do this would be to use WA's 'artifacts' - # mechanism, but for now just expecting a fixed filename is fine. 
- - if workload == 'jankbench': - df = pd.read_csv(os.path.join(job_dir, 'jankbench_frames.csv')) + if 'jankbench_results_csv' in artifacts: + df = pd.read_csv(artifacts['jankbench_results_csv']) df = pd.DataFrame({'value': df['total_duration']}) df.loc[:, 'metric'] = 'frame_total_duration' df.loc[:, 'units'] = 'ms' -- GitLab From 9af3146b0bbfcfaae57419310271f42e981f37a0 Mon Sep 17 00:00:00 2001 From: Brendan Jackman Date: Wed, 18 Oct 2017 15:46:49 +0100 Subject: [PATCH 69/84] tools/wa_results_collector: Fix parsing energy samples - Separate parsing of different ACME device outputs - Add support for Monsoon samples --- libs/utils/wa_results_collector.py | 47 +++++++++++++++++++++++++----- 1 file changed, 40 insertions(+), 7 deletions(-) diff --git a/libs/utils/wa_results_collector.py b/libs/utils/wa_results_collector.py index ce116be35..d07df7236 100644 --- a/libs/utils/wa_results_collector.py +++ b/libs/utils/wa_results_collector.py @@ -440,13 +440,46 @@ class WaResultsCollector(object): metrics_df = metrics_df.append(df) - if 'energy_instrument_output' in artifacts: - df = pd.read_csv(artifacts['energy_instrument_output']) - df = pd.DataFrame({'value': df['device_power']}) - df.loc[:, 'metric'] = 'device_power_sample' - df.loc[:, 'units'] = 'watts' - - metrics_df = metrics_df.append(df) + # WA's metrics model just exports overall energy metrics, not individual + # samples. We're going to extend that with individual samples so if you + # want to you can see how much variation there was in energy usage. + # So we'll look for the actual CSV files and parse that by hand. + # The parsing necessary is specific to the energy measurement backend + # that was used, which WA doesn't currently report directly. + # TODO: once WA's reporting of this data has been cleaned up a bit I + # think we can simplify this. + for artifact_name, path in artifacts.iteritems(): + if artifact_name.startswith('energy_instrument_output'): + df = pd.read_csv(path) + + if 'device_power' in df.columns: + # Looks like this is from an ACME + + df = pd.DataFrame({'value': df['device_power']}) + + # Figure out what to call the sample metrics. If the + # artifact name has something extra, that will be the + # channel (IIO device) name. Use that to differentiate where + # the samples came from. If not just call it + # 'device_power_sample'. + device_name = artifact_name[len('energy_instrument_output') + 1:] + name_extra = device_name or 'device' + df.loc[:, 'metric'] = '{}_power_sample'.format(name_extra) + + df.loc[:, 'units'] = 'watts' + + metrics_df = metrics_df.append(df) + elif 'output_power' in df.columns and 'USB_power' in df.columns: + # Looks like this is from a Monsoon + # For monsoon the USB and device power are collected + # together with the same timestamps, so we can just add them + # up. 
+ power_samples = df['output_power'] + df['USB_power']
+ df = pd.DataFrame({'value': power_samples})
+ df.loc[:, 'metric'] = 'device_power_sample'
+ df.loc[:, 'units'] = 'watts'
+
+ metrics_df = metrics_df.append(df)

 return metrics_df
-- GitLab
From b07c691c28ecd1e4b84489e9f12bc230a8361301 Mon Sep 17 00:00:00 2001
From: Patrick Bellasi
Date: Wed, 18 Oct 2017 15:03:38 +0100
Subject: [PATCH 70/84] Add WA3 example notebook

---
 ipynb/wltests/WA3_Compare.ipynb | 286 ++++++++++++++++++++++++++++++++
 1 file changed, 286 insertions(+)
 create mode 100644 ipynb/wltests/WA3_Compare.ipynb

diff --git a/ipynb/wltests/WA3_Compare.ipynb b/ipynb/wltests/WA3_Compare.ipynb
new file mode 100644
index 000000000..8325178c0
--- /dev/null
+++ b/ipynb/wltests/WA3_Compare.ipynb
@@ -0,0 +1,286 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Example use of `WaResultsCollector`\n",
+ "`WaResultsCollector` collects, analyses and visualises results from Workload Automation 3. Let's look at its docstring."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "collapsed": true
+ },
+ "outputs": [],
+ "source": [
+ "import logging\n",
+ "from IPython.display import display\n",
+ "\n",
+ "from wa_results_collector import WaResultsCollector"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from conf import LisaLogging\n",
+ "LisaLogging.setup()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "print WaResultsCollector.__doc__"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "collapsed": true
+ },
+ "outputs": [],
+ "source": [
+ "# You can configure the logging level for the WaResultsCollector\n",
+ "# by tuning its loglevel setting in the LISA_HOME/config.log"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Results collection\n",
+ "\n",
+ "If you have a LISA platform description for the platform the tests were run on, you can set it here to get extra metrics from trace analysis. If you set it to `None` you will still be able to analyse all the non-trace-derived metrics."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "collapsed": true
+ },
+ "outputs": [],
+ "source": [
+ "from libs.utils.platforms import hikey960\n",
+ "# platform = hikey960\n",
+ "platform = None"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "scrolled": false
+ },
+ "outputs": [],
+ "source": [
+ "collector = WaResultsCollector(base_dir='../../results/wltests/', # Base path of your results folders\n",
+ " #wa_dirs='(substring_to_match)', # Parse only folders matching this regexp\n",
+ " #parse_traces=False, # Disable trace parsing if you don't care about trace metrics\n",
+ " platform=platform,\n",
+ " kernel_repo_path='~/sources/linux')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Collected Metrics"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df = collector.results_df\n",
+ "logging.info(\"Metrics available for plots and analysis:\")\n",
+ "for metric in df.metric.unique().tolist():\n",
+ " logging.info(\" %s\", metric)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Jankbench"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Total Frame Duration"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "collapsed": true
+ },
+ "outputs": [],
+ "source": [
+ "for test in collector.tests(workload='jankbench'):\n",
+ " logging.info(\"Results for: %s\", test)\n",
+ " collector.report(workload='jankbench', metric='frame_total_duration',\n",
+ " test=test, sort_on='99%', ascending=True)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Energy"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "scrolled": false
+ },
+ "outputs": [],
+ "source": [
+ "for test in collector.tests(workload='jankbench'):\n",
+ " logging.info(\"Results for: %s\", test)\n",
+ " collector.report(workload='jankbench', metric='device_total_energy',\n",
+ " test=test, sort_on='99%', ascending=True)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Frame Duration CDF"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "scrolled": false
+ },
+ "outputs": [],
+ "source": [
+ "for test in collector.tests(workload='jankbench'):\n",
+ " logging.info(\"Results for: %s\", test)\n",
+ " collector.plot_cdf(workload='jankbench', metric='frame_total_duration', test=test, threshold=16)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Exoplayer"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Dropped Frames"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "for test in collector.tests(workload='exoplayer'):\n",
+ " logging.info(\"Results for: %s\", test)\n",
+ " collector.report(workload='exoplayer', metric='exoplayer_dropped_frames',\n",
+ " test=test, sort_on='99%', ascending=True)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Energy"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "for test in collector.tests(workload='exoplayer'):\n",
+ " logging.info(\"Results for: %s\", test)\n",
+ " collector.report(workload='exoplayer', metric='device_total_energy',\n",
+ " test=test, sort_on='99%', ascending=True)"
+ ]
+ },
+ {
+ "cell_type": 
"markdown", + "metadata": {}, + "source": [ + "# Generic comparison plots\n", + "`plot_comparisons` can be used to automatically discover metrics that changed between different kernel versions or tags. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": false + }, + "outputs": [], + "source": [ + "collector.plot_comparisons(base_id=df['kernel'].iloc[0], by='kernel')" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 2", + "language": "python", + "name": "python2" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 2 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython2", + "version": "2.7.12" + }, + "toc": { + "colors": { + "hover_highlight": "#DAA520", + "running_highlight": "#FF0000", + "selected_highlight": "#FFD700" + }, + "moveMenuLeft": true, + "nav_menu": { + "height": "100px", + "width": "252px" + }, + "navigate_menu": true, + "number_sections": true, + "sideBar": true, + "threshold": 4, + "toc_cell": false, + "toc_section_display": "block", + "toc_window_display": false + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} -- GitLab From fd431733273226313ce33dce9c647e227f9eb63e Mon Sep 17 00:00:00 2001 From: Brendan Jackman Date: Fri, 20 Oct 2017 16:33:43 +0100 Subject: [PATCH 71/84] tools/pcmark: Improve installation instructions --- tools/wa_user_directory/plugins/pcmark/__init__.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tools/wa_user_directory/plugins/pcmark/__init__.py b/tools/wa_user_directory/plugins/pcmark/__init__.py index 537a369b6..dedba9dfd 100644 --- a/tools/wa_user_directory/plugins/pcmark/__init__.py +++ b/tools/wa_user_directory/plugins/pcmark/__init__.py @@ -33,8 +33,9 @@ REGEXPS = { INSTALL_INSTRUCTIONS=""" This workload has incomplete automation support. Please download the APK from http://www.futuremark.com/downloads/pcmark-android.apk -and install it on the device. Then open the app on the device, and hit the -'install' button to set up the 'Work v2' benchmark. +and install it on the device. Connect the device to the internet, then open the +app on the device, and hit the 'install' button to set up the 'Work v2' +benchmark. """ class PcMark(Workload): -- GitLab From 779a5c079b9aff762bc49828d61c4904542f23cd Mon Sep 17 00:00:00 2001 From: Brendan Jackman Date: Fri, 20 Oct 2017 16:34:44 +0100 Subject: [PATCH 72/84] tools/pcmark: Add pcmark-specific logcat log for debugging --- tools/wa_user_directory/plugins/pcmark/__init__.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/tools/wa_user_directory/plugins/pcmark/__init__.py b/tools/wa_user_directory/plugins/pcmark/__init__.py index dedba9dfd..c3695fdc0 100644 --- a/tools/wa_user_directory/plugins/pcmark/__init__.py +++ b/tools/wa_user_directory/plugins/pcmark/__init__.py @@ -94,7 +94,12 @@ class PcMark(Workload): self.target.execute('input keyevent KEYCODE_TAB') self.monitor = self.target.get_logcat_monitor() - self.monitor.start() + # Store the filtered logcat in a file. We don't add this as an artifact, + # there's already one created by the WA framework. This is just for + # debugging the PCMark workload. 
+ logcat_path = os.path.join(context.output_directory,
+ 'pcmark_logcat.log')
+ self.monitor.start(logcat_path)

 def run(self, context):
 self.target.execute('input keyevent KEYCODE_ENTER')
-- GitLab
From 7a413e11ed7b608ab814c54cd833398dd4af9f94 Mon Sep 17 00:00:00 2001
From: Brendan Jackman
Date: Fri, 20 Oct 2017 16:37:32 +0100
Subject: [PATCH 73/84] tools/pcmark: Improve logcat parsing

- Make REGEXPS a class attribute
- Remove 'end' and 'score' (unused)
- Drop pointless sleep, replace with check for benchmark start
- Use regexps to filter logcat on target
---
 .../plugins/pcmark/__init__.py | 25 ++++++++++---------
 1 file changed, 13 insertions(+), 12 deletions(-)

diff --git a/tools/wa_user_directory/plugins/pcmark/__init__.py b/tools/wa_user_directory/plugins/pcmark/__init__.py
index c3695fdc0..9c56568d6 100644
--- a/tools/wa_user_directory/plugins/pcmark/__init__.py
+++ b/tools/wa_user_directory/plugins/pcmark/__init__.py
@@ -23,13 +23,6 @@ from zipfile import ZipFile
 from wa import Parameter, Workload
 from wa.framework.exception import WorkloadError

-REGEXPS = {
- 'start' : '.*START.*com.futuremark.pcmark.android.benchmark',
- 'end' : '.*onWebViewReady.*view_scoredetails.html',
- 'result' : '.*received result for correct code, result file in (?P<path>.*\.zip)',
- 'score' : '\s*<result_(?P<name>.*)Score>(?P<score>[0-9]*)<'
-}
-
 INSTALL_INSTRUCTIONS="""
 This workload has incomplete automation support. Please download the APK from
 http://www.futuremark.com/downloads/pcmark-android.apk
@@ -62,6 +55,12 @@ class PcMark(Workload):
 description='PCMark sub-benchmark to run'),
 ]

+
+ regexps = {
+ 'start' : '.*START.*com.futuremark.pcmark.android.benchmark',
+ 'result': '.*received result for correct code, result file in (?P<path>.*\.zip)'
+ }
+
 def initialize(self, context):
 super(PcMark, self).initialize(context)

@@ -93,7 +92,7 @@
 self.target.execute('input keyevent KEYCODE_TAB')
 self.target.execute('input keyevent KEYCODE_TAB')

- self.monitor = self.target.get_logcat_monitor()
+ self.monitor = self.target.get_logcat_monitor(self.regexps.values())
 # Store the filtered logcat in a file. We don't add this as an artifact,
 # there's already one created by the WA framework. This is just for
 # debugging the PCMark workload.
@@ -103,14 +102,16 @@
 def run(self, context):
 self.target.execute('input keyevent KEYCODE_ENTER')

- # Wait for page animations to end
- time.sleep(10)
- [self.output] = self.monitor.wait_for(REGEXPS['result'], timeout=600)
+ self.monitor.wait_for('.*START.*com.futuremark.pcmark.android.benchmark',
+ timeout=20)
+ self.logger.info('Detected PCMark start')
+
+ [self.output] = self.monitor.wait_for(self.regexps['result'], timeout=600)

 def extract_results(self, context):
 # TODO should this be an artifact? 
- remote_zip_path = re.match(REGEXPS['result'], self.output).group('path') + remote_zip_path = re.match(self.regexps['result'], self.output).group('path') local_zip_path = os.path.join(context.output_directory, self.target.path.basename(remote_zip_path)) print 'pulling {} -> {}'.format(remote_zip_path, local_zip_path) -- GitLab From 81b3df4c817efa2a77ffac1b096d1f8dc666b86c Mon Sep 17 00:00:00 2001 From: Brendan Jackman Date: Fri, 20 Oct 2017 16:38:10 +0100 Subject: [PATCH 74/84] tools/pcmark: Fix up logging --- tools/wa_user_directory/plugins/pcmark/__init__.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/tools/wa_user_directory/plugins/pcmark/__init__.py b/tools/wa_user_directory/plugins/pcmark/__init__.py index 9c56568d6..3f5d59891 100644 --- a/tools/wa_user_directory/plugins/pcmark/__init__.py +++ b/tools/wa_user_directory/plugins/pcmark/__init__.py @@ -114,10 +114,9 @@ class PcMark(Workload): remote_zip_path = re.match(self.regexps['result'], self.output).group('path') local_zip_path = os.path.join(context.output_directory, self.target.path.basename(remote_zip_path)) - print 'pulling {} -> {}'.format(remote_zip_path, local_zip_path) + self.logger.info('pulling {} -> {}'.format(remote_zip_path, local_zip_path)) self.target.pull(remote_zip_path, local_zip_path, as_root=True) - print 'extracting' with ZipFile(local_zip_path, 'r') as archive: archive.extractall(context.output_directory) @@ -127,10 +126,7 @@ class PcMark(Workload): for line in f: match = score_regex.match(line) if match: - print 'MATCH' metric_name = 'pcmark_{}'.format(match.group('name')) - print(metric_name) - print(match.group('score')) context.add_metric(metric_name, match.group('score')) -- GitLab From 2a8bb1b9900d8862b68a132e58d0bf2670259e78 Mon Sep 17 00:00:00 2001 From: Brendan Jackman Date: Fri, 20 Oct 2017 16:38:22 +0100 Subject: [PATCH 75/84] tools/pcmark: Add Result.xml as artifact --- tools/wa_user_directory/plugins/pcmark/__init__.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/tools/wa_user_directory/plugins/pcmark/__init__.py b/tools/wa_user_directory/plugins/pcmark/__init__.py index 3f5d59891..1d016b046 100644 --- a/tools/wa_user_directory/plugins/pcmark/__init__.py +++ b/tools/wa_user_directory/plugins/pcmark/__init__.py @@ -110,7 +110,6 @@ class PcMark(Workload): [self.output] = self.monitor.wait_for(self.regexps['result'], timeout=600) def extract_results(self, context): - # TODO should this be an artifact? 
remote_zip_path = re.match(self.regexps['result'], self.output).group('path')
 local_zip_path = os.path.join(context.output_directory,
 self.target.path.basename(remote_zip_path))
@@ -120,9 +119,14 @@
 with ZipFile(local_zip_path, 'r') as archive:
 archive.extractall(context.output_directory)

+ xml_path = os.path.join(context.output_directory, 'Result.xml')
+ if not os.path.exists(xml_path):
+ raise WorkloadError("PCMark results .zip didn't contain Result.xml")
+ context.add_artifact('pcmark_result_xml', xml_path, 'data')
+
 # Fetch workloads names and scores
 score_regex = re.compile('\s*<result_(?P<name>.*)Score>(?P<score>[0-9]*)<')
- with open(os.path.join(context.output_directory, 'Result.xml')) as f:
+ with open(xml_path) as f:
 for line in f:
 match = score_regex.match(line)
 if match:
-- GitLab
From d5f3020925a80d84ec9223398d0c6ee08b407a8b Mon Sep 17 00:00:00 2001
From: Brendan Jackman
Date: Fri, 20 Oct 2017 19:14:43 +0100
Subject: [PATCH 76/84] Revert "tools/pcmark: Add pcmark-specific logcat log
 for debugging"

This reverts commit 779a5c079b9aff762bc49828d61c4904542f23cd.
---
 tools/wa_user_directory/plugins/pcmark/__init__.py | 7 +------
 1 file changed, 1 insertion(+), 6 deletions(-)

diff --git a/tools/wa_user_directory/plugins/pcmark/__init__.py b/tools/wa_user_directory/plugins/pcmark/__init__.py
index 1d016b046..36f46a90c 100644
--- a/tools/wa_user_directory/plugins/pcmark/__init__.py
+++ b/tools/wa_user_directory/plugins/pcmark/__init__.py
@@ -93,12 +93,7 @@
 self.target.execute('input keyevent KEYCODE_TAB')

 self.monitor = self.target.get_logcat_monitor(self.regexps.values())
- # Store the filtered logcat in a file. We don't add this as an artifact,
- # there's already one created by the WA framework. This is just for
- # debugging the PCMark workload.
- logcat_path = os.path.join(context.output_directory,
- 'pcmark_logcat.log')
- self.monitor.start(logcat_path)
+ self.monitor.start()

 def run(self, context):
 self.target.execute('input keyevent KEYCODE_ENTER')
-- GitLab
From d88defebd253e402c022398d1b1099370d3bf5c3 Mon Sep 17 00:00:00 2001
From: Brendan Jackman
Date: Tue, 24 Oct 2017 17:16:57 +0100
Subject: [PATCH 77/84] tools/wltests: Remove ACME config from config.yaml

This will be overridden by test_series which creates a separate
config_acme.yaml file.
---
 tools/wa_user_directory/config.yaml | 12 ------------
 1 file changed, 12 deletions(-)

diff --git a/tools/wa_user_directory/config.yaml b/tools/wa_user_directory/config.yaml
index a56448a9a..0f2cbe931 100644
--- a/tools/wa_user_directory/config.yaml
+++ b/tools/wa_user_directory/config.yaml
@@ -7,18 +7,6 @@ device: generic_android
 # --config/-c option.
 allow_phone_home: false

-# Set up ACME
-energy_measurement:
- instrument: acme_cape
- instrument_parameters:
- host: baylibre-acme.local
- # If collecting on multiple channels, or another channel than iio:device0,
- # set them here:
- iio_devices: ["iio:device0"]
-
- # If iio-capture is not in your $PATH, uncomment this and set the path. 
- # iio-capture: /path/to/iio-capture - trace_cmd: buffer_size: 102400 report: false -- GitLab From 64c71665b08c864281687538dbdb74f1962fbf82 Mon Sep 17 00:00:00 2001 From: Brendan Jackman Date: Tue, 24 Oct 2017 17:17:19 +0100 Subject: [PATCH 78/84] tools/wltests: Clean up indentation in config.yaml --- tools/wa_user_directory/config.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/wa_user_directory/config.yaml b/tools/wa_user_directory/config.yaml index 0f2cbe931..99b2724fa 100644 --- a/tools/wa_user_directory/config.yaml +++ b/tools/wa_user_directory/config.yaml @@ -8,8 +8,8 @@ device: generic_android allow_phone_home: false trace_cmd: - buffer_size: 102400 - report: false + buffer_size: 102400 + report: false # Disable re-trying things that go wrong max_retries: 0 -- GitLab From 87a73d5ded112a8518029c761a662537866426d8 Mon Sep 17 00:00:00 2001 From: Brendan Jackman Date: Tue, 24 Oct 2017 17:26:38 +0100 Subject: [PATCH 79/84] tools/wltests/test_series: Clarify lack of '=' in --acme_channels example --- tools/wltests/test_series | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/wltests/test_series b/tools/wltests/test_series index 5c89e0f88..a2ec8c7dc 100755 --- a/tools/wltests/test_series +++ b/tools/wltests/test_series @@ -129,7 +129,8 @@ Additional arguments: assthrought connetion. Default ACME_USB=device1 --acme_channels A space separated list for channels ID to sample. - For example, "0 2" will enabled sampling on: + For example, --acme_channels "0 2" will enabled + sampling on: iio:device0 and iio:device2 Default: ACME_CHANNELS="0" -- GitLab From 7bc5311fd2ea3cd29aacf90209fc35f92ca96ea0 Mon Sep 17 00:00:00 2001 From: Brendan Jackman Date: Tue, 24 Oct 2017 17:29:39 +0100 Subject: [PATCH 80/84] submodules: Update devlib to latest master This brings the is_network_connected method which WA3 will need for the network_required attribute implementation --- libs/devlib | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libs/devlib b/libs/devlib index 16d5e0b6a..af0ed2ab4 160000 --- a/libs/devlib +++ b/libs/devlib @@ -1 +1 @@ -Subproject commit 16d5e0b6a78c676449596b3bae41ff65979b4181 +Subproject commit af0ed2ab4806fa15edec3f16c83c651da82a5757 -- GitLab From 33ae24f3457718149b975292acf1d9776fbba58c Mon Sep 17 00:00:00 2001 From: Brendan Jackman Date: Tue, 24 Oct 2017 17:28:38 +0100 Subject: [PATCH 81/84] submodules: Update workload-automation to latest next This brings the network_required Workload attribute and the bail_on_init_failure setting --- tools/workload-automation | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/workload-automation b/tools/workload-automation index 42b148655..eb0f53c8f 160000 --- a/tools/workload-automation +++ b/tools/workload-automation @@ -1 +1 @@ -Subproject commit 42b1486559e5559300d94508ce4b74551825e238 +Subproject commit eb0f53c8f644e6acf99ffba9921c1d0c8186af55 -- GitLab From 7ab0e84dd5dff0f5bc409219880fb0468a9afbeb Mon Sep 17 00:00:00 2001 From: Brendan Jackman Date: Tue, 24 Oct 2017 17:37:53 +0100 Subject: [PATCH 82/84] tools/wltests: Disable bail_on_init_failure for WA3 That means if one workload fails to initialize (usually because of missing .apk, unrooted target, or no network connection), we continue to run all the others and collect results for those. 
--- tools/wa_user_directory/config.yaml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tools/wa_user_directory/config.yaml b/tools/wa_user_directory/config.yaml index 99b2724fa..0fb4aa003 100644 --- a/tools/wa_user_directory/config.yaml +++ b/tools/wa_user_directory/config.yaml @@ -13,3 +13,7 @@ trace_cmd: # Disable re-trying things that go wrong max_retries: 0 + +# If any of the workloads fail during the initialization phase, don't bail out +# on the rest of the run +bail_on_init_failure: false -- GitLab From 79c226891fa8651311ea30688ea18cce4241d116 Mon Sep 17 00:00:00 2001 From: Brendan Jackman Date: Wed, 25 Oct 2017 12:03:10 +0100 Subject: [PATCH 83/84] tools/wltests: Add dmesg to instrumentation in example agenda --- tools/wltests/agendas/example-rich.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/wltests/agendas/example-rich.yaml b/tools/wltests/agendas/example-rich.yaml index 8a069112c..f8a14a05f 100644 --- a/tools/wltests/agendas/example-rich.yaml +++ b/tools/wltests/agendas/example-rich.yaml @@ -7,10 +7,10 @@ # config: - # Collect energy data and ftrace files + # Collect energy data, ftrace files, and dmesg # You may want to edit your config.yaml to set up the energy_measurement # instrument (an example configuration is provided in this repo). - instrumentation: [energy_measurement, trace-cmd] + instrumentation: [energy_measurement, trace-cmd, dmesg] global: # Do everything 5 times -- GitLab From 04ce5b2a18058dd9255fde2ffe973f8ac07cd6cb Mon Sep 17 00:00:00 2001 From: Brendan Jackman Date: Wed, 25 Oct 2017 12:06:20 +0100 Subject: [PATCH 84/84] tools/wltests: Move instrumentation to 'global' section The 'config' section is just confusing --- tools/wltests/agendas/example-rich.yaml | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/tools/wltests/agendas/example-rich.yaml b/tools/wltests/agendas/example-rich.yaml index f8a14a05f..21fcb8f2a 100644 --- a/tools/wltests/agendas/example-rich.yaml +++ b/tools/wltests/agendas/example-rich.yaml @@ -6,13 +6,11 @@ # mechanisms). # -config: +global: # Collect energy data, ftrace files, and dmesg # You may want to edit your config.yaml to set up the energy_measurement # instrument (an example configuration is provided in this repo). instrumentation: [energy_measurement, trace-cmd, dmesg] - -global: # Do everything 5 times iterations: 5 -- GitLab
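
The sorting criteria introduced by PATCH 64/84 above are easier to follow outside the diff context. Below is a minimal, standalone sketch -- not code from the series itself -- of how a sort_on string such as 'mean' or '95%' is validated and mapped to the DataFrame method name and keyword arguments that boxplot() and describe() then apply via getattr(). The 'quantile' named group is the one implied by the match.group('quantile') call in the patch; the helper name get_sort_params is hypothetical.

    import re
    from collections import namedtuple

    # key: DataFrame method to call; params: its kwargs; column: describe() column to sort by
    SortBy = namedtuple('SortBy', ['key', 'params', 'column'])

    def get_sort_params(sort_on):
        # Plain describe() column names are accepted as-is...
        valid_sort = ['count', 'mean', 'std', 'min', 'max']

        # ...while a percentile is an integer in [1..100] followed by '%'
        match = re.match(r'^(?P<quantile>\d{1,3})%$', sort_on)
        if match:
            quantile = int(match.group('quantile'))
            if quantile < 1 or quantile > 100:
                raise ValueError("Quantile value out of range [1..100]")
            # DataFrame.quantile() takes the fraction through its 'q' parameter
            return SortBy('quantile', {'q': quantile / 100.}, sort_on)

        if sort_on in valid_sort:
            return SortBy(sort_on, {}, sort_on)

        raise ValueError("sort_on={} not supported, allowed values are a "
                         "percentile or {}".format(sort_on, valid_sort))

    print(get_sort_params('95%'))   # SortBy(key='quantile', params={'q': 0.95}, column='95%')
    print(get_sort_params('mean'))  # SortBy(key='mean', params={}, column='mean')

Returning the method name rather than a computed value is what lets a single string argument select either a plain statistic or an arbitrary percentile: boxplot() evaluates it as getattr(_df, sp.key)(**sp.params), while describe() only needs sp.column to pick the matching column of the grouped statistics.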