From f2427377320225fddec969fda7786d11cdf61586 Mon Sep 17 00:00:00 2001
From: Juri Lelli
Date: Thu, 13 Apr 2017 14:15:42 +0100
Subject: [PATCH 1/8] android/benchmark: add iterations command line parameter

It is usually handy to be able to run the same benchmark multiple times
(e.g., to do statistics on results).

Add a --iterations command line parameter to implement the feature.

Signed-off-by: Juri Lelli
---
 libs/utils/android/benchmark.py | 21 ++++++++++++++++++---
 1 file changed, 18 insertions(+), 3 deletions(-)

diff --git a/libs/utils/android/benchmark.py b/libs/utils/android/benchmark.py
index 0f804ef40..41dd81578 100644
--- a/libs/utils/android/benchmark.py
+++ b/libs/utils/android/benchmark.py
@@ -59,6 +59,9 @@ class LisaBenchmark(object):
     bm_collect = None
     """Override this with the set of data to collect during test exeution"""
 
+    bm_iterations = 1
+    """Override this with the desired number of iterations of the test"""
+
     def benchmarkInit(self):
         """
         Code executed before running the benchmark
@@ -106,6 +109,9 @@ class LisaBenchmark(object):
                 help='Set of metrics to collect, '
                      'e.g. "energy systrace_30" to sample energy and collect a 30s systrace, '
                      'if specified overrides test defaults')
+        parser.add_argument('--iterations', type=int,
+                default=1,
+                help='Number of iterations the same test has to be repeated for (default 1)')
 
         # Measurements settings
         parser.add_argument('--iio-channel-map', type=str,
@@ -132,6 +138,8 @@ class LisaBenchmark(object):
             self.bm_conf['results_dir'] = self.args.results_dir
         if self.args.collect:
             self.bm_collect = self.args.collect
+        if self.args.iterations:
+            self.bm_iterations = self.args.iterations
 
         # Override energy meter configuration
         if self.args.iio_channel_map:
@@ -203,9 +211,16 @@ class LisaBenchmark(object):
             raise
 
         self._log.info('=== Execution...')
-        self.wl.run(out_dir=self.out_dir,
-                    collect=self._getBmCollect(),
-                    **self.bm_params)
+        for iter_id in range(1, self.bm_iterations+1):
+            self._log.info('=== Iteration {}/{}...'.format(iter_id, self.bm_iterations))
+            out_dir = os.path.join(self.out_dir, "{:03d}".format(iter_id))
+            try:
+                os.makedirs(out_dir)
+            except: pass
+
+            self.wl.run(out_dir=out_dir,
+                        collect=self._getBmCollect(),
+                        **self.bm_params)
 
         self._log.info('=== Finalization...')
         self.benchmarkFinalize()
--
GitLab
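With this in place a benchmark can be repeated several times in a single
run, with the results of each iteration stored in a numbered
subdirectory (001/, 002/, ...) of the results directory. A hypothetical
invocation, assuming the test scripts are launched from the root of a
LISA tree:

    $ python tests/benchmarks/android_geekbench.py --iterations 5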
From cd1873b266452ac54943c8892234229f3a754215 Mon Sep 17 00:00:00 2001
From: Juri Lelli
Date: Thu, 13 Apr 2017 16:53:36 +0100
Subject: [PATCH 2/8] android/benchmark: add iterations-pause command line parameter

Add a --iterations-pause command line parameter to be able to configure
the amount of time to pause for before each iteration of the benchmark.

We add the feature by creating a new private callback function called
_preRun() that is always called before each iteration of the benchmark.

Signed-off-by: Juri Lelli
---
 libs/utils/android/benchmark.py | 21 +++++++++++++++++++++
 1 file changed, 21 insertions(+)

diff --git a/libs/utils/android/benchmark.py b/libs/utils/android/benchmark.py
index 41dd81578..269989ebf 100644
--- a/libs/utils/android/benchmark.py
+++ b/libs/utils/android/benchmark.py
@@ -62,6 +62,12 @@ class LisaBenchmark(object):
     bm_iterations = 1
     """Override this with the desired number of iterations of the test"""
 
+    bm_iterations_pause = 30
+    """
+    Override this with the desired amount of time (in seconds) to pause
+    for before each iteration
+    """
+
     def benchmarkInit(self):
         """
         Code executed before running the benchmark
@@ -112,6 +118,9 @@ class LisaBenchmark(object):
         parser.add_argument('--iterations', type=int,
                 default=1,
                 help='Number of iterations the same test has to be repeated for (default 1)')
+        parser.add_argument('--iterations-pause', type=int,
+                default=30,
+                help='Amount of time (in seconds) to pause for before each iteration (default 30s)')
 
         # Measurements settings
         parser.add_argument('--iio-channel-map', type=str,
@@ -140,6 +149,8 @@ class LisaBenchmark(object):
             self.bm_collect = self.args.collect
         if self.args.iterations:
             self.bm_iterations = self.args.iterations
+        if self.args.iterations_pause:
+            self.bm_iterations_pause = self.args.iterations_pause
 
         # Override energy meter configuration
         if self.args.iio_channel_map:
@@ -186,6 +197,14 @@ class LisaBenchmark(object):
             return ''
         return self.bm_collect
 
+    def _preRun(self):
+        """
+        Code executed before every iteration of the benchmark
+        """
+        self._log.info('Waiting {}[s] before executing iteration...'\
+                .format(self.bm_iterations_pause))
+        sleep(self.bm_iterations_pause)
+
     def __init__(self):
         """
         Set up logging and trigger running experiments
@@ -218,6 +237,8 @@ class LisaBenchmark(object):
                 os.makedirs(out_dir)
             except: pass
 
+            self._preRun()
+
             self.wl.run(out_dir=out_dir,
                         collect=self._getBmCollect(),
                         **self.bm_params)
--
GitLab
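The pause can then be tuned per run; e.g. to settle the device for a
full minute between iterations instead of the default 30s (hypothetical
invocation):

    $ python tests/benchmarks/android_geekbench.py --iterations 5 --iterations-pause 60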
From c094581afc96ba9341657ee25d3eb4dce18e4333 Mon Sep 17 00:00:00 2001
From: Juri Lelli
Date: Fri, 14 Apr 2017 10:09:58 +0100
Subject: [PATCH 3/8] android/benchmark: add iterations-reboot command line parameter

Add a command line parameter (--iterations-reboot) to be able to
configure the behaviour before each iteration (whether to reboot or
not).

Signed-off-by: Juri Lelli
---
 libs/utils/android/benchmark.py | 26 +++++++++++++++++++++++---
 1 file changed, 23 insertions(+), 3 deletions(-)

diff --git a/libs/utils/android/benchmark.py b/libs/utils/android/benchmark.py
index 269989ebf..429037eb1 100644
--- a/libs/utils/android/benchmark.py
+++ b/libs/utils/android/benchmark.py
@@ -68,6 +68,12 @@ class LisaBenchmark(object):
     for before each iteration
     """
 
+    bm_iterations_reboot = False
+    """
+    Override this with the desired behaviour: reboot or not reboot before
+    each iteration
+    """
+
     def benchmarkInit(self):
         """
         Code executed before running the benchmark
@@ -121,6 +127,8 @@ class LisaBenchmark(object):
         parser.add_argument('--iterations-pause', type=int,
                 default=30,
                 help='Amount of time (in seconds) to pause for before each iteration (default 30s)')
+        parser.add_argument('--iterations-reboot', action="store_true",
+                help='Reboot before each iteration (default False)')
 
         # Measurements settings
         parser.add_argument('--iio-channel-map', type=str,
@@ -151,6 +159,8 @@ class LisaBenchmark(object):
             self.bm_iterations = self.args.iterations
         if self.args.iterations_pause:
             self.bm_iterations_pause = self.args.iterations_pause
+        if self.args.iterations_reboot:
+            self.bm_iterations_reboot = True
 
         # Override energy meter configuration
         if self.args.iio_channel_map:
@@ -201,9 +211,15 @@ class LisaBenchmark(object):
         """
         Code executed before every iteration of the benchmark
         """
-        self._log.info('Waiting {}[s] before executing iteration...'\
-                .format(self.bm_iterations_pause))
-        sleep(self.bm_iterations_pause)
+        rebooted = False
+
+        if self.bm_iterations_reboot:
+            rebooted = self.reboot_target()
+
+        if not rebooted:
+            self._log.info('Waiting {}[s] before executing iteration...'\
+                    .format(self.bm_iterations_pause))
+            sleep(self.bm_iterations_pause)
 
     def __init__(self):
         """
@@ -283,6 +299,7 @@ class LisaBenchmark(object):
         method will reboot the target with the specified kernel and wait
         for the target to be up and running.
         """
+        rebooted = False
 
         # Reboot the device, if a boot_image has been specified
         if self.args.boot_image:
@@ -299,6 +316,7 @@ class LisaBenchmark(object):
             self._log.debug('Waiting {}[s] for boot to start...'\
                     .format(self.args.boot_timeout))
             sleep(self.args.boot_timeout)
+            rebooted = True
         else:
             self._log.warning('Device NOT rebooted, using current image')
 
@@ -322,4 +340,6 @@ class LisaBenchmark(object):
         # Wait for the system to complete the boot
         self._wait_for_logcat_idle()
 
+        return rebooted
+
 # vim :set tabstop=4 shiftwidth=4 expandtab
--
GitLab
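Note that reboot_target() only performs an actual reboot when a boot
image has been specified, so the new switch is meant to be combined
with --boot-image. A hypothetical invocation (the boot.img path is
illustrative):

    $ python tests/benchmarks/android_jankbench.py --boot-image boot.img \
          --iterations 3 --iterations-reboot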
From 78b9217cdddabc0903c68c018d33f2d6995f3042 Mon Sep 17 00:00:00 2001
From: Juri Lelli
Date: Fri, 14 Apr 2017 10:17:28 +0100
Subject: [PATCH 4/8] android/benchmark: wait 60s by default after rebooting

Be more conservative and wait 60s (instead of 20) after each reboot to
be sure the platform is stable.

Signed-off-by: Juri Lelli
---
 libs/utils/android/benchmark.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/libs/utils/android/benchmark.py b/libs/utils/android/benchmark.py
index 429037eb1..ba515218e 100644
--- a/libs/utils/android/benchmark.py
+++ b/libs/utils/android/benchmark.py
@@ -100,8 +100,8 @@ class LisaBenchmark(object):
                 default=None,
                 help='Path of the Android boot.img to be used')
         parser.add_argument('--boot-timeout', type=int,
-                default=20,
-                help='Timeout in [s] to wait after a reboot (default 20)')
+                default=60,
+                help='Timeout in [s] to wait after a reboot (default 60)')
 
         # Android settings
         parser.add_argument('--android-device', type=str,
--
GitLab

From 0a2fbfc0b6e04261ee5dccf8b7e5e7a736ee57a6 Mon Sep 17 00:00:00 2001
From: Juri Lelli
Date: Fri, 14 Apr 2017 10:42:31 +0100
Subject: [PATCH 5/8] android/benchmark: implement reboot as part of new _preInit

When --boot-image is present we can trigger a reboot from a newly
introduced private _preInit() callback (instead of letting the
benchmarks implement the rebooting scheme). This should improve the
design and make benchmark implementations less error prone.

Signed-off-by: Juri Lelli
---
 libs/utils/android/benchmark.py | 17 ++++++++++++++++-
 1 file changed, 16 insertions(+), 1 deletion(-)

diff --git a/libs/utils/android/benchmark.py b/libs/utils/android/benchmark.py
index ba515218e..8f3a54c51 100644
--- a/libs/utils/android/benchmark.py
+++ b/libs/utils/android/benchmark.py
@@ -59,6 +59,9 @@ class LisaBenchmark(object):
     bm_collect = None
     """Override this with the set of data to collect during test exeution"""
 
+    bm_reboot = False
+    """Override this with True if a boot image was passed as command line parameter"""
+
     bm_iterations = 1
     """Override this with the desired number of iterations of the test"""
 
@@ -147,6 +150,8 @@ class LisaBenchmark(object):
             raise NotImplementedError(msg)
 
         # Override default configuration with command line parameters
+        if self.args.boot_image:
+            self.bm_reboot = True
         if self.args.android_device:
             self.bm_conf['device'] = self.args.android_device
         if self.args.android_home:
@@ -207,13 +212,22 @@ class LisaBenchmark(object):
             return ''
         return self.bm_collect
 
+    def _preInit(self):
+        """
+        Code executed before running the benchmark
+        """
+        # If iterations_reboot is True we are going to reboot before the
+        # first iteration anyway.
+        if self.bm_reboot and not self.bm_iterations_reboot:
+            self.reboot_target()
+
     def _preRun(self):
         """
         Code executed before every iteration of the benchmark
         """
         rebooted = False
 
-        if self.bm_iterations_reboot:
+        if self.bm_reboot and self.bm_iterations_reboot:
             rebooted = self.reboot_target()
 
         if not rebooted:
@@ -240,6 +254,7 @@ class LisaBenchmark(object):
         self.wl = self._getWorkload()
         self.out_dir=self.te.res_dir
         try:
+            self._preInit()
             self.benchmarkInit()
         except:
             self._log.warning('Benchmark initialization failed: execution aborted')
--
GitLab
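After this change, passing --boot-image alone triggers a single reboot
from _preInit() before the first iteration, with no reboot code needed
in the benchmark itself; e.g. (hypothetical invocation, path
illustrative):

    $ python tests/benchmarks/android_vellamo.py --boot-image boot.img --iterations 3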
From 693a36843ebf77c01ea4b45eed6c8c09006200a9 Mon Sep 17 00:00:00 2001
From: Juri Lelli
Date: Fri, 14 Apr 2017 11:00:19 +0100
Subject: [PATCH 6/8] tests/benchmarks: adapt to the new reboot process

Since benchmarkInit() now supports rebooting the device, benchmarks
should migrate to the new implementation. Do it.

Signed-off-by: Juri Lelli
---
 tests/benchmarks/android_geekbench.py | 10 ++--------
 tests/benchmarks/android_gmaps.py     | 10 ++--------
 tests/benchmarks/android_jankbench.py | 10 ++--------
 tests/benchmarks/android_uibench.py   | 10 ++--------
 tests/benchmarks/android_vellamo.py   | 10 ++--------
 tests/benchmarks/android_youtube.py   | 10 ++--------
 6 files changed, 12 insertions(+), 48 deletions(-)

diff --git a/tests/benchmarks/android_geekbench.py b/tests/benchmarks/android_geekbench.py
index 6cd1df5b4..96ad4c139 100755
--- a/tests/benchmarks/android_geekbench.py
+++ b/tests/benchmarks/android_geekbench.py
@@ -66,8 +66,6 @@ class GeekbenchTest(LisaBenchmark):
     def benchmarkInit(self):
         self.setupWorkload()
         self.setupGovernor()
-        if self.reboot:
-            self.reboot_target()
 
     def benchmarkFinalize(self):
         if self.delay_after_s:
@@ -75,8 +73,7 @@ class GeekbenchTest(LisaBenchmark):
                 self.delay_after_s)
             sleep(self.delay_after_s)
 
-    def __init__(self, governor, test, reboot=False, delay_after_s=0):
-        self.reboot = reboot
+    def __init__(self, governor, test, delay_after_s=0):
         self.governor = governor
         self.test = test
         self.delay_after_s = delay_after_s
@@ -161,8 +158,6 @@ tests = [
     'COMPUTE'
 ]
 
-# Reboot device only the first time
-do_reboot = True
 tests_remaining = len(governors) * len(tests)
 tests_completed = 0
 for governor in governors:
@@ -170,12 +165,11 @@ for governor in governors:
     for test in tests:
         tests_remaining -= 1
         delay_after_s = 30 if tests_remaining else 0
         try:
-            GeekbenchTest(governor, test, do_reboot, delay_after_s)
+            GeekbenchTest(governor, test, delay_after_s)
             tests_completed += 1
         except:
             # A test configuraion failed, continue with other tests
             pass
-        do_reboot = False
 
 # We want to collect data from at least one governor
 assert(tests_completed >= 1)

diff --git a/tests/benchmarks/android_gmaps.py b/tests/benchmarks/android_gmaps.py
index 77afa139f..1c6467263 100755
--- a/tests/benchmarks/android_gmaps.py
+++ b/tests/benchmarks/android_gmaps.py
@@ -66,8 +66,6 @@ class GMapsTest(LisaBenchmark):
     def benchmarkInit(self):
         self.setupWorkload()
         self.setupGovernor()
-        if self.reboot:
-            self.reboot_target()
 
     def benchmarkFinalize(self):
         if self.delay_after_s:
@@ -75,9 +73,8 @@ class GMapsTest(LisaBenchmark):
                 self.delay_after_s)
             sleep(self.delay_after_s)
 
-    def __init__(self, governor, location_search, swipe_count, reboot=False,
+    def __init__(self, governor, location_search, swipe_count,
                  delay_after_s=0):
-        self.reboot = reboot
         self.governor = governor
         self.location_search = location_search
         self.swipe_count = swipe_count
@@ -167,8 +164,6 @@ locations = [
     "London British Museum"
 ]
 
-# Reboot device only the first time
-do_reboot = True
 tests_remaining = len(governors) * len(locations)
 tests_completed = 0
 for governor in governors:
@@ -177,12 +172,11 @@ for governor in governors:
     for location in locations:
         tests_remaining -= 1
         delay_after_s = 30 if tests_remaining else 0
         try:
             GMapsTest(governor, location, swipe_count,
-                      do_reboot, delay_after_s)
+                      delay_after_s)
             tests_completed += 1
         except:
             # A test configuration failed, continue with other tests
             pass
-        do_reboot = False
 
 # We want to collect data from at least one governor
 assert(tests_completed >= 1)

diff --git a/tests/benchmarks/android_jankbench.py b/tests/benchmarks/android_jankbench.py
index 160373035..967dd60a0 100755
--- a/tests/benchmarks/android_jankbench.py
+++ b/tests/benchmarks/android_jankbench.py
@@ -67,8 +67,6 @@ class JankbenchTest(LisaBenchmark):
     def benchmarkInit(self):
         self.setupWorkload()
         self.setupGovernor()
-        if self.reboot:
-            self.reboot_target()
 
     def benchmarkFinalize(self):
         if self.delay_after_s:
@@ -77,8 +75,7 @@ class JankbenchTest(LisaBenchmark):
             sleep(self.delay_after_s)
 
     def __init__(self, governor, test, iterations,
-                 reboot=False, delay_after_s=0):
-        self.reboot = reboot
+                 delay_after_s=0):
         self.governor = governor
         self.test = test
         self.iterations = iterations
@@ -172,8 +169,6 @@ tests = [
     'edit_text'
 ]
 
-# Reboot device only the first time
-do_reboot = True
 tests_remaining = len(governors) * len(tests)
 tests_completed = 0
 for governor in governors:
@@ -182,12 +177,11 @@ for governor in governors:
     for test in tests:
         tests_remaining -= 1
         delay_after_s = 30 if tests_remaining else 0
         try:
             JankbenchTest(governor, test, iterations,
-                          do_reboot, delay_after_s)
+                          delay_after_s)
             tests_completed += 1
         except:
             # A test configuraion failed, continue with other tests
             pass
-        do_reboot = False
 
 # We want to collect data from at least one governor
 assert(tests_completed >= 1)

diff --git a/tests/benchmarks/android_uibench.py b/tests/benchmarks/android_uibench.py
index c59ae6d04..376e7682f 100755
--- a/tests/benchmarks/android_uibench.py
+++ b/tests/benchmarks/android_uibench.py
@@ -66,8 +66,6 @@ class UiBenchTest(LisaBenchmark):
     def benchmarkInit(self):
         self.setupWorkload()
         self.setupGovernor()
-        if self.reboot:
-            self.reboot_target()
 
     def benchmarkFinalize(self):
         if self.delay_after_s:
@@ -75,9 +73,8 @@ class UiBenchTest(LisaBenchmark):
                 self.delay_after_s)
             sleep(self.delay_after_s)
 
-    def __init__(self, governor, test, duration_s, reboot=False,
+    def __init__(self, governor, test, duration_s,
                  delay_after_s=0):
-        self.reboot = reboot
         self.governor = governor
         self.test = test
         self.duration_s = duration_s
@@ -184,8 +181,6 @@ tests = [
     'ActivityTransitionDetails',
 ]
 
-# Reboot device only the first time
-do_reboot = True
 tests_remaining = len(governors) * len(tests)
 tests_completed = 0
 for governor in governors:
@@ -194,12 +189,11 @@ for governor in governors:
     for test in tests:
         tests_remaining -= 1
         delay_after_s = 30 if tests_remaining else 0
         try:
             UiBenchTest(governor, test, duration_s,
-                        do_reboot, delay_after_s)
+                        delay_after_s)
             tests_completed += 1
         except:
             # A test configuraion failed, continue with other tests
             pass
-        do_reboot = False
 
 # We want to collect data from at least one governor
 assert(tests_completed >= 1)

diff --git a/tests/benchmarks/android_vellamo.py b/tests/benchmarks/android_vellamo.py
index 8afea2246..265febca7 100755
--- a/tests/benchmarks/android_vellamo.py
+++ b/tests/benchmarks/android_vellamo.py
@@ -66,8 +66,6 @@ class VellamoTest(LisaBenchmark):
     def benchmarkInit(self):
         self.setupWorkload()
         self.setupGovernor()
-        if self.reboot:
-            self.reboot_target()
 
     def benchmarkFinalize(self):
         if self.delay_after_s:
@@ -75,8 +73,7 @@ class VellamoTest(LisaBenchmark):
                 self.delay_after_s)
             sleep(self.delay_after_s)
 
-    def __init__(self, governor, test, reboot=False, delay_after_s=0):
-        self.reboot = reboot
+    def __init__(self, governor, test, delay_after_s=0):
         self.governor = governor
         self.test = test
         self.delay_after_s = delay_after_s
@@ -164,8 +161,6 @@ tests = [
 ]
 
-# Reboot device only the first time
-do_reboot = True
 tests_remaining = len(governors) * len(tests)
 tests_completed = 0
 for governor in governors:
@@ -173,12 +168,11 @@ for governor in governors:
     for test in tests:
         tests_remaining -= 1
         delay_after_s = 30 if tests_remaining else 0
         try:
-            VellamoTest(governor, test, do_reboot, delay_after_s)
+            VellamoTest(governor, test, delay_after_s)
             tests_completed += 1
         except:
             # A test configuraion failed, continue with other tests
             pass
-        do_reboot = False
 
 # We want to collect data from at least one governor
 assert(tests_completed >= 1)
diff --git a/tests/benchmarks/android_youtube.py b/tests/benchmarks/android_youtube.py
index e09ca7f14..3afa21703 100755
--- a/tests/benchmarks/android_youtube.py
+++ b/tests/benchmarks/android_youtube.py
@@ -66,8 +66,6 @@ class YouTubeTest(LisaBenchmark):
     def benchmarkInit(self):
         self.setupWorkload()
         self.setupGovernor()
-        if self.reboot:
-            self.reboot_target()
 
     def benchmarkFinalize(self):
         if self.delay_after_s:
@@ -75,9 +73,8 @@ class YouTubeTest(LisaBenchmark):
                 self.delay_after_s)
             sleep(self.delay_after_s)
 
-    def __init__(self, governor, video_url, video_duration_s, reboot=False,
+    def __init__(self, governor, video_url, video_duration_s,
                  delay_after_s=0):
-        self.reboot = reboot
         self.governor = governor
         self.video_url = video_url
         self.video_duration_s = video_duration_s
@@ -166,8 +163,6 @@ video_urls = [
     'https://youtu.be/XSGBVzeBUbk?t=45s',
 ]
 
-# Reboot device only the first time
-do_reboot = True
 tests_remaining = len(governors) * len(video_urls)
 tests_completed = 0
 for governor in governors:
@@ -176,12 +171,11 @@ for governor in governors:
     for url in video_urls:
         tests_remaining -= 1
         delay_after_s = 30 if tests_remaining else 0
         try:
             YouTubeTest(governor, url, video_duration_s,
-                        do_reboot, delay_after_s)
+                        delay_after_s)
             tests_completed += 1
         except:
             # A test configuraion failed, continue with other tests
             pass
-        do_reboot = False
 
 # We want to collect data from at least one governor
 assert(tests_completed >= 1)
--
GitLab

From a6c02aeee59cfb0dde62a538f573d77914b02018 Mon Sep 17 00:00:00 2001
From: Juri Lelli
Date: Fri, 14 Apr 2017 15:21:50 +0100
Subject: [PATCH 7/8] android/benchmark: don't wait before first iteration

Even when --iterations-pause is used, it doesn't make much sense to
pause before the first iteration. Prevent that by using an iterations
counter.

Signed-off-by: Juri Lelli
---
 libs/utils/android/benchmark.py | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/libs/utils/android/benchmark.py b/libs/utils/android/benchmark.py
index 8f3a54c51..842d3cff1 100644
--- a/libs/utils/android/benchmark.py
+++ b/libs/utils/android/benchmark.py
@@ -221,6 +221,8 @@ class LisaBenchmark(object):
         if self.bm_reboot and not self.bm_iterations_reboot:
             self.reboot_target()
 
+        self.iterations_count = 1
+
     def _preRun(self):
         """
         Code executed before every iteration of the benchmark
@@ -230,11 +232,13 @@ class LisaBenchmark(object):
         if self.bm_reboot and self.bm_iterations_reboot:
             rebooted = self.reboot_target()
 
-        if not rebooted:
-            self._log.info('Waiting {}[s] before executing iteration...'\
-                    .format(self.bm_iterations_pause))
+        if not rebooted and self.iterations_count > 1:
+            self._log.info('Waiting {}[s] before executing iteration {}...'\
+                    .format(self.bm_iterations_pause, self.iterations_count))
             sleep(self.bm_iterations_pause)
 
+        self.iterations_count += 1
+
     def __init__(self):
         """
         Set up logging and trigger running experiments
--
GitLab
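With the counter in place, a run such as the following (hypothetical
invocation) starts its first iteration immediately and inserts the
pause only before iterations 2 and 3:

    $ python tests/benchmarks/android_uibench.py --iterations 3 --iterations-pause 30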
From 6604ad2a223e1667da125965b64f907c3191f723 Mon Sep 17 00:00:00 2001
From: Juri Lelli
Date: Fri, 14 Apr 2017 15:30:16 +0100
Subject: [PATCH 8/8] tests/benchmarks: remove redefinition of benchmarkFinalize

All benchmarkFinalize currently does is pause for an amount of time
before proceeding with the next iteration. Since we now do the same
_before_ each iteration (with --iterations-pause), the redefinition is
redundant. Remove it from all benchmarks.

Signed-off-by: Juri Lelli
---
 tests/benchmarks/android_geekbench.py | 12 ++----------
 tests/benchmarks/android_gmaps.py     | 14 ++------------
 tests/benchmarks/android_jankbench.py | 14 ++------------
 tests/benchmarks/android_uibench.py   | 14 ++------------
 tests/benchmarks/android_vellamo.py   | 12 ++----------
 tests/benchmarks/android_youtube.py   | 14 ++------------
 6 files changed, 12 insertions(+), 68 deletions(-)

diff --git a/tests/benchmarks/android_geekbench.py b/tests/benchmarks/android_geekbench.py
index 96ad4c139..d8b1af391 100755
--- a/tests/benchmarks/android_geekbench.py
+++ b/tests/benchmarks/android_geekbench.py
@@ -67,16 +67,9 @@ class GeekbenchTest(LisaBenchmark):
         self.setupWorkload()
         self.setupGovernor()
 
-    def benchmarkFinalize(self):
-        if self.delay_after_s:
-            self._log.info("Waiting %d[s] before to continue...",
-                self.delay_after_s)
-            sleep(self.delay_after_s)
-
-    def __init__(self, governor, test, delay_after_s=0):
+    def __init__(self, governor, test):
         self.governor = governor
         self.test = test
-        self.delay_after_s = delay_after_s
         super(GeekbenchTest, self).__init__()
 
     def setupWorkload(self):
@@ -163,9 +156,8 @@ tests_completed = 0
 for governor in governors:
     for test in tests:
         tests_remaining -= 1
-        delay_after_s = 30 if tests_remaining else 0
         try:
-            GeekbenchTest(governor, test, delay_after_s)
+            GeekbenchTest(governor, test)
             tests_completed += 1
         except:
             # A test configuraion failed, continue with other tests

diff --git a/tests/benchmarks/android_gmaps.py b/tests/benchmarks/android_gmaps.py
index 1c6467263..fc2b92950 100755
--- a/tests/benchmarks/android_gmaps.py
+++ b/tests/benchmarks/android_gmaps.py
@@ -67,18 +67,10 @@ class GMapsTest(LisaBenchmark):
         self.setupWorkload()
         self.setupGovernor()
 
-    def benchmarkFinalize(self):
-        if self.delay_after_s:
-            self._log.info("Waiting %d[s] before to continue...",
-                self.delay_after_s)
-            sleep(self.delay_after_s)
-
-    def __init__(self, governor, location_search, swipe_count,
-                 delay_after_s=0):
+    def __init__(self, governor, location_search, swipe_count):
         self.governor = governor
         self.location_search = location_search
         self.swipe_count = swipe_count
-        self.delay_after_s = delay_after_s
         super(GMapsTest, self).__init__()
 
     def setupWorkload(self):
@@ -169,10 +161,8 @@ tests_completed = 0
 for governor in governors:
     for location in locations:
         tests_remaining -= 1
-        delay_after_s = 30 if tests_remaining else 0
         try:
-            GMapsTest(governor, location, swipe_count,
-                      delay_after_s)
+            GMapsTest(governor, location, swipe_count)
             tests_completed += 1
         except:
             # A test configuration failed, continue with other tests

diff --git a/tests/benchmarks/android_jankbench.py b/tests/benchmarks/android_jankbench.py
index 967dd60a0..883cc3fcc 100755
--- a/tests/benchmarks/android_jankbench.py
+++ b/tests/benchmarks/android_jankbench.py
@@ -68,18 +68,10 @@ class JankbenchTest(LisaBenchmark):
         self.setupWorkload()
         self.setupGovernor()
 
-    def benchmarkFinalize(self):
-        if self.delay_after_s:
-            self._log.info("Waiting %d[s] before to continue...",
-                self.delay_after_s)
-            sleep(self.delay_after_s)
-
-    def __init__(self, governor, test, iterations,
-                 delay_after_s=0):
+    def __init__(self, governor, test, iterations):
         self.governor = governor
         self.test = test
         self.iterations = iterations
-        self.delay_after_s = delay_after_s
         super(JankbenchTest, self).__init__()
 
     def setupWorkload(self):
@@ -174,10 +166,8 @@ tests_completed = 0
 for governor in governors:
     for test in tests:
         tests_remaining -= 1
-        delay_after_s = 30 if tests_remaining else 0
         try:
-            JankbenchTest(governor, test, iterations,
-                          delay_after_s)
+            JankbenchTest(governor, test, iterations)
             tests_completed += 1
         except:
             # A test configuraion failed, continue with other tests

diff --git a/tests/benchmarks/android_uibench.py b/tests/benchmarks/android_uibench.py
index 376e7682f..5058d16dc 100755
--- a/tests/benchmarks/android_uibench.py
+++ b/tests/benchmarks/android_uibench.py
@@ -67,18 +67,10 @@ class UiBenchTest(LisaBenchmark):
         self.setupWorkload()
         self.setupGovernor()
 
-    def benchmarkFinalize(self):
-        if self.delay_after_s:
-            self._log.info("Waiting %d[s] before to continue...",
-                self.delay_after_s)
-            sleep(self.delay_after_s)
-
-    def __init__(self, governor, test, duration_s,
-                 delay_after_s=0):
+    def __init__(self, governor, test, duration_s):
         self.governor = governor
         self.test = test
         self.duration_s = duration_s
-        self.delay_after_s = delay_after_s
         super(UiBenchTest, self).__init__()
 
     def setupWorkload(self):
@@ -186,10 +178,8 @@ tests_completed = 0
 for governor in governors:
     for test in tests:
         tests_remaining -= 1
-        delay_after_s = 30 if tests_remaining else 0
         try:
-            UiBenchTest(governor, test, duration_s,
-                        delay_after_s)
+            UiBenchTest(governor, test, duration_s)
             tests_completed += 1
         except:
             # A test configuraion failed, continue with other tests

diff --git a/tests/benchmarks/android_vellamo.py b/tests/benchmarks/android_vellamo.py
index 265febca7..645093164 100755
--- a/tests/benchmarks/android_vellamo.py
+++ b/tests/benchmarks/android_vellamo.py
@@ -67,16 +67,9 @@ class VellamoTest(LisaBenchmark):
         self.setupWorkload()
         self.setupGovernor()
 
-    def benchmarkFinalize(self):
-        if self.delay_after_s:
-            self._log.info("Waiting %d[s] before to continue...",
-                self.delay_after_s)
-            sleep(self.delay_after_s)
-
-    def __init__(self, governor, test, delay_after_s=0):
+    def __init__(self, governor, test):
         self.governor = governor
         self.test = test
-        self.delay_after_s = delay_after_s
         super(VellamoTest, self).__init__()
 
     def setupWorkload(self):
@@ -166,9 +159,8 @@ tests_completed = 0
 for governor in governors:
     for test in tests:
         tests_remaining -= 1
-        delay_after_s = 30 if tests_remaining else 0
         try:
-            VellamoTest(governor, test, delay_after_s)
+            VellamoTest(governor, test)
             tests_completed += 1
         except:
             # A test configuraion failed, continue with other tests

diff --git a/tests/benchmarks/android_youtube.py b/tests/benchmarks/android_youtube.py
index 3afa21703..382296585 100755
--- a/tests/benchmarks/android_youtube.py
+++ b/tests/benchmarks/android_youtube.py
@@ -67,18 +67,10 @@ class YouTubeTest(LisaBenchmark):
         self.setupWorkload()
         self.setupGovernor()
 
-    def benchmarkFinalize(self):
-        if self.delay_after_s:
-            self._log.info("Waiting %d[s] before to continue...",
-                self.delay_after_s)
-            sleep(self.delay_after_s)
-
-    def __init__(self, governor, video_url, video_duration_s,
-                 delay_after_s=0):
+    def __init__(self, governor, video_url, video_duration_s):
         self.governor = governor
         self.video_url = video_url
         self.video_duration_s = video_duration_s
-        self.delay_after_s = delay_after_s
         super(YouTubeTest, self).__init__()
 
     def setupWorkload(self):
@@ -168,10 +160,8 @@ tests_completed = 0
 for governor in governors:
     for url in video_urls:
         tests_remaining -= 1
-        delay_after_s = 30 if tests_remaining else 0
         try:
-            YouTubeTest(governor, url, video_duration_s,
-                        delay_after_s)
+            YouTubeTest(governor, url, video_duration_s)
             tests_completed += 1
         except:
             # A test configuraion failed, continue with other tests
--
GitLab
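Taken together, the series makes a fully unattended multi-run session
possible from the command line alone; a hypothetical end-to-end
invocation (paths illustrative) exercising all of the new knobs:

    $ python tests/benchmarks/android_youtube.py \
          --boot-image images/boot.img \
          --boot-timeout 60 \
          --iterations 10 \
          --iterations-reboot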