From f7a293d7f19276aab881c43432296999957a6ee1 Mon Sep 17 00:00:00 2001 From: Qais Yousef Date: Thu, 31 Jan 2019 10:04:10 +0000 Subject: [PATCH 1/2] bart: fix duplicated values of switch-in Sometimes a task would switch in and then out immediately, causing bart to get confused as the 2 events contain the same timestamp. Add a check to detect these scenarios and deal with them gracefully. This was observed on fastmodels where the time is simulated. I couldn't find any evidence that the fastmodel is doing something wrong as the traces look sane; it's just that a task was preempted as soon as it started running, and for some reason the recorded timestamp for these events is exactly the same. It is assumed to be an artefact of simulated time, where the distance between the 2 events is so short that the simulated time didn't get a chance to advance for the few nanoseconds in between the 2 events. Traceback (most recent call last): File "/mnt/data/src/lisa-next/tools/exekall/exekall/engine.py", line 1964, in genf val = self.callable_(*args, **kwargs) File "/mnt/data/src/lisa-next/tools/exekall/exekall/engine.py", line 1676, in __call__ return __UnboundMethod_self__.__wrapped__(*args, **kwargs) File "/mnt/data/src/lisa-next/lisa/trace.py", line 1142, in wrapper return f(self, *args, **kwargs) File "/mnt/data/src/lisa-next/lisa/tests/scheduler/load_tracking.py", line 511, in test_task_util_avg return self._test_all_freq(item_test) File "/mnt/data/src/lisa-next/lisa/tests/scheduler/load_tracking.py", line 532, in _test_all_freq for item in self.invariance_items File "/mnt/data/src/lisa-next/lisa/tests/scheduler/load_tracking.py", line 532, in for item in self.invariance_items File "/mnt/data/src/lisa-next/lisa/tests/scheduler/load_tracking.py", line 509, in item_test allowed_error_pct=allowed_error_pct File "/mnt/data/src/lisa-next/lisa/trace.py", line 1142, in wrapper return f(self, *args, **kwargs) File "/mnt/data/src/lisa-next/lisa/tests/scheduler/load_tracking.py", line 375, in 
test_task_util_avg return self._test_signal('util', allowed_error_pct) File "/mnt/data/src/lisa-next/lisa/trace.py", line 1142, in wrapper return f(self, *args, **kwargs) File "/mnt/data/src/lisa-next/lisa/tests/scheduler/load_tracking.py", line 343, in _test_signal signal_name, allowed_error_pct, self.trace, self.cpu, name, capacity) File "/mnt/data/src/lisa-next/lisa/trace.py", line 1142, in wrapper return f(self, *args, **kwargs) File "/mnt/data/src/lisa-next/lisa/trace.py", line 1142, in wrapper return f(self, *args, **kwargs) File "/mnt/data/src/lisa-next/lisa/tests/scheduler/load_tracking.py", line 304, in _test_task_signal exp_signal = self.get_expected_util_avg(trace, cpu, task_name, capacity) File "/mnt/data/src/lisa-next/lisa/trace.py", line 1142, in wrapper return f(self, *args, **kwargs) File "/mnt/data/src/lisa-next/lisa/tests/scheduler/load_tracking.py", line 293, in get_expected_util_avg duty_cycle_pct = self.get_task_duty_cycle_pct(trace, task_name, cpu) File "/mnt/data/src/lisa-next/lisa/tests/scheduler/load_tracking.py", line 126, in get_task_duty_cycle_pct return SchedAssert(trace.ftrace, top, execname=task_name).getDutyCycle(window) File "/mnt/data/src/lisa-next/external/bart/bart/sched/SchedAssert.py", line 550, in getDutyCycle return self.getRuntime(window, percent=True) File "/mnt/data/src/lisa-next/external/bart/bart/sched/SchedAssert.py", line 388, in getRuntime run_time = agg.aggregate(level="all", window=window)[0] File "/mnt/data/src/lisa-next/external/trappy/trappy/stats/Aggregator.py", line 144, in aggregate level_res = self._aggfunc(self._result[group[0]], **kwargs) File "/mnt/data/src/lisa-next/external/bart/bart/sched/functions.py", line 311, in residency_sum len(s_out))) RuntimeError: Unexpected Lengths: s_in=126, s_out=127 Signed-off-by: Qais Yousef --- external/bart/bart/sched/functions.py | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/external/bart/bart/sched/functions.py 
b/external/bart/bart/sched/functions.py index 9185f2ff1..ac7c0b266 100644 --- a/external/bart/bart/sched/functions.py +++ b/external/bart/bart/sched/functions.py @@ -139,6 +139,25 @@ def sanitize_asymmetry(series, window=None): if window: series.index.values[-1] = window[1] + # Remove repeated entries - which could happen if a task switches in + then immediately switches out; i.e. the time stamp is exactly the same + n = 0 + next = n + 1 + while next < len(series): + if not series.values[n]: + n = next + next = next + 1 + continue + + while not series.values[next]: + next = next + 1 + + if series.values[n] == series.values[next]: + series = series.drop(series.index[next]) + else: + n = next + next = next + 1 + # No point if the series just has one value and # one event. We do not have sufficient data points # for any calculation. We should Ideally never reach -- GitLab From 7358df7bd85edd1beb632f1ae63bfda68c0d1505 Mon Sep 17 00:00:00 2001 From: Qais Yousef Date: Mon, 4 Feb 2019 08:28:40 +0000 Subject: [PATCH 2/2] eas_behaviour: fix handling duplicated entries We can end up with duplicated timestamps sometimes, which causes an exception to be thrown when attempting to pivot(). 
Fix this by removing the duplicate entries before the call to pivot() Traceback (most recent call last): File "/mnt/data/src/lisa-next/tools/exekall/exekall/engine.py", line 1964, in genf val = self.callable_(*args, **kwargs) File "/mnt/data/src/lisa-next/tools/exekall/exekall/engine.py", line 1676, in __call__ return __UnboundMethod_self__.__wrapped__(*args, **kwargs) File "/mnt/data/src/lisa-next/lisa/trace.py", line 1142, in wrapper return f(self, *args, **kwargs) File "/mnt/data/src/lisa-next/lisa/trace.py", line 1142, in wrapper return f(self, *args, **kwargs) File "/mnt/data/src/lisa-next/lisa/tests/base.py", line 594, in wrapper res = func(self, *args, **kwargs) File "/mnt/data/src/lisa-next/lisa/tests/scheduler/eas_behaviour.py", line 315, in test_task_placement est_power = self._get_estimated_power_df(nrg_model) File "/mnt/data/src/lisa-next/lisa/tests/scheduler/eas_behaviour.py", line 263, in _get_estimated_power_df task_cpu_df = self._get_task_cpu_df() File "/mnt/data/src/lisa-next/lisa/tests/scheduler/eas_behaviour.py", line 142, in _get_task_cpu_df df = df.pivot(index=df.index, columns='next_comm').fillna(method='ffill') File "/mnt/data/src/lisa-next/.lisa-venv-3.6/lib/python3.6/site-packages/pandas/core/frame.py", line 5628, in pivot return pivot(self, index=index, columns=columns, values=values) File "/mnt/data/src/lisa-next/.lisa-venv-3.6/lib/python3.6/site-packages/pandas/core/reshape/pivot.py", line 388, in pivot return indexed.unstack(columns) File "/mnt/data/src/lisa-next/.lisa-venv-3.6/lib/python3.6/site-packages/pandas/core/frame.py", line 5992, in unstack return unstack(self, level, fill_value) File "/mnt/data/src/lisa-next/.lisa-venv-3.6/lib/python3.6/site-packages/pandas/core/reshape/reshape.py", line 388, in unstack return _unstack_frame(obj, level, fill_value=fill_value) File "/mnt/data/src/lisa-next/.lisa-venv-3.6/lib/python3.6/site-packages/pandas/core/reshape/reshape.py", line 411, in _unstack_frame constructor=obj._constructor) File 
"/mnt/data/src/lisa-next/.lisa-venv-3.6/lib/python3.6/site-packages/pandas/core/reshape/reshape.py", line 128, in __init__ self._make_selectors() File "/mnt/data/src/lisa-next/.lisa-venv-3.6/lib/python3.6/site-packages/pandas/core/reshape/reshape.py", line 166, in _make_selectors raise ValueError('Index contains duplicate entries, ' ValueError: Index contains duplicate entries, cannot reshape Finished UUID=d7a191b828d84ac788e94b6512c34fe0 EnergyModelWakeMigration[board=sgm776]:test_task_placement ^^^^^^^^^^^^^^^^^^^ EXCEPTION (ValueError): Index contains duplicate entries, cannot reshape Signed-off-by: Qais Yousef --- lisa/tests/scheduler/eas_behaviour.py | 1 + 1 file changed, 1 insertion(+) diff --git a/lisa/tests/scheduler/eas_behaviour.py b/lisa/tests/scheduler/eas_behaviour.py index abf0c18da..aea76096b 100644 --- a/lisa/tests/scheduler/eas_behaviour.py +++ b/lisa/tests/scheduler/eas_behaviour.py @@ -139,6 +139,7 @@ class EASBehaviour(RTATestBundle): df = self.trace.ftrace.sched_switch.data_frame[['next_comm', '__cpu']] df = df[df['next_comm'].isin(tasks)] + df = df[~df.index.duplicated()] df = df.pivot(index=df.index, columns='next_comm').fillna(method='ffill') cpu_df = df['__cpu'] # Drop consecutive duplicates -- GitLab