diff --git a/lisa/_kmod.py b/lisa/_kmod.py index 07cf89e1138ba0d2dfce7950e1d43bd457ca87fe..7e625e863173d6576d1730184cd24ca7d1852cbf 100644 --- a/lisa/_kmod.py +++ b/lisa/_kmod.py @@ -985,7 +985,7 @@ class _KernelBuildEnv(Loggable, SerializeViaConstructor): @classmethod - def _prepare_tree(cls, path, cc, abi, build_conf, apply_overlays): + def _prepare_tree(cls, path, cc, abi, build_conf, apply_overlays, fixup_atomic_headers=False): logger = cls.get_logger() path = Path(path) @@ -1025,24 +1025,37 @@ class _KernelBuildEnv(Loggable, SerializeViaConstructor): bind_paths = {path: path} def fixup_kernel_build_env(): - # TODO: re-assess - - # The headers in /sys/kheaders.tar.xz generated by - # CONFIG_IKHEADERS=y are broken since kernel/gen_kheaders.sh strip - # some comments from the file. KBuild then proceeds on checking the - # checksum and the check fails. This used to be a simple warning, - # but has now been turned into an error in recent kernels. - # We remove the SHA1 from the file so that the check is skipped. - with contextlib.suppress(FileNotFoundError): - for _path in (path / 'include' / 'linux' / 'atomic').iterdir(): - content = _path.read_bytes() - lines = [line for line in content.split(b'\n') if line] - if lines and lines[-1].lstrip().startswith(b'//'): - # Remove the last line, containing the sha1 - content = b'\n'.join(lines[:-1]) + b'\n' - sha1 = hashlib.sha1(content).hexdigest() - content += b'// ' + sha1.encode('ascii') + b'\n' - _path.write_bytes(content) + + if fixup_atomic_headers: + # TODO: re-assess + + # The headers in /sys/kheaders.tar.xz generated by + # CONFIG_IKHEADERS=y are broken since kernel/gen_kheaders.sh strips + # some comments from the file. KBuild then proceeds on checking the + # checksum and the check fails. This used to be a simple warning, + # but has now been turned into an error in recent kernels. + # We recompute the SHA1 of the modified content and update it in + # the file so that the check passes.
+ with contextlib.suppress(FileNotFoundError): + def join(lines): + return b'\n'.join(lines) + b'\n' + + for _path in (path / 'include' / 'linux' / 'atomic').iterdir(): + content = _path.read_bytes() + lines = content.split(b'\n') + i, last_line = [(i, line) for (i, line) in enumerate(lines) if line][-1] + + if lines and last_line.lstrip().startswith(b'//'): + # Remove the last line, containing the sha1 + without_sha1 = copy.copy(lines) + del without_sha1[i] + without_sha1 = join(without_sha1) + sha1 = hashlib.sha1(without_sha1).hexdigest() + + # Update the sha1 + updated = copy.copy(lines) + updated[i] = b'// ' + sha1.encode('ascii') + updated = join(updated) + _path.write_bytes(updated) if build_conf['build-env'] == 'alpine': @@ -1415,7 +1428,7 @@ class _KernelBuildEnv(Loggable, SerializeViaConstructor): raise ValueError(f'Building from /lib/modules/.../build/ is only supported for local targets') @contextlib.contextmanager - def _from_target_sources(configs, pull): + def _from_target_sources(configs, pull, **kwargs): """ Overlay some content taken from the target on the user tree, such as /proc/config.gz @@ -1442,6 +1455,7 @@ class _KernelBuildEnv(Loggable, SerializeViaConstructor): cache=cache, tree_path=tree_path, build_conf=build_conf, + **kwargs, ) as tree: yield tree._to_spec() @@ -1461,6 +1475,7 @@ class _KernelBuildEnv(Loggable, SerializeViaConstructor): return _from_target_sources( configs=['CONFIG_IKHEADERS', 'CONFIG_IKCONFIG_PROC'], pull=pull, + fixup_atomic_headers=True, ) def from_proc_config(): diff --git a/lisa/datautils.py b/lisa/datautils.py index 6c95b2c9debbd736b51dafd7c3103d72578388d1..800cad94410cc23c99f3b92ef1dde661f44219c5 100644 --- a/lisa/datautils.py +++ b/lisa/datautils.py @@ -321,8 +321,8 @@ def df_squash(df, start, end, column='delta'): # If s1 is in the interval, we just need to cap its len to # s1 - e1.index - prev_df = df[:start] - middle_df = df[start:end] + prev_df = df.loc[:start] + middle_df = df.loc[start:end] # Tweak the 
closest previous event to include it in the slice if not prev_df.empty and start not in middle_df.index: @@ -1851,7 +1851,7 @@ def series_convert(series, dtype, nullable=None): pipelines.append( # Otherwise fallback to calling the type directly - lambda series: series.apply(convert, convert_dtype=False) + lambda series: series.astype(object).apply(convert) ) # Then try with a nullable type. @@ -1939,20 +1939,19 @@ def series_convert(series, dtype, nullable=None): # unusable if ( series.dtype.name == 'object' and - series.apply(isinstance, args=(bytes,), convert_dtype=False).any() + series.astype(object).apply(isinstance, args=(bytes,)).any() ): string_basic = None # Handle mixed dtypes - str_basic = lambda x : x.apply( + str_basic = lambda x : x.astype(object).apply( lambda x: x.decode('ascii') if isinstance(x, bytes) else str(x), - convert_dtype=False ) else: string_basic = basic str_basic = make_convert(str) # Faster than Series.str.decode() - basic_decode = lambda x : x.apply(bytes.decode, args=('ascii',), convert_dtype=False) + basic_decode = lambda x : x.astype(object).apply(bytes.decode, args=('ascii',)) # Significantly faster than Series.str.decode() def fast_decode(x):