diff --git a/lisa/_assets/kmodules/lisa/ftrace_events.h b/lisa/_assets/kmodules/lisa/ftrace_events.h
index 213cd0d489c3fbadc96e551b8f77bf8f3542ff17..6282d205efc26bf6d71ad49dd1a2bba0ede16255 100644
--- a/lisa/_assets/kmodules/lisa/ftrace_events.h
+++ b/lisa/_assets/kmodules/lisa/ftrace_events.h
@@ -309,7 +309,7 @@ TRACE_EVENT_CONDITION(lisa__uclamp_util_se,
 	TP_fast_assign(
 		__entry->pid = p->pid;
 		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
-		__entry->cpu = rq_cpu(rq);
+		__entry->cpu = rq ? rq_cpu(rq) : -1;
 		__entry->util_avg = p->se.avg.util_avg;
 		__entry->uclamp_avg = uclamp_rq_util_with(rq, p->se.avg.util_avg);
@@ -354,7 +354,7 @@ TRACE_EVENT_CONDITION(lisa__uclamp_util_cfs,
 	),

 	TP_fast_assign(
-		__entry->cpu = rq_cpu(rq);
+		__entry->cpu = rq ? rq_cpu(rq) : -1;
 		__entry->util_avg = cfs_rq->avg.util_avg;
 		__entry->uclamp_avg = uclamp_rq_util_with(rq, cfs_rq->avg.util_avg);
 		__entry->uclamp_min = rq->uclamp[UCLAMP_MIN].value;
diff --git a/lisa/_assets/kmodules/lisa/sched_helpers.h b/lisa/_assets/kmodules/lisa/sched_helpers.h
index db58ec19ad6a55c8ec4ef7836a7e1dbb1ecc7040..0ee4ce3ba0a86ec648a09b651415c7410a06f93f 100644
--- a/lisa/_assets/kmodules/lisa/sched_helpers.h
+++ b/lisa/_assets/kmodules/lisa/sched_helpers.h
@@ -18,20 +18,27 @@
 static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
 {
 	return cfs_rq->rq;
 }
-# else
+# elif HAS_MEMBER(struct, rq, cfs)
 static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
 {
 	return container_of(cfs_rq, struct rq, cfs);
 }
+# else
+#  warning "Cannot get the parent struct rq of a struct cfs_rq"
 # endif
 #endif

+static inline bool entity_is_task(struct sched_entity *se)
+{
+	return
 #if HAS_MEMBER(struct, sched_entity, my_q)
-# define entity_is_task(se) (!(se)->my_q)
+		!se->my_q
 #else
-# define entity_is_task(se) (1)
+		true
 #endif
+	;
+}

 #if HAS_TYPE(struct, rq)
@@ -76,8 +83,7 @@ static int autogroup_path(struct task_group *tg, char *buf, int buflen)
 #if HAS_TYPE(struct, rq)

 /* A cut down version of the original. @p MUST be NULL */
-static __always_inline
-unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util)
+static inline unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util)
 {
 # if HAS_KERNEL_FEATURE(CFS_UCLAMP)
 	unsigned long min_util;
@@ -160,7 +166,7 @@ static inline char *cfs_rq_path(struct cfs_rq *cfs_rq, char *str, int len)

 static inline int cfs_rq_cpu(struct cfs_rq *cfs_rq)
 {
-	return cfs_rq ? cpu_of(rq_of(cfs_rq)) : -1;
+	return cpu_of(rq_of(cfs_rq));
 }
 #endif
@@ -195,7 +201,7 @@ static inline const struct sched_avg *rq_avg_irq(struct rq *rq)

 static inline int rq_cpu(struct rq *rq)
 {
-	return rq ? cpu_of(rq) : -1;
+	return cpu_of(rq);
 }

 static inline int rq_cpu_capacity(struct rq *rq)
@@ -239,14 +245,13 @@ static inline int rq_cpu_current_capacity(struct rq *rq)
 	;
 }

+# if HAS_KERNEL_FEATURE(RQ_NR_RUNNING)
 static inline int rq_nr_running(struct rq *rq)
 {
-# if HAS_KERNEL_FEATURE(RQ_NR_RUNNING)
-	if (rq->nr_running)
-		return rq->nr_running;
-# endif
-	return -1;
+	return rq->nr_running;
 }
+# endif
+
 #endif

 #if HAS_TYPE(struct, root_domain)
diff --git a/lisa/_assets/kmodules/lisa/tp.c b/lisa/_assets/kmodules/lisa/tp.c
index a0e85b6e3a0b41e0d184f34e26908897d383b9fe..4e3cad9ca3b51edb75a5a8f90b088c8b5d9f8399 100644
--- a/lisa/_assets/kmodules/lisa/tp.c
+++ b/lisa/_assets/kmodules/lisa/tp.c
@@ -16,15 +16,14 @@ static inline void _trace_cfs(struct cfs_rq *cfs_rq,
 			      void (*trace_event)(int, char*, const struct sched_avg*))
 {
-	const struct sched_avg *avg;
-	char path[PATH_SIZE];
-	int cpu;
-
-	avg = cfs_rq_avg(cfs_rq);
-	cfs_rq_path(cfs_rq, path, PATH_SIZE);
-	cpu = cfs_rq_cpu(cfs_rq);
+	if (cfs_rq) {
+		const struct sched_avg *avg = cfs_rq_avg(cfs_rq);
+		char path[PATH_SIZE];
+		int cpu = cfs_rq_cpu(cfs_rq);

-	trace_event(cpu, path, avg);
+		cfs_rq_path(cfs_rq, path, PATH_SIZE);
+		trace_event(cpu, path, avg);
+	}
 }
 #endif
@@ -36,18 +35,14 @@ static inline void _trace_se(struct sched_entity *se,
 {
 	void *gcfs_rq = get_group_cfs_rq(se);
 	void *cfs_rq = get_se_cfs_rq(se);
-	struct task_struct *p;
 	char path[PATH_SIZE];
-	char *comm;
-	pid_t pid;
-	int cpu;

 	cfs_rq_path(gcfs_rq, path, PATH_SIZE);
-	cpu = cfs_rq_cpu(cfs_rq);
+	int cpu = cfs_rq ? cfs_rq_cpu(cfs_rq) : -1;

-	p = gcfs_rq ? NULL : container_of(se, struct task_struct, se);
-	comm = p ? p->comm : "(null)";
-	pid = p ? p->pid : -1;
+	struct task_struct *p = gcfs_rq ? NULL : container_of(se, struct task_struct, se);
+	char *comm = p ? p->comm : "(null)";
+	pid_t pid = p ? p->pid : -1;

 	trace_event(cpu, path, comm, pid, &se->avg);
 }
@@ -72,13 +67,15 @@ DEFINE_TP_EVENT_FEATURE(lisa__uclamp_util_cfs, pelt_cfs_tp, uclamp_util_cfs_probe);
 #if HAS_KERNEL_FEATURE(RT_PELT)
 static void sched_pelt_rt_probe(void *feature, struct rq *rq)
 {
-	const struct sched_avg *avg = rq_avg_rt(rq);
-	int cpu = rq_cpu(rq);
+	if (rq) {
+		const struct sched_avg *avg = rq_avg_rt(rq);
+		int cpu = rq_cpu(rq);

-	if (!avg)
-		return;
+		if (!avg)
+			return;

-	trace_lisa__sched_pelt_rt(cpu, avg);
+		trace_lisa__sched_pelt_rt(cpu, avg);
+	}
 }
 DEFINE_TP_EVENT_FEATURE(lisa__sched_pelt_rt, pelt_rt_tp, sched_pelt_rt_probe);
 #endif
@@ -86,13 +83,15 @@ DEFINE_TP_EVENT_FEATURE(lisa__sched_pelt_rt, pelt_rt_tp, sched_pelt_rt_probe);
 #if HAS_KERNEL_FEATURE(DL_PELT)
 static void sched_pelt_dl_probe(void *feature, struct rq *rq)
 {
-	const struct sched_avg *avg = rq_avg_dl(rq);
-	int cpu = rq_cpu(rq);
+	if (rq) {
+		const struct sched_avg *avg = rq_avg_dl(rq);
+		int cpu = rq_cpu(rq);

-	if (!avg)
-		return;
+		if (!avg)
+			return;

-	trace_lisa__sched_pelt_dl(cpu, avg);
+		trace_lisa__sched_pelt_dl(cpu, avg);
+	}
 }
 DEFINE_TP_EVENT_FEATURE(lisa__sched_pelt_dl, pelt_dl_tp, sched_pelt_dl_probe);
 #endif
@@ -100,13 +99,15 @@ DEFINE_TP_EVENT_FEATURE(lisa__sched_pelt_dl, pelt_dl_tp, sched_pelt_dl_probe);
 #if HAS_KERNEL_FEATURE(IRQ_PELT)
 static void sched_pelt_irq_probe(void *feature, struct rq *rq)
 {
-	const struct sched_avg *avg = rq_avg_irq(rq);
-	int cpu = rq_cpu(rq);
+	if (rq) {
+		const struct sched_avg *avg = rq_avg_irq(rq);
+		int cpu = rq_cpu(rq);

-	if (!avg)
-		return;
+		if (!avg)
+			return;

-	trace_lisa__sched_pelt_irq(cpu, avg);
+		trace_lisa__sched_pelt_irq(cpu, avg);
+	}
 }
 DEFINE_TP_EVENT_FEATURE(lisa__sched_pelt_irq, pelt_irq_tp, sched_pelt_irq_probe);
 #endif
@@ -149,7 +150,7 @@ DEFINE_TP_EVENT_FEATURE(lisa__sched_overutilized, sched_overutilized_tp, sched_overutilized_probe);
 #if HAS_KERNEL_FEATURE(RQ_NR_RUNNING)
 static void sched_update_nr_running_probe(void *feature, struct rq *rq, int change)
 {
-	if (trace_lisa__sched_update_nr_running_enabled()) {
+	if (rq) {
 		int cpu = rq_cpu(rq);
 		int nr_running = rq_nr_running(rq);
diff --git a/lisa/_doc/helpers.py b/lisa/_doc/helpers.py
index baa3a10d3d37b218aea17876de028035a676c759..01023937b73c3baf6663988d74a3f9621bdb7bcc 100644
--- a/lisa/_doc/helpers.py
+++ b/lisa/_doc/helpers.py
@@ -41,7 +41,7 @@ from sphinx.ext.autodoc import exclude_members_option
 import lisa
 import lisa.analysis
 from lisa.analysis.base import AnalysisHelpers, TraceAnalysisBase
-from lisa.utils import get_subclasses, import_all_submodules, DEPRECATED_MAP, get_sphinx_name, groupby, get_short_doc, order_as
+from lisa.utils import get_subclasses, import_all_submodules, DEPRECATED_MAP, get_sphinx_name, groupby, get_short_doc, order_as, is_link_dead
 from lisa.trace import TraceEventCheckerBase
 from lisa.conf import KeyDesc, SimpleMultiSrcConf, TopLevelKeyDesc
 from lisa.version import format_version
@@ -418,18 +418,7 @@ def find_dead_links(content):

     @functools.lru_cache(maxsize=None)
     def check(url):
-        # Some HTTP servers (including ReadTheDocs) will return 403 Forbidden
-        # if no User-Agent is given
-        headers={
-            'User-Agent': 'Wget/1.13.4 (linux-gnu)',
-        }
-        request = Request(url, headers=headers)
-        try:
-            urlopen(request)
-        except (HTTPError, URLError) as e:
-            return e.reason
-        else:
-            return None
+        return is_link_dead(url)

     errors = {
         link: check(link)
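
`check()` keeps its per-URL `lru_cache` memoization and now delegates the actual probing to `lisa.utils.is_link_dead`, added at the bottom of this diff. A condensed, self-contained sketch of the resulting flow (the `urls` list is hypothetical, for illustration only):

    import functools

    from lisa.utils import is_link_dead

    @functools.lru_cache(maxsize=None)
    def check(url):
        # is_link_dead() returns the failure reason (truthy) for a dead
        # link and None (falsy) for a live one.
        return is_link_dead(url)

    urls = ['https://www.example.org', 'https://www.example.org/missing']
    # Only dead links end up in the dict, mapped to their failure reason
    errors = {url: reason for url in urls if (reason := check(url))}
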
diff --git a/lisa/_kmod.py b/lisa/_kmod.py
index 9a86bd8e533edca91c5240351bc8ed85ea0469bb..9e45366c3c56e5e50693f612c3f316490c3a7a4c 100644
--- a/lisa/_kmod.py
+++ b/lisa/_kmod.py
@@ -141,7 +141,7 @@
 from devlib.target import KernelVersion, TypedKernelConfig, KernelConfigTristate
 from devlib.host import LocalConnection
 from devlib.exception import TargetStableError, TargetStableCalledProcessError
-from lisa.utils import nullcontext, Loggable, LISA_CACHE_HOME, checksum, DirCache, chain_cm, memoized, LISA_HOST_ABI, subprocess_log, SerializeViaConstructor, destroyablecontextmanager, ContextManagerExit, ignore_exceps, get_nested_key
+from lisa.utils import nullcontext, Loggable, LISA_CACHE_HOME, checksum, DirCache, chain_cm, memoized, LISA_HOST_ABI, subprocess_log, SerializeViaConstructor, destroyablecontextmanager, ContextManagerExit, ignore_exceps, get_nested_key, is_link_dead
 from lisa._assets import ASSETS_PATH, HOST_PATH, ABI_BINARIES_FOLDER
 from lisa._unshare import ensure_root
 import lisa._git as git
@@ -155,7 +155,58 @@ class KmodVersionError(Exception):
     pass


+_ALPINE_DEFAULT_VERSION = '3.18.3'
 _ALPINE_ROOTFS_URL = 'https://dl-cdn.alpinelinux.org/alpine/v{minor}/releases/{arch}/alpine-minirootfs-{version}-{arch}.tar.gz'
+_ALPINE_PACKAGE_INFO_URL = 'https://pkgs.alpinelinux.org/package/v{version}/{repo}/{arch}/{package}'
+
+
+def _get_alpine_clang_packages(cc):
+    llvm_version = _clang_version_static(cc) or ''
+    return [
+        f'clang{llvm_version}',
+        f'llvm{llvm_version}',
+        # "lld" packaging is a bit strange, any versioned lld (e.g. "lld15")
+        # conflicts with the generic "lld" package. On top of that, there is
+        # only one versioned package ("lld15") as of Alpine v3.18.
+        f'lld'
+    ]
+
+
+@functools.lru_cache(maxsize=256, typed=True)
+def _find_alpine_cc_packages(version, abi, cc, cross_compile):
+    logger = logging.getLogger(f'{__name__}.alpine_chroot.packages')
+
+    if 'gcc' in cc and cross_compile:
+        cross_compile = cross_compile.strip('-')
+        packages = [f'gcc-{cross_compile}']
+    elif 'clang' in cc:
+        packages = _get_alpine_clang_packages(cc)
+    else:
+        packages = [cc]
+
+    def check(repo, package):
+        url = _ALPINE_PACKAGE_INFO_URL.format(
+            version='.'.join(map(str, version[:2])),
+            repo=repo,
+            arch=_abi_to_alpine_arch(abi),
+            package=package,
+        )
+        logger.debug(f'Checking Alpine package URL: {url}')
+        return not is_link_dead(url)
+
+    ok = all(
+        any(
+            check(repo, package)
+            for repo in ('main', 'community')
+        )
+        for package in packages
+    )
+
+    if ok:
+        return packages
+    else:
+        raise ValueError(f'Could not find Alpine linux packages: {", ".join(packages)}')
+

 def _abi_to_kernel_arch(abi):
     """
@@ -177,6 +228,12 @@ def _kernel_arch_to_abi(arch):
     else:
         return arch

+def _abi_to_alpine_arch(abi):
+    return {
+        'arm64': 'aarch64',
+        'armeabi': 'armv7',
+    }.get(abi, abi)
+

 def _url_path(url):
     return PurePosixPath(
@@ -224,68 +281,112 @@ def _kbuild_make_cmd(path, targets, cc, make_vars):
     return cmd


+def _clang_version_static(cc):
+    try:
+        _, version = cc.split('-', 1)
+    except ValueError:
+        # apk understands "clang" even if there is no clang package
+        version = None
+    else:
+        version = int(version)
+
+    return version
+
+
+def _clang_version(cc, env):
+    version = subprocess.check_output([cc, '--version'], env=env)
+    m = re.match(rb'.*clang version ([0-9]+)\.', version)
+    if m:
+        major = int(m.group(1))
+        return (major,)
+    else:
+        raise ValueError(f'Could not determine version of {cc}')
+
+
+def _resolve_alpine_version(version):
+    version = version or _ALPINE_DEFAULT_VERSION
+
+    # Ensure we have a full version number with 3 components
+    version = version.split('.')
+    version = list(map(int, version + ['0' for _ in range(3 - len(version))]))
+    return version
+
+
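Both `_clang_version_static` and `_resolve_alpine_version` are pure string manipulation, so their contracts are easy to pin down. A self-contained restatement with the expected values (condensed from the definitions above):

    _ALPINE_DEFAULT_VERSION = '3.18.3'

    def _clang_version_static(cc):
        # 'clang-15' -> 15, plain 'clang' -> None
        try:
            _, version = cc.split('-', 1)
        except ValueError:
            return None
        return int(version)

    def _resolve_alpine_version(version):
        # Pad to exactly 3 integer components: '3.18' -> [3, 18, 0]
        version = version or _ALPINE_DEFAULT_VERSION
        parts = version.split('.')
        return list(map(int, parts + ['0'] * (3 - len(parts))))

    assert _clang_version_static('clang-15') == 15
    assert _clang_version_static('clang') is None
    assert _resolve_alpine_version('3.18') == [3, 18, 0]
    assert _resolve_alpine_version(None) == [3, 18, 3]
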
 @destroyablecontextmanager
-def _make_build_chroot(cc, abi, bind_paths=None, version=None, overlay_backend=None, packages=None):
+def _make_build_chroot(cc, cross_compile, abi, bind_paths=None, version=None, overlay_backend=None, packages=None):
     """
     Create a chroot folder ready to be used to build a kernel.
     """
     logger = logging.getLogger(f'{__name__}.alpine_chroot')

-    def is_clang(cc):
-        return cc.startswith('clang')
-
-    def default_packages(cc):
-        # Default packages needed to compile a linux kernel module
-        packages = [
-            'bash',
-            'binutils',
-            'coreutils',
-            'diffutils',
-            'make',
-            'file',
-            'gawk',
-            'sed',
-            'musl-dev',
-            'elfutils-dev',
-            'gmp-dev',
-            'libffi-dev',
-            'openssl-dev',
-            'linux-headers',
-            'musl',
-            'bison',
-            'flex',
-            'python3',
-            'py3-pip',
-            'perl',
-        ]
+    if (version, packages) != (None, None) and None in (version, packages):
+        raise ValueError('Both version and packages need to be set or none of them')
+    else:
+        version = _resolve_alpine_version(version)
+
+    def is_clang(cc):
+        return cc.startswith('clang')
+
+    def default_packages(cc):
+        maybe_qemu = False
+
+        # Default packages needed to compile a linux kernel module
+        packages = [
+            'bash',
+            'binutils',
+            'coreutils',
+            'diffutils',
+            'make',
+            'file',
+            'gawk',
+            'sed',
+            'musl-dev',
+            'elfutils-dev',
+            'gmp-dev',
+            'libffi-dev',
+            'openssl-dev',
+            'linux-headers',
+            'musl',
+            'bison',
+            'flex',
+            'python3',
+            'py3-pip',
+            'perl',
+        ]
+
+        if is_clang(cc):
+            packages.extend([
+                # Add version-less packages as well, so that userspace tools
+                # relying on "clang" when LLVM=1 is passed can work.
+                'llvm',
+                'clang',
+                'lld',
+            ])

-        if is_clang(cc):
-            try:
-                _, version = cc.split('-', 1)
-            except ValueError:
-                # apk understands "clang" even if there is no clang package
-                version = ''
-
-            packages.extend([
-                'lld',
-                f'llvm{version}',
-                f'clang{version}',
-            ])
-        else:
-            packages.append(cc)
+        try:
+            _packages = _find_alpine_cc_packages(
+                version=tuple(version),
+                abi=abi,
+                cc=cc,
+                cross_compile=cross_compile,
+            )
+        except ValueError:
+            # We could not find the cross compilation toolchain, so
+            # fall back on the non-cross toolchain and use QEMU
+            _packages = [cc]
+            # clang is always a cross compilation toolchain, so we
+            # would not need QEMU for that
+            maybe_qemu = not is_clang(cc)

-        return packages
+        packages.extend(_packages)

-    if (version, packages) != (None, None) and None in (version, packages):
-        raise ValueError('Both version and packages need to be set or none of them')
-    else:
-        version = version or '3.18.3'
-        packages = default_packages(cc) if packages is None else packages
+        return (maybe_qemu, packages)

+    maybe_qemu, packages = default_packages(cc) if packages is None else packages
     use_qemu = (
+        maybe_qemu and
         # Since clang binaries support cross compilation without issues,
         # there is no need to use QEMU that will slow everything down.
-        (not is_clang(cc)) and
         abi != LISA_HOST_ABI
     )
@@ -332,8 +433,9 @@ def _make_alpine_chroot(version, packages=None, abi=None, bind_paths=None, overlay_backend=None):
         # Packages have already been installed, so we can speed things up a
         # bit
         if init_cache:
-            _version = version.split('.')
-            minor = '.'.join(_version[:2])
+            version = list(map(str, version))
+            minor = '.'.join(version[:2])
+            version = '.'.join(version)
             url = _ALPINE_ROOTFS_URL.format(
                 minor=minor,
                 arch=alpine_arch,
@@ -361,16 +463,11 @@ def _make_alpine_chroot(version, packages=None, abi=None, bind_paths=None, overlay_backend=None):

         install_packages(packages)

-    # Ensure we have a full version number with 3 components
-    version = version.split('.')
-    version = version + ['0' for _ in range(3 - len(version))]
-    version = '.'.join(version)
-
     abi = abi or LISA_HOST_ABI
     use_qemu = abi != LISA_HOST_ABI

     qemu_msg = f' using QEMU userspace emulation to emulate {abi} on {LISA_HOST_ABI}' if use_qemu else ''
-    logger.debug(f'Using Alpine v{version} chroot with ABI {abi}{qemu_msg}.')
+    logger.debug(f'Using Alpine v{".".join(map(str, version))} chroot with ABI {abi}{qemu_msg}.')

     # Check that QEMU userspace emulation is setup if we need it
     if use_qemu:
@@ -383,11 +480,7 @@ def _make_alpine_chroot(version, packages=None, abi=None, bind_paths=None, overlay_backend=None):
         if not binfmt_path.exists():
             raise ValueError(f'Alpine chroot is setup for {qemu_arch} architecture but QEMU userspace emulation is not installed on the host (missing {binfmt_path})')

-    alpine_arch = {
-        'arm64': 'aarch64',
-        'armeabi': 'armv7',
-    }.get(abi, abi)
-
+    alpine_arch = _abi_to_alpine_arch(abi)
     dir_cache = DirCache(
         category='alpine_chroot',
         populate=populate,
@@ -798,6 +891,10 @@ class _KernelBuildEnvConf(SimpleMultiSrcConf):
             )
         )

+    def _get_alpine_version(self):
+        alpine_version = self['build-env-settings']['alpine'].get('version')
+        return _resolve_alpine_version(alpine_version)
+

 class _KernelBuildEnv(Loggable, SerializeViaConstructor):
     """
@@ -833,7 +930,7 @@ class _KernelBuildEnv(Loggable, SerializeViaConstructor):

     def __init__(self, path_cm, build_conf=None):
         self._make_path_cm = path_cm
-        self.conf, self.cc, self.abi = self._resolve_conf(build_conf)
+        self.conf, self.cc, self.cross_compile, self.abi = self._resolve_conf(build_conf)

         self._path_cm = None
         self.path = None
@@ -856,10 +953,10 @@ class _KernelBuildEnv(Loggable, SerializeViaConstructor):
             raise TypeError(f'Unsupported value type for build_conf: {conf}')

         conf = make_conf(conf)
-        make_vars, cc, abi = cls._process_make_vars(conf, abi=abi, target=target)
+        make_vars, cc, cross_compile, abi = cls._process_make_vars(conf, abi=abi, target=target)
         conf.add_src(src='processed make-variables', conf={'make-variables': make_vars})

-        return (conf, cc, abi)
+        return (conf, cc, cross_compile, abi)

     _SPEC_KEYS = ('path', 'checksum')
@@ -986,7 +1083,7 @@ class _KernelBuildEnv(Loggable, SerializeViaConstructor):

     @classmethod
-    def _prepare_tree(cls, path, cc, abi, build_conf, apply_overlays):
+    def _prepare_tree(cls, path, cc, cross_compile, abi, build_conf, apply_overlays):
         logger = cls.get_logger()
         path = Path(path)
@@ -1067,6 +1164,7 @@ class _KernelBuildEnv(Loggable, SerializeViaConstructor):
         def cmd_cm(cmds):
             with _make_build_chroot(
                 cc=cc,
+                cross_compile=cross_compile,
                 abi=abi,
                 bind_paths=bind_paths,
                 overlay_backend=overlay_backend,
@@ -1123,6 +1221,8 @@ class _KernelBuildEnv(Loggable, SerializeViaConstructor):

     @classmethod
     def _process_make_vars(cls, build_conf, abi, target=None):
+        logger = cls.get_logger()
+
         env = {
             k: str(v)
             for k, v in (
@@ -1167,15 +1267,20 @@ class _KernelBuildEnv(Loggable, SerializeViaConstructor):
             },
             inplace=False,
         )

-        make_vars, cc = cls._resolve_toolchain(abi, build_conf, target=target)
+        cc, cross_compile = cls._resolve_toolchain(abi, build_conf, target=target)

-        if build_conf['build-env'] == 'alpine':
-            if cc.startswith('clang'):
-                make_vars['LLVM'] = '1'
-            else:
-                # Disable CROSS_COMPILE as we are going to build in a "native"
-                # Alpine chroot, so there is no need for a cross compiler
-                make_vars.pop('CROSS_COMPILE', None)
+        if 'clang' in cc and 'LLVM' not in make_vars:
+            clang_version = _clang_version_static(cc)
+            llvm_version = f'-{clang_version}' if clang_version else '1'
+            if build_conf['build-env'] == 'alpine':
+                # TODO: Revisit:
+                # We do not use llvm_version here as Alpine does not ship
+                # multiple versions of e.g. lld, only multiple versions of
+                # clang. Kbuild fails to find ld.lld-<version> since that
+                # binary does not exist on Alpine. Same goes for other tools
+                # like "ar" or "nm"
+                llvm_version = '1'
+            make_vars['LLVM'] = llvm_version

         # Turn errors into warnings by default, as this otherwise prevents the
         # builds when the list of kernel symbols is not available.
@@ -1186,8 +1291,40 @@ class _KernelBuildEnv(Loggable, SerializeViaConstructor):
         # then be re-filtered right before invoking make to remove CC=gcc as it
         # can confuse KBuild.
         make_vars['CC'] = cc
+        if cross_compile:
+            make_vars['CROSS_COMPILE'] = cross_compile
+
+        # LLVM=0 is treated the same way as LLVM=1 by Kbuild, so we need to
+        # remove it.
+        if make_vars.get('LLVM') == '0':
+            del make_vars['LLVM']
+
+        # Some kernels have broken/old Kbuild that does not honor the LLVM=-N
+        # suffixing, so force the suffixes ourselves.
+        llvm = make_vars.get('LLVM')
+        if llvm and llvm.startswith('-'):
+            updated = {
+                'LD': f'ld.lld{llvm}',
+                'AR': f'llvm-ar{llvm}',
+                'NM': f'llvm-nm{llvm}',
+                'OBJCOPY': f'llvm-objcopy{llvm}',
+                'OBJDUMP': f'llvm-objdump{llvm}',
+                'READELF': f'llvm-readelf{llvm}',
+                'STRIP': f'llvm-strip{llvm}',
+            }
+            make_vars = {**updated, **make_vars}
+
         assert 'ARCH' in make_vars
-        return (make_vars, cc, abi)
+
+        def log_fragment(var):
+            val = make_vars.get(var)
+            fragment = f'{var}={val}' if val is not None else ''
+            return fragment
+
+        variables = ', '.join(filter(bool, map(log_fragment, ('CC', 'CROSS_COMPILE', 'LLVM', 'ARCH'))))
+        logger.info(f'Toolchain detected: {variables}')
+
+        return (make_vars, cc, cross_compile, abi)

     @classmethod
     def _make_toolchain_env(cls, toolchain_path=None, env=None):
@@ -1213,10 +1350,11 @@ class _KernelBuildEnv(Loggable, SerializeViaConstructor):
     def _check_cc_version(cls, cc, toolchain_path):
         if cc == 'clang':
             env = cls._make_toolchain_env(toolchain_path)
-            version = subprocess.check_output([cc, '--version'], env=env)
-            m = re.match(rb'.*clang version ([0-9]+)\.', version)
-            if m:
-                major = int(m.group(1))
+            try:
+                major, *_ = _clang_version(cc, env=env)
+            except ValueError:
+                pass
+            else:
                 if major >= cls._MIN_CLANG_VERSION:
                     return True
         else:
@@ -1230,7 +1368,7 @@ class _KernelBuildEnv(Loggable, SerializeViaConstructor):
         env = cls._make_toolchain_env_from_conf(build_conf)

         def priority_to(cc):
-            return lambda _cc, _cmd: 0 if cc in _cc else 1
+            return lambda _cc: 0 if cc in _cc else 1

         cc_priority = priority_to('clang')
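
For kernels whose Kbuild ignores the `LLVM=-N` suffix, the block above pins every binutils substitute explicitly. A self-contained sketch of the resulting make variables (condensed to three tools; values follow from the code above):

    make_vars = {'ARCH': 'arm64', 'CC': 'clang-15', 'LLVM': '-15'}

    llvm = make_vars.get('LLVM')
    if llvm and llvm.startswith('-'):
        updated = {
            'LD': f'ld.lld{llvm}',
            'AR': f'llvm-ar{llvm}',
            'NM': f'llvm-nm{llvm}',
        }
        # Pre-existing user-provided values take precedence over the
        # forced suffixes
        make_vars = {**updated, **make_vars}

    assert make_vars['LD'] == 'ld.lld-15'
    assert make_vars['CC'] == 'clang-15'
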
@@ -1244,19 +1382,32 @@ class _KernelBuildEnv(Loggable, SerializeViaConstructor):
                     cc_priority = priority_to('clang')
                 else:
                     clang_version = clang_version // 10_000
-                    def cc_priority(cc, cmd):
+                    is_host_env = build_conf['build-env'] == 'host'
+
+                    def version_key(version):
+                        return (
+                            0 if version >= clang_version else 1,
+                            # Try the versions closest to the one we
+                            # want
+                            abs(clang_version - version)
+                        )
+
+                    def cc_priority(cc):
                         if 'clang' in cc:
                             version = re.search(r'[0-9]+', cc)
                             if version is None:
-                                return (2,)
+                                if is_host_env:
+                                    try:
+                                        version, *_ = _clang_version(cc, env=env)
+                                    except ValueError:
+                                        return (2,)
+                                    else:
+                                        return version_key(version)
+                                else:
+                                    return (2,)
                             else:
                                 version = int(version.group(0))
-                                return (
-                                    0 if version >= clang_version else 1,
-                                    # Try the versions closest to the one we
-                                    # want
-                                    abs(clang_version - version)
-                                )
+                                return version_key(version)
                         else:
                             return (3,)
             else:
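
`version_key` sorts candidate versions so that anything at least as new as the kernel's own clang comes first, ties broken by distance to that version. For a kernel built with clang 15 (self-contained restatement):

    clang_version = 15

    def version_key(version):
        return (
            0 if version >= clang_version else 1,
            abs(clang_version - version),
        )

    # An exact match wins, then newer versions, then older ones
    assert sorted([12, 14, 15, 16, 18], key=version_key) == [15, 16, 18, 14, 12]
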
@@ -1273,100 +1424,98 @@ class _KernelBuildEnv(Loggable, SerializeViaConstructor):

         make_vars = build_conf.get('make-variables', {})

-        def pick_first(toolchains):
-            found = [
-                toolchain
-                for toolchain in toolchains
-                if shutil.which(f'{toolchain}gcc') is not None
-            ]
-            # If no toolchain is found, we pick the first one that will be used
-            # for clang target triplet
-            try:
-                return found[0]
-            except IndexError:
-                return toolchains[0]
-
         if abi == LISA_HOST_ABI:
-            toolchain = None
+            cross_compiles = ['']
         else:
             try:
-                toolchain = make_vars['CROSS_COMPILE']
+                cross_compiles = [make_vars['CROSS_COMPILE']]
             except KeyError:
                 try:
-                    toolchain = os.environ['CROSS_COMPILE']
+                    cross_compiles = [os.environ['CROSS_COMPILE']]
                 except KeyError:
                     if abi == 'arm64':
-                        toolchain = pick_first(['aarch64-linux-gnu-', 'aarch64-none-elf-'])
+                        cross_compiles = ['aarch64-linux-gnu-', 'aarch64-none-elf-', 'aarch64-linux-android-', 'aarch64-none-linux-android-']
                     elif abi == 'armeabi':
-                        toolchain = pick_first(['arm-linux-gnueabi-', 'arm-none-eabi-'])
+                        cross_compiles = ['arm-linux-gnueabi-', 'arm-none-eabi-', 'arm-none-linux-gnueabi-']
                     elif abi == 'x86':
-                        toolchain = 'i686-linux-gnu-'
+                        cross_compiles = ['i686-linux-gnu-']
                     else:
-                        toolchain = None
+                        cross_compiles = ['']
                         logger.error(f'ABI {abi} not recognized, CROSS_COMPILE env var needs to be set')
-                    logger.debug(f'CROSS_COMPILE env var not set, assuming "{toolchain}"')
-
-        def test_cmd(cc):
-            return [cc, *([f'--target={toolchain}'] if toolchain else []), '-x' 'c', '-c', '-', '-o', '/dev/null']
-
-        commands = {
-            'gcc': [f'{toolchain or ""}gcc', '-x' 'c', '-c', '-', '-o', '/dev/null'],
-            **{
-                cc: test_cmd(cc)
-                # Try the default "clang" name first in case it's good enough
-                for cc in ['clang'] + [
-                    f'clang-{i}'
-                    # Try the most recent ones first
-                    for i in reversed(
-                        # Cover for the next 10 years starting from 2021
-                        range(cls._MIN_CLANG_VERSION, cls._MIN_CLANG_VERSION + 10 * 2)
-                    )
-                ]
-            },
+                    logger.debug(f'CROSS_COMPILE env var not set, assuming "{cross_compiles}"')
+
+        cross_compiles = cross_compiles or ['']
+
+        # The format of the "ccs" set is:
+        #     (CC=<cc>, CROSS_COMPILE=<cross_compile>)
+        ccs = {
+            *(
+                (f'clang-{i}', cross_compile)
+                # Cover for the next 10 years starting from 2021
+                for i in reversed(range(
+                    cls._MIN_CLANG_VERSION,
+                    cls._MIN_CLANG_VERSION + 10 * 2
+                ))
+                for cross_compile in cross_compiles
+            ),
+            *(
+                ('clang', cross_compile)
+                for cross_compile in cross_compiles
+            ),
+            *(
+                ('gcc', cross_compile)
+                for cross_compile in cross_compiles
+            ),
         }

-        cc = None
-
         if 'CC' in make_vars:
-            cc = make_vars['CC']
-            try:
-                commands = {cc: commands[cc]}
-            except KeyError:
-                commands = {}
-        # Default to clang on alpine, as it will be in a high-enough version
-        # and since Alpine does not ship any cross-toolchain for GCC, this will
-        # avoid having to use QEMU userspace emulation which is really slow.
-        elif build_conf['build-env'] == 'alpine':
-            cc = 'clang'
+            _cc = make_vars['CC']
+            ccs = {
+                (_cc, cross_compile)
+                for cross_compile in cross_compiles
+            }

         if 'LLVM' in make_vars:
-            cc = cc or 'clang'
             llvm = make_vars['LLVM']
-            version = llvm if llvm.startswith('-') else ''
-            if cc == 'clang' and version:
-                cc = cc + version
-            commands = {
-                cc: test_cmd(cc),
+            _cc = make_vars.get('CC', 'clang')
+            llvm_version = llvm if llvm.startswith('-') else None
+            if _cc == 'clang' and llvm_version:
+                _cc = _cc + llvm_version
+            ccs = {
+                (_cc, cross_compile)
+                for cross_compile in cross_compiles
             }

         # Give priority for the toolchain the kernel seem to have been compiled
         # with
-        def key(cc_cmd):
-            cc, cmd = cc_cmd
-            return cc_priority(cc, cmd)
+        def key(item):
+            (cc, cross_compile) = item
+            return cc_priority(cc)

-        commands = dict(sorted(
-            commands.items(),
-            key=key,
-        ))
+        ccs = sorted(ccs, key=key)
+
+        cc = None
+        cross_compile = None

         # Only run the check on host build env, as other build envs are
         # expected to be correctly configured.
-        if build_conf['build-env'] == 'host' and commands:
+        if build_conf['build-env'] == 'host':
+
+            def test_cmd(cc, cross_compile):
+                opts = ('-x' 'c', '-c', '-', '-o', '/dev/null')
+                if 'gcc' in cc:
+                    return (f'{cross_compile}{cc}', *opts)
+                elif 'clang' in cc:
+                    return (cc, *([f'--target={cross_compile}'] if cross_compile else []), *opts)
+                else:
+                    raise ValueError(f'Cannot test presence of compiler "{cc}"')
+
             toolchain_path = build_conf['build-env-settings']['host'].get('toolchain-path', None)
-            for cc, cmd in commands.items():
+            for (cc, cross_compile) in ccs:
+                cmd = test_cmd(cc, cross_compile)
+                pretty_cmd = ' '.join(cmd)
                 try:
                     subprocess.check_output(
@@ -1386,23 +1535,50 @@ class _KernelBuildEnv(Loggable, SerializeViaConstructor):
                     if cls._check_cc_version(cc, toolchain_path):
                         break
             else:
-                raise ValueError(f'Could not find a working toolchain for CROSS_COMPILE={toolchain}')
+                cross = ' or '.join(
+                    f'CROSS_COMPILE={cross_compile}'
+                    for cross_compile in cross_compiles
+                )
+                cc = make_vars.get('CC')
+                with_cc = f' with CC={cc}' if cc else ''
+                raise ValueError(f'Could not find a working toolchain for {cross}{with_cc}')
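
`test_cmd` probes each candidate by compiling an empty translation unit from stdin; gcc cross compilers are selected by binary name prefix, while clang takes the prefix as a `--target` triplet. A self-contained restatement (note that the adjacent string literals `'-x' 'c'` above concatenate to a single `-xc` argument):

    def test_cmd(cc, cross_compile):
        opts = ('-xc', '-c', '-', '-o', '/dev/null')
        if 'gcc' in cc:
            # e.g. 'aarch64-linux-gnu-' + 'gcc'
            return (f'{cross_compile}{cc}', *opts)
        elif 'clang' in cc:
            # clang is inherently a cross compiler
            return (cc, *([f'--target={cross_compile}'] if cross_compile else []), *opts)
        else:
            raise ValueError(f'Cannot test presence of compiler "{cc}"')

    assert test_cmd('gcc', 'aarch64-linux-gnu-')[0] == 'aarch64-linux-gnu-gcc'
    assert test_cmd('clang-15', 'aarch64-linux-gnu-')[1] == '--target=aarch64-linux-gnu-'
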

-        if cc is None:
-            raise ValueError(f'Could not detect which compiler to use')
+        elif build_conf['build-env'] == 'alpine':
+            alpine_version = build_conf._get_alpine_version()
+
+            if ccs:
+                for (cc, cross_compile) in ccs:
+                    try:
+                        _find_alpine_cc_packages(
+                            # We check against the package list for the host
+                            # ABI, assuming we will not need emulation to run
+                            # the toolchain.
+                            abi=LISA_HOST_ABI,
+                            version=tuple(alpine_version),
+                            cc=cc,
+                            cross_compile=cross_compile,
+                        )
+                    except ValueError:
+                        pass
+                    else:
+                        break
+                else:
+                    ccs, *_ = zip(*ccs)
+                    ccs = ', '.join(sorted(ccs))
+                    alpine_version = '.'.join(map(str, alpine_version))
+                    raise ValueError(f'None of the considered toolchains are available on Alpine Linux v{alpine_version}: {ccs}')

-        logger.info(f'Detected CROSS_COMPILE={toolchain} and CC={cc}')
+        if cc is None:
+            raise ValueError(f'Could not detect which compiler to use for CC')

-        detected = {}
-        if toolchain:
-            detected['CROSS_COMPILE'] = toolchain
+        if cross_compile is None:
+            raise ValueError(f'Could not detect which CROSS_COMPILE value to use')

-        make_vars = {
-            **detected,
-            **make_vars,
-        }
+        ideal_cc = ccs[0][0]
+        if cc != ideal_cc:
+            logger.info(f'Could not find ideal CC={ideal_cc} but found CC={cc} instead')

-        return (make_vars, cc)
+        return (cc, cross_compile)

     @classmethod
     @SerializeViaConstructor.constructor
@@ -1451,7 +1627,7 @@ class _KernelBuildEnv(Loggable, SerializeViaConstructor):
         abi = plat_info['abi']
         kernel_info = plat_info['kernel']

-        build_conf, cc, _abi = cls._resolve_conf(build_conf, abi=abi, target=target)
+        build_conf, cc, cross_compile, _abi = cls._resolve_conf(build_conf, abi=abi, target=target)
         assert _abi == abi

         @contextlib.contextmanager
@@ -1656,7 +1832,7 @@ class _KernelBuildEnv(Loggable, SerializeViaConstructor):
         """
         logger = cls.get_logger()
         overlays = overlays or {}
-        build_conf, cc, abi = cls._resolve_conf(build_conf)
+        build_conf, cc, cross_compile, abi = cls._resolve_conf(build_conf)

         def copy_filter(src, dst, remove_obj=False):
             return not (
@@ -1678,6 +1854,7 @@ class _KernelBuildEnv(Loggable, SerializeViaConstructor):
             cls._prepare_tree(
                 path,
                 cc=cc,
+                cross_compile=cross_compile,
                 abi=abi,
                 build_conf=build_conf,
                 apply_overlays=functools.partial(apply_overlays, path),
@@ -1937,6 +2114,7 @@ class KmodSrc(Loggable):
             **(make_vars or {}),
         }
         cc = kernel_build_env.cc
+        cross_compile = kernel_build_env.cross_compile
         abi = kernel_build_env.abi
         tree_path = Path(kernel_build_env.path)
         # "inherit" the build env from the _KernelBuildEnv as we must use the same
@@ -1997,6 +2175,7 @@ class KmodSrc(Loggable):
         def cmd_cm():
             with _make_build_chroot(
                 cc=cc,
+                cross_compile=cross_compile,
                 bind_paths=bind_paths,
                 abi=abi,
                 overlay_backend=build_conf['overlay-backend'],
diff --git a/lisa/trace.py b/lisa/trace.py
index d5bc53c64c10923da5cfbb67ce81aaad98aae74e..d4ec4bd22c2e45d8ad14b84a5e3bb362b545f9a6 100644
--- a/lisa/trace.py
+++ b/lisa/trace.py
@@ -5652,13 +5652,22 @@ class FtraceConf(SimpleMultiSrcConf, HideExekallID):
         :type optional_events: bool
         """
+        def get_key(conf, key):
+            return conf.get_key(key, quiet=True)
+
+        def get_key_default(conf, key, default):
+            try:
+                return get_key(conf, key)
+            except KeyError:
+                return default
+
         if not isinstance(conf, self.__class__):
             conf = self.__class__(conf=conf)

         def merge_conf(key, val, path):
             new = _merge_conf(key, val, path)
             try:
-                existing = get_nested_key(self, path + [key])
+                existing = get_nested_key(self, path + [key], getitem=get_key)
             except KeyError:
                 return (True, new)
             else:
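
`get_key` and `get_key_default` provide `dict.get`-style lookups on top of the configuration object's `get_key(key, quiet=True)` API, so that merging does not emit the logging a plain `self.get(key)` would trigger. The semantics, sketched against a hypothetical stand-in conf class:

    class FakeConf:
        # Hypothetical stand-in for SimpleMultiSrcConf, for illustration
        def __init__(self, data):
            self._data = data

        def get_key(self, key, quiet=False):
            # The real implementation logs the access unless quiet=True
            return self._data[key]

    def get_key(conf, key):
        return conf.get_key(key, quiet=True)

    def get_key_default(conf, key, default):
        try:
            return get_key(conf, key)
        except KeyError:
            return default

    conf = FakeConf({'buffer-size': 4096})
    assert get_key_default(conf, 'buffer-size', 0) == 4096
    assert get_key_default(conf, 'tracer', None) is None
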
@@ -5669,13 +5678,13 @@ class FtraceConf(SimpleMultiSrcConf, HideExekallID):

         def _merge_conf(key, val, path):

             def non_mergeable(key):
-                if self.get(key, val) == val:
+                if get_key_default(self, key, val) == val:
                     return val
                 else:
                     raise KeyError(f'Cannot merge key "{key}": incompatible values specified: {self[key]} != {val}')

             if key == 'functions':
-                return sorted(set(val) | set(self.get(key, [])))
+                return sorted(set(val) | set(get_key_default(self, key, [])))
             elif key == 'events-namespaces':
                 # We already applied the namespaces to the events so the result
                 # can be cleanly merged according to the original meaning.
@@ -5690,7 +5699,7 @@ class FtraceConf(SimpleMultiSrcConf, HideExekallID):
                 # set the namespace to be empty (None, )
                 def get(conf, key):
                     try:
-                        return conf.get(key)
+                        return get_key(conf, key)
                     except KeyError:
                         return conf.DEFAULT_SRC.get(key)
@@ -5698,7 +5707,7 @@ class FtraceConf(SimpleMultiSrcConf, HideExekallID):
                     namespaces=get(conf, 'events-namespaces')
                 )

-                self_val = self.get(key, [])
+                self_val = get_key_default(self, key, [])
                 if not isinstance(self_val, TraceEventCheckerBase):
                     self_val = AndTraceEventChecker.from_events(self_val)
@@ -5708,11 +5717,11 @@ class FtraceConf(SimpleMultiSrcConf, HideExekallID):

                 return AndTraceEventChecker([val, self_val])
             elif key == 'buffer-size':
-                return max(val, self.get(key, 0))
+                return max(val, get_key_default(self, key, 0))
             elif key == 'trace-clock':
                 return non_mergeable(key)
             elif key == 'saved-cmdlines-nr':
-                return max(val, self.get(key, 0))
+                return max(val, get_key_default(self, key, 0))
             elif key == 'tracer':
                 return non_mergeable(key)
             elif key == 'modules':
diff --git a/lisa/utils.py b/lisa/utils.py
index 7fc612ffd6085e7b5ffb78f92a7669bae8d3d52f..e7427a8f7ce5dafa53eb4f8d9ed479ba087fe55a 100644
--- a/lisa/utils.py
+++ b/lisa/utils.py
@@ -64,6 +64,7 @@
 import shutil
 import platform
 import subprocess
 import multiprocessing
+import urllib.request

 import ruamel.yaml
 from ruamel.yaml import YAML
@@ -4076,4 +4077,26 @@ def mp_spawn_pool(import_main=False, **kwargs):
         return pool


+def is_link_dead(url):
+    """
+    Check if link is dead. If dead, returns a truthy value, otherwise a falsy
+    one.
+    """
+
+    # Some HTTP servers (including ReadTheDocs) will return 403 Forbidden
+    # if no User-Agent is given
+    headers = {
+        'User-Agent': 'Wget/1.13.4 (linux-gnu)',
+    }
+    request = urllib.request.Request(url, headers=headers)
+    try:
+        urllib.request.urlopen(request)
+    except (urllib.request.HTTPError, urllib.request.URLError) as e:
+        return e.reason
+    else:
+        return None
+
+
 # vim :set tabstop=4 shiftwidth=4 textwidth=80 expandtab
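
`is_link_dead` returns the failure reason rather than a plain boolean, so callers can report why a link is considered dead while still using the result in a truth test. A usage sketch (the URLs are placeholders):

    from lisa.utils import is_link_dead

    for url in ('https://www.example.org/', 'https://www.example.org/missing'):
        reason = is_link_dead(url)
        if reason:
            # reason is e.g. "Not Found" for an HTTP 404
            print(f'Dead link {url}: {reason}')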