# --- fastpath/commands/nouns/bisect.py (new file) ---
# Copyright (c) 2025, Arm Limited.
# SPDX-License-Identifier: MIT

import os
import sys


from fastpath.commands import cliutils
from fastpath.commands.verbs.bisect import start
from fastpath.commands.verbs.bisect import run
from fastpath.utils import workspace


# Noun name is derived from this module's filename ("bisect") so the CLI name
# always matches the file layout.
noun_name = os.path.splitext(os.path.basename(__file__))[0]
noun_help = """kernel regression bisection and analysis"""
noun_desc = """This command provides functionality for kernel regression bisection and analysis.
    It allows users to prepare the necessary inputs for running kernel regression bisection,
    execute the bisection process, and analyze the results.
    It includes subcommands for starting the bisection process and running the bisection analysis.
    The bisection process identifies which kernel commit introduced a regression,
    and the analysis provides insights into the results of the bisection."""
# Populated by add_parser(); dispatch() needs the parser to print help on error.
nounp = None
# Maps verb name -> verb module; populated during add_parser().
verbs = {}


def add_parser(parser, formatter):
    """
    Part of the command interface expected by fastpath.py. Adds the subcommand
    to the parser, along with all options and documentation. Returns the
    subcommand name.
    """
    global nounp
    nounp = parser.add_parser(
        noun_name,
        description=noun_desc,
        help=noun_help,
        epilog=cliutils.epilog,
        formatter_class=formatter,
        add_help=False,
    )

    cliutils.add_generic_args(nounp)

    subparsers = nounp.add_subparsers(
        dest="verb",
        metavar="",
        title=f"""Supported verbs (run "{nounp.prog} --help" for more info)""",
    )

    # Register all bisect verbs. The verbs take an add_noun_args callback; this
    # noun has no noun-level arguments, so pass a no-op.
    verbs[start.add_parser(subparsers, formatter, lambda _: None)] = start
    verbs[run.add_parser(subparsers, formatter, lambda _: None)] = run

    return noun_name


def dispatch(args):
    """
    Part of the command interface expected by fastpath.py. Called to execute the
    subcommand, with the arguments the user passed on the command line. The
    arguments comply with those requested in add_parser().
    """
    if args.verb not in verbs:
        # Fixed: the original used an f-string with no placeholders.
        print("error: must specify verb\n")
        nounp.print_help()
        return

    # Dispatch to the correct verb handler.
    verbs[args.verb].dispatch(args)


# --- fastpath/commands/verbs/bisect/__init__.py (new file): empty package marker ---


# --- fastpath/commands/verbs/bisect/run.py (new file) ---
# Copyright (c) 2025, Arm Limited.
# SPDX-License-Identifier: MIT

import argparse
import os
import sys
import tempfile

import natsort as ns
import numpy as np
import pandas as pd
import yaml

from fastpath.commands import cliutils
from fastpath.commands.verbs.plan import exec as plan_exec
from fastpath.commands.verbs.result import merge
from fastpath.commands.verbs.result import show
from fastpath.utils import plan as plan_utils
from fastpath.utils.table import (
    Table,
    load_tables,
    join_results,
    filter_results,
)


verb_name = os.path.splitext(os.path.basename(__file__))[0]
verb_help = """run bisection on kernel regression candidate"""
verb_desc = """Run kernel regression bisection on the current system under test (SUT).
    This command evaluates the current kernel swprofile against a known good
    swprofile, using the benchmark defined in the context setup by 'fastpath bisect start'.
    It requires the kernel image and modules to be available at specified paths.
    It will load the benchmark plan, execute it, and evaluate the results
    to determine if the current kernel configuration is an improvement or regression
    over the known good swprofile."""
def add_parser(parser, formatter, add_noun_args):
    """
    Part of the command interface expected by fastpath.py. Adds the subcommand
    to the parser, along with all options and documentation. Returns the
    subcommand name.
    """
    verbp = parser.add_parser(
        verb_name,
        formatter_class=formatter,
        description=verb_desc,
        help=verb_help,
        epilog=cliutils.epilog,
        add_help=False,
    )

    cliutils.add_generic_args(verbp)
    add_noun_args(verbp)

    verbp.add_argument(
        "--context",
        metavar="",
        required=True,
        help="""Path to the bisection context file created by 'fastpath bisect start'
        (e.g. ./bisect_template.yaml).""",
    )

    verbp.add_argument(
        "--pkgtype",
        metavar="",
        required=False,
        default=None,
        choices=[None, "RAW", "DEB"],
        help="""Describes what "kernel" parameter points to. Either "RAW" (if a
        raw kernel Image) or "DEB" (if a Debian package) If omitted, value
        is inferred; "DEB" if kernel string ends with ".deb" or "RAW"
        otherwise. All required drivers must be built-in for RAW image.""",
    )

    verbp.add_argument(
        "--kernel",
        metavar="",
        required=True,
        help="""Kernel package to install. Either a filesystem path or URL.""",
    )

    verbp.add_argument(
        "--modules",
        metavar="",
        required=False,
        default=None,
        help="""When pkgtype is RAW, modules to install, provided as tarball.
        Must be None for other pkgtypes. Either a filesystem path or
        URL.""",
    )

    # Fixed: removed redundant dest="gitsha" (identical to the derived dest)
    # and default=None (argparse ignores the default when required=True).
    verbp.add_argument(
        "--gitsha",
        metavar="",
        required=True,
        help="""Git SHA of the kernel build.""",
    )

    return verb_name


def dispatch(args):
    """
    Part of the command interface expected by fastpath.py. Called to execute the
    subcommand, with the arguments the user passed on the command line. The
    arguments comply with those requested in add_parser().
    """

    # Load the bisection context YAML file.
    with open(args.context, "r") as f:
        context = yaml.safe_load(f)

    # Extract SUT name, good swprofile, and resultclass name from context.
    sut = context["plan"]["sut"]["name"]
    good_swprofile = context.get("good-swprofile")
    resultclass = context.get("resultclass")

    # Extract benchmark suite and name, and construct benchmark identifier.
    benchmark_suite = context["plan"]["benchmarks"][0]["suite"]
    benchmark_name = context["plan"]["benchmarks"][0]["name"]
    benchmark = f"{benchmark_suite}/{benchmark_name}"

    # Prepare a plan file with updated swprofile info from args. NOTE: this
    # mutates the context's first swprofile, so the test swprofile name must be
    # read AFTER this call.
    temp_plan_path = prepare_plan_from_context(context, args)
    test_swprofile = context["plan"]["swprofiles"][0]["name"]

    # Execute the plan, appending results to the context's resultstore.
    plan_args = argparse.Namespace(
        plan=temp_plan_path,
        output=context["output-resultstore"],
        append=True,
    )
    plan_exec.dispatch(plan_args)

    # Load results from the resultstore (merged tables).
    resultstore = context["output-resultstore"]
    tables = load_tables(resultstore, merge_similar=True)

    # Create a list of swprofiles to compare (good and test).
    swprofiles = [good_swprofile, test_swprofile]

    # Filter results to relevant entries for the SUT, swprofiles, and benchmark.
    # Fixed: removed a redundant second assignment of `sut` present in the
    # original.
    results = join_results(tables)
    results = filter_results(
        results,
        suts=[sut],
        swprofiles=swprofiles,
        benchmarks=[benchmark],
        resultclasses=[resultclass],
    )

    # Analyze changes and report regression/improvement/no change.
    analyze_changes(
        results,
        sut,
        swprofiles,
        benchmark,
        resultclass,
        test_swprofile,
        good_swprofile,
    )


def prepare_plan_from_context(context, args):
    """
    Prepare a benchmark plan YAML file from the given bisection context and command-line arguments.

    This function updates the first software profile in the context's plan with kernel-related
    information provided via command-line arguments, then writes the updated plan to
    "plan.yaml" in the current working directory.

    Args:
        context (dict): The bisection context loaded from YAML.
        args (Namespace): Parsed command-line arguments containing kernel information.

    Returns:
        str: The file path to the written YAML plan file.
    """
    plan = context.get("plan", {})
    swprofiles = plan["swprofiles"]
    swprofile = swprofiles[0]
    # The profile name embeds the (abbreviated) git SHA so each bisection step
    # produces a distinct swprofile in the resultstore.
    swprofile["name"] = f"bisect-{args.gitsha[:12]}"
    swprofile["pkgtype"] = args.pkgtype
    swprofile["kernel"] = args.kernel
    swprofile["modules"] = args.modules
    swprofile["gitsha"] = args.gitsha

    # NOTE(review): despite the tempfile import in this module, the plan is
    # written to the CWD; the docstring previously claimed a temporary file.
    temp_plan_path = "plan.yaml"

    with open(temp_plan_path, "w") as temp_plan_file:
        plan_utils.dump(plan, temp_plan_file)
    return temp_plan_path
def analyze_changes(
    results,
    sut,
    swprofiles,
    benchmark,
    resultclass,
    test_swprofile,
    good_swprofile,
):
    """
    Analyze benchmark result changes between test and good software profiles and print outcome.

    This function pivots the results, computes changes, and determines if there is a regression,
    improvement, or no significant change for the specified benchmark and result class.

    Args:
        results (pandas.DataFrame): Filtered benchmark results DataFrame.
        sut (str): Name of the system under test.
        swprofiles (list): List of software profile names compared.
        benchmark (str): Benchmark identifier string.
        resultclass (str): Result class name prefix.
        test_swprofile (str): Name of the test software profile.
        good_swprofile (str): Name of the good (baseline) software profile.

    Returns:
        None

    Exits:
        Exits with code 1 if regression detected.
        Exits with code 0 if improvement or no significant change detected.
        Exits with code 2 if the resultclass row cannot be located.
    """
    # Pivot so swprofiles become columns, then drop the single-SUT axis level.
    pivoted = show.pivot_results(results, [sut], swprofiles)
    pivoted = pivoted.xs(key=sut, level="sut", axis=1)

    # Options consumed by show.compute_change(); only attribute access is
    # needed, so a Namespace is equivalent to an ad-hoc options class.
    compute_args = argparse.Namespace(
        baseline=True,
        noise_threshold=0.01,
        ascii=False,
        relative=False,
        display="all",
    )
    changed = show.compute_change(pivoted, compute_args)

    try:
        row = changed.loc[(benchmark, resultclass)]
    except KeyError:
        print("ERROR: Could not find row for resultclass in pivoted results.")
        sys.exit(2)

    change_val = row[("change", test_swprofile)]
    change_val = change_val.iloc[0]

    # Map the computed outcome to a report label and process exit code; any
    # value other than regression/improvement counts as no significant change.
    label, exit_code = {
        "regression": ("REGRESSION", 1),
        "improvement": ("IMPROVEMENT", 0),
    }.get(change_val, ("NO SIGNIFICANT CHANGE", 0))

    print(
        f"Result: {label} detected for resultclass '{resultclass}' comparing '{test_swprofile}' vs '{good_swprofile}'."
    )
    sys.exit(exit_code)


# --- fastpath/commands/verbs/bisect/start.py (new file) ---
# Copyright (c) 2025, Arm Limited.
# SPDX-License-Identifier: MIT

import argparse
import os
import pandas as pd
import tempfile
import yaml

from fastpath.commands import cliutils
from fastpath.utils import resultstore as rs
from fastpath.utils import workspace
from fastpath.utils.table import (
    Table,
    load_tables,
    join_results,
    filter_results,
)
from fastpath.commands.verbs.result import merge

# Verb name is derived from this module's filename ("start").
verb_name = os.path.splitext(os.path.basename(__file__))[0]
verb_help = """prepare required inputs for kernel regression bisection"""
verb_desc = """Prepare the necessary inputs for running kernel regression bisection.
    This command generates a bisection_context.yaml template that includes the system under
    test (SUT) configuration, the benchmark to run, and the known-good and known-bad kernel
    configurations. It resolves the kernel commit SHAs for the provided good and bad
    configurations and checks if they are different. If they are the same, it aborts the
    process. The generated bisection_context.yaml is then used to execute the plan on SUT."""


def add_parser(parser, formatter, add_noun_args):
    """
    Part of the command interface expected by fastpath.py. Adds the subcommand
    to the parser, along with all options and documentation. Returns the
    subcommand name.
    """
    verbp = parser.add_parser(
        verb_name,
        formatter_class=formatter,
        description=verb_desc,
        help=verb_help,
        epilog=cliutils.epilog,
        add_help=False,
    )

    cliutils.add_generic_args(verbp)
    add_noun_args(verbp)

    # Connection options for reaching the SUT over SSH.
    verbp.add_argument(
        "--host",
        metavar="",
        required=True,
        help="""Host name or IP address of the connection, or name of Host in
        SSH config file.""",
    )

    verbp.add_argument(
        "-u",
        "--user",
        metavar="",
        required=False,
        default=None,
        help="""Login user for the remote connection. When None, SSH uses its default
        configured user, which may be specified in the SSH config file if
        host is the name of a Host in the SSH config file.""",
    )

    verbp.add_argument(
        "-p",
        "--port",
        metavar="",
        required=False,
        default=None,
        type=int,
        help="""Remote port to connect to. When None, SSH uses its default configured
        port, which may be specified in the SSH config file if host is the name
        of a Host in the SSH config file.""",
    )

    verbp.add_argument(
        "--keyfile",
        metavar="",
        required=False,
        default=None,
        help="""Path of private key to use for connection. When None, SSH uses its
        default configured private key(s).""",
    )

    # Identification of the bisection inputs within the resultstore.
    verbp.add_argument(
        "--sut",
        metavar="",
        required=True,
        help="""System under test (SUT) ID. Must match exactly the value shown in the
        'name' column in SUT.csv from resultstore. This identifies the target
        hardware on which the benchmark results were collected and where the
        bisection will be executed.""",
    )

    verbp.add_argument(
        "--good-swprofile",
        metavar="",
        required=True,
        help="""Known good swprofile ID from the 'name' column in SWPROFILE.csv from
        resultstore. This profile must have matching cmdline/sysctl/bootscript
        with the bad profile.""",
    )

    verbp.add_argument(
        "--bad-swprofile",
        metavar="",
        required=True,
        help="""Known bad swprofile ID from the 'name' column in SWPROFILE.csv from
        resultstore. Must differ from the good profile only in kernel_git_sha.""",
    )

    verbp.add_argument(
        "--benchmark",
        metavar="",
        required=True,
        help="""Benchmark to bisect, identified by the 'suite/name' field in BENCHMARK.csv
        from resultstore (e.g., 'sysbench/thread'). Ensure the benchmark has been run
        with the good and bad swprofiles on the specified SUT.""",
    )

    verbp.add_argument(
        "--resultclass",
        metavar="",
        required=True,
        help="""Name of the result class used to judge performance. This should match the
        'name' column in RESULTCLASS.csv from resultstore (e.g., 'sysbenchthread-110').
        The unit is matched automatically and does not need to be included. This value
        is used to select the appropriate resultclass row when comparing benchmark
        performance between the good and bad swprofiles.""",
    )

    verbp.add_argument(
        "--context",
        metavar="",
        required=True,
        help="""Path to the output bisection context YAML file (e.g. ./bisection_context.yaml).
        This file includes the kernel bisection context and related inputs. The 'plan'
        section of this file is consumed by the execution command running on the SUT.""",
    )

    # --resultstore is only mandatory when the workspace has no default.
    default = workspace.getvar("default", "resultstore")
    help_text = """URL encoded to describe a resultstore in either csv, sqlite or
        mysql format. csv: csv:/// (although "csv:///" is
        optional). sqlite: sqlite:///. mysql:
        mysql://:@:/."""
    if default:
        help_text += f" Defaults to {default}."

    verbp.add_argument(
        "--resultstore",
        metavar="",
        required=(default is None),
        default=default,
        help=help_text,
    )

    return verb_name
+ """ + tables = load_tables(args.resultstore, merge_similar=True) + results = join_results(tables) + result = filter_results( + results, + suts=[args.sut], + swprofiles=[args.good_swprofile, args.bad_swprofile], + benchmarks=[args.benchmark], + ) + + if len(result) == 0: + raise Exception("No results to display after filtering") + + good_sha, bad_sha, shared_fields = validate_swprofiles(args, tables) + + validate_bisection_inputs(result, args) + + benchmark_entry = build_benchmark_entry(args, tables) + + temp_resultstore = merge_baseline_results(args) + + bisection_context = build_bisection_context( + args, + good_sha, + bad_sha, + benchmark_entry, + shared_fields, + temp_resultstore, + ) + + with open(args.context, "w") as f: + yaml.dump( + bisection_context, f, sort_keys=False, default_flow_style=False + ) + + print(f"bisection template yaml written to: {args.context}") + + +def merge_baseline_results(args): + """ + Create a temporary resultstore directory and merge baseline results for good and bad swprofiles. + + Args: + args: Parsed command-line arguments. + + Returns: + Path to the temporary resultstore directory. + """ + temp_resultstore = tempfile.mkdtemp(prefix="bisect-resultstore-") + + # Prepare the merge arguments as expected by merge.py: + merge_args = argparse.Namespace( + resultstore=[args.resultstore], + sut=[args.sut], + swprofile=[args.good_swprofile, args.bad_swprofile], + benchmark=[args.benchmark], + output=temp_resultstore, + append=True, + ) + merge.dispatch(merge_args) + + return temp_resultstore + + +def validate_swprofiles(args, tables): + """ + Validate that both good and bad swprofiles exist and their cmdline, sysctl, and bootscript fields match. + + Args: + args: Parsed command-line arguments. + tables: Loaded tables from the resultstore. + + Returns: + Tuple of good_sha, bad_sha and swprofile shared_fields DataFrames. 
+ """ + swprofile = tables[Table.SWPROFILE] + good_row = swprofile[swprofile["unique"] == args.good_swprofile] + bad_row = swprofile[swprofile["unique"] == args.bad_swprofile] + if good_row.empty: + raise Exception( + f"Could not find swprofile row for good-swprofile: {args.good_swprofile}" + ) + if bad_row.empty: + raise Exception( + f"Could not find swprofile row for bad-swprofile: {args.bad_swprofile}" + ) + + shared_fields = {} + for field in ["cmdline", "sysctl", "bootscript"]: + good_val = good_row.iloc[0][field] + bad_val = bad_row.iloc[0][field] + if good_val != bad_val: + raise Exception( + f"Mismatch in swprofile field '{field}' between good and bad profiles. Aborting bisection." + ) + if isinstance(good_val, list): + shared_fields[field] = good_val + elif good_val is None: + shared_fields[field] = [] + else: + shared_fields[field] = [good_val] + + good_sha = good_row.iloc[0]["kernel_git_sha"] + bad_sha = bad_row.iloc[0]["kernel_git_sha"] + if good_sha == bad_sha: + raise Exception("Good and Bad SHAs are identical. Aborting bisection.") + + return good_sha, bad_sha, shared_fields + + +def validate_bisection_inputs(result, args): + """ + Validate that results for good and bad swprofiles exist and were run on the specified SUT. + Also validate that the specified resultclass exists in the filtered results. + + Args: + result: Filtered results DataFrame including both good and bad swprofiles. + args: Parsed command-line arguments. 
+ """ + good_results = result[result["swprofile"] == args.good_swprofile] + bad_results = result[result["swprofile"] == args.bad_swprofile] + + if good_results.empty: + raise Exception( + "No results found for the good swprofile after filtering" + ) + if bad_results.empty: + raise Exception( + "No results found for the bad swprofile after filtering" + ) + + # Ensure both good and bad results were run on the specified SUT + if not all(good_results["sut"] == args.sut): + raise Exception("Good swprofile was not executed on the specified SUT.") + if not all(bad_results["sut"] == args.sut): + raise Exception("Bad swprofile was not executed on the specified SUT.") + + # Validate that the specified resultclass exists in the filtered results + if args.resultclass not in result["resultclass"].values: + raise Exception( + f"Specified resultclass '{args.resultclass}' not found in the filtered results.\n" + ) + + +def build_benchmark_entry(args, tables): + """ + Build the benchmark entry dictionary from the benchmark table and associated params. + + Args: + args: Parsed command-line arguments. + tables: Loaded tables from the resultstore. + + Returns: + Dictionary containing all benchmark metadata fields: suite, name, type, image, params. 
+ """ + + benchmark_table = tables[Table.BENCHMARK] + param_table = tables[Table.PARAM] + benchmark_row = benchmark_table[benchmark_table["unique"] == args.benchmark] + + # Save benchmark_id before flattening row + benchmark_id = benchmark_row.index[0] + benchmark_row = benchmark_row.iloc[0] + # Gather params by joining PARAM table on benchmark_id + params = {} + for _, row in param_table[ + param_table["benchmark_id"] == benchmark_id + ].iterrows(): + params[row["name"]] = row["value"] + benchmark_entry = { + "suite": benchmark_row.get("suite"), + "name": benchmark_row.get("name"), + "type": benchmark_row.get("type"), + "image": benchmark_row.get("image"), + "params": params, + } + return benchmark_entry + + +def build_bisection_context( + args, good_sha, bad_sha, benchmark_entry, shared_fields, temp_resultstore +): + """ + Build the bisection_context data dictionary for YAML output. + + Args: + args: Parsed command-line arguments. + good_sha: Kernel git SHA for good swprofile. + bad_sha: Kernel git SHA for bad swprofile. + benchmark_entry: Benchmark entry dict. + temp_resultstore: Path to temporary resultstore directory. + + Returns: + Dictionary with bisection_context data. + """ + + # The ID uniquely refers to an object in the resultstore provided by the user. If multiple + # entries share the same name, the ID will be formed by appending the primary key to the name, + # i.e., ":". This ensures uniqueness but is only valid within the original + # resultstore. After copying objects into a new resultstore for bisection, we must determine + # their identities within this new context. + # + # Since only one SUT and one benchmark are copied, their names are guaranteed to be unique and + # remain stable without requiring suffixes. However, we copy two swprofiles (good and bad), and + # while it is theoretically possible they share the same name, this is highly unlikely. Therefore, + # we simplify by assuming swprofile names remain unique after copying. 
+ # + # Additionally, validate_swprofiles ensures that both good and bad swprofiles exist and that they + # tshare the same cmdline, sysctl, and bootscript parameters. This guarantees the kernel is the only + # differing variable during bisection. + def sanitize_id(id): + return id.split(":")[0] + + sut_name = sanitize_id(args.sut) + good_swprofile_name = sanitize_id(args.good_swprofile) + bad_swprofile_name = sanitize_id(args.bad_swprofile) + + bisection_context = { + "plan": { + "sut": { + "name": sut_name, + "connection": { + "method": "SSH", + "params": { + "host": args.host, + "user": args.user, + "port": args.port, + "keyfile": args.keyfile, + }, + }, + }, + "swprofiles": [shared_fields], + "benchmarks": [benchmark_entry], + "defaults": { + "benchmark": { + "warmups": 1, + "repeats": 3, + "sessions": 2, + } + }, + }, + "good-swprofile": good_swprofile_name, + "good_sha": good_sha, + "bad-swprofile": bad_swprofile_name, + "bad_sha": bad_sha, + "resultclass": args.resultclass, + "resultstore": args.resultstore, + "output-resultstore": temp_resultstore, + } + return bisection_context diff --git a/fastpath/fastpath.py b/fastpath/fastpath.py index 15b5586ac2e165a5d49f7b37ce2860030a635314..b60638b4133592e1b063aafb348d8f8ee1b0ff60 100755 --- a/fastpath/fastpath.py +++ b/fastpath/fastpath.py @@ -22,6 +22,7 @@ from fastpath.commands import cliutils from fastpath.utils import workspace +from fastpath.commands.nouns import bisect from fastpath.commands.nouns import plan from fastpath.commands.nouns import preference from fastpath.commands.nouns import result @@ -78,6 +79,7 @@ def main(): # Register all the nouns. 
nouns = {} + nouns[bisect.add_parser(subparsers, formatter)] = bisect nouns[plan.add_parser(subparsers, formatter)] = plan nouns[preference.add_parser(subparsers, formatter)] = preference nouns[result.add_parser(subparsers, formatter)] = result diff --git a/scripts/build_local_kernel.sh b/scripts/build_local_kernel.sh new file mode 100755 index 0000000000000000000000000000000000000000..3505670b83517582cedd86b5b3b58b0c899bedbe --- /dev/null +++ b/scripts/build_local_kernel.sh @@ -0,0 +1,73 @@ +#!/bin/bash +# Copyright (c) 2025, Arm Limited. +# SPDX-License-Identifier: MIT + +# This script builds the Linux kernel using tuxmake. +# It builds the locally available Linux repo specified in LINUX_REPO_PATH. +set -e + +# Defaults +ARCH="${ARCH:-arm64}" +CONFIG="${CONFIG:-defconfig}" +TOOLCHAIN="${TOOLCHAIN:-gcc-12}" +EXTRA_CONFIG="${EXTRA_CONFIG:+${EXTRA_CONFIG};}$(dirname "$0")/../kconfigs/fastpath.frag" +LINUX_REPO_PATH="${LINUX_REPO_PATH:-$(pwd)}" +mkdir -p /tmp/bisect +OUT_DIR="${OUT_DIR:-$(mktemp -d /tmp/bisect/out.XXXXXXXX)}" +WORK_DIR="${WORK_DIR:-$(mktemp -d /tmp/bisect/work.XXXXXXXX)}" +if [ -n "${CI}" ]; then + RUNTIME="${RUNTIME:-null}" +else + RUNTIME="${RUNTIME:-docker}" +fi + +# Prepare directories +mkdir -p "${OUT_DIR}" "${WORK_DIR}" + +# Build tuxmake config +TUXMAKE_CONFIG="-k ${CONFIG}" +if [ -n "${EXTRA_CONFIG}" ]; then + TUXMAKE_CONFIG="${TUXMAKE_CONFIG} -K ${EXTRA_CONFIG}" +fi + +# Clean untracked files in the repo if present +if [ -d "${LINUX_REPO_PATH}" ]; then + pushd "${LINUX_REPO_PATH}" >/dev/null + git clean -d -x -f + popd >/dev/null +fi + +# Build kernel +echo "Building kernel with tuxmake..." +if ! tuxmake \ + -r "${RUNTIME}" \ + -a "${ARCH}" \ + --toolchain "${TOOLCHAIN}" \ + --directory "${LINUX_REPO_PATH}" \ + ${TUXMAKE_CONFIG} \ + --out "${OUT_DIR}" \ + -b "${WORK_DIR}" \ + config kernel modules; then + echo "Kernel build failed. Aborting." 
>&2 + exit 125 +fi + +# Write kernel and modules path, and kernel git SHA to .env file for sourcing by other scripts +ENV_FILE="$(dirname "$0")/.env" +# Retrieve current git commit SHA from the kernel source repo +if [ -d "${LINUX_REPO_PATH}" ]; then + pushd "${LINUX_REPO_PATH}" >/dev/null + GITSHA="$(git rev-parse HEAD)" + popd >/dev/null +else + GITSHA="" +fi + +echo "GITSHA=${GITSHA}" + +cat > "$ENV_FILE" <