From 345811701dbbbc6e48b937415b3572fd6f4b38c4 Mon Sep 17 00:00:00 2001 From: Aishwarya TCV Date: Thu, 5 Jun 2025 01:08:29 +0100 Subject: [PATCH 1/5] cli: scaffold bisect noun with start and run verbs Introduce initial structure for the `bisect` noun with `start` and `run` verbs in the FastPath CLI. These will support automated kernel regression bisection workflows. - Adds CLI argument parsing for `fastpath bisect start` - Adds CLI argument parsing for `fastpath bisect run` - Registers both verbs under a new `bisect` noun - Includes placeholder dispatch() methods for future logic This sets up the interface and CLI plumbing in preparation for the bisection plan generation and benchmark evaluation implementation. Signed-off-by: Aishwarya TCV --- fastpath/commands/nouns/bisect.py | 69 +++++++++ fastpath/commands/verbs/bisect/__init__.py | 0 fastpath/commands/verbs/bisect/run.py | 102 +++++++++++++ fastpath/commands/verbs/bisect/start.py | 163 +++++++++++++++++++++ fastpath/fastpath.py | 2 + 5 files changed, 336 insertions(+) create mode 100644 fastpath/commands/nouns/bisect.py create mode 100644 fastpath/commands/verbs/bisect/__init__.py create mode 100644 fastpath/commands/verbs/bisect/run.py create mode 100644 fastpath/commands/verbs/bisect/start.py diff --git a/fastpath/commands/nouns/bisect.py b/fastpath/commands/nouns/bisect.py new file mode 100644 index 0000000..fa2318b --- /dev/null +++ b/fastpath/commands/nouns/bisect.py @@ -0,0 +1,69 @@ +# Copyright (c) 2025, Arm Limited. +# SPDX-License-Identifier: MIT + +import os +import sys + + +from fastpath.commands import cliutils +from fastpath.commands.verbs.bisect import start +from fastpath.commands.verbs.bisect import run +from fastpath.utils import workspace + + +noun_name = os.path.splitext(os.path.basename(__file__))[0] +noun_help = """kernel regression bisection and analysis""" +noun_desc = """This command provides functionality for kernel regression bisection and analysis. 
+ It allows users to prepare the necessary inputs for running kernel regression bisection, + execute the bisection process, and analyze the results. + It includes subcommands for starting the bisection process and running the bisection analysis. + The bisection process identifies which kernel commit introduced a regression, + and the analysis provides insights into the results of the bisection.""" +nounp = None +verbs = {} + + +def add_parser(parser, formatter): + """ + Part of the command interface expected by fastpath.py. Adds the subcommand + to the parser, along with all options and documentation. Returns the + subcommand name. + """ + global nounp + nounp = parser.add_parser( + noun_name, + description=noun_desc, + help=noun_help, + epilog=cliutils.epilog, + formatter_class=formatter, + add_help=False, + ) + + cliutils.add_generic_args(nounp) + + subparsers = nounp.add_subparsers( + dest="verb", + metavar="", + title=f"""Supported verbs (run "{nounp.prog} --help" for more info)""", + ) + + # Register all bisect verbs. + verbs[start.add_parser(subparsers, formatter, lambda _: None)] = start + verbs[run.add_parser(subparsers, formatter, lambda _: None)] = run + + return noun_name + + +def dispatch(args): + """ + Part of the command interface expected by fastpath.py. Called to execute the + subcommand, with the arguments the user passed on the command line. The + arguments comply with those requested in add_parser(). + """ + if args.verb not in verbs: + print(f"error: must specify verb\n") + nounp.print_help() + return + + # Dispatch to the correct verb handler. 
+ verbs[args.verb].dispatch(args) diff --git a/fastpath/commands/verbs/bisect/__init__.py b/fastpath/commands/verbs/bisect/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/fastpath/commands/verbs/bisect/run.py b/fastpath/commands/verbs/bisect/run.py new file mode 100644 index 0000000..cbe0c59 --- /dev/null +++ b/fastpath/commands/verbs/bisect/run.py @@ -0,0 +1,102 @@ +# Copyright (c) 2025, Arm Limited. +# SPDX-License-Identifier: MIT + +import os +import sys +import yaml +import argparse +import pandas as pd +import numpy as np + +from fastpath.commands import cliutils +from fastpath.commands.verbs.plan import exec as plan_exec +from fastpath.commands.verbs.result import show +from fastpath.utils.table import Table, load_tables +from fastpath.utils import workspace + + +verb_name = os.path.splitext(os.path.basename(__file__))[0] +verb_help = """run bisection on kernel regression candidate""" +verb_desc = """Run kernel regression bisection on the current system under test (SUT). + This command evaluates the current kernel swprofile against a known good + swprofile, using the benchmark defined in the context setup by 'fastpath bisect start'. + It requires the kernel image and modules to be available at specified paths. + It will load the benchmark plan, execute it, and evaluate the results + to determine if the current kernel configuration is an improvement or regression + over the known good swprofile.""" + + +def add_parser(parser, formatter, add_noun_args): + """ + Part of the command interface expected by fastpath.py. Adds the subcommand + to the parser, along with all options and documentation. Returns the + subcommand name. 
+ """ + verbp = parser.add_parser( + verb_name, + formatter_class=formatter, + description=verb_desc, + help=verb_help, + epilog=cliutils.epilog, + add_help=False, + ) + + cliutils.add_generic_args(verbp) + add_noun_args(verbp) + + verbp.add_argument( + "--context", + metavar="", + required=True, + help="""Path to the bisection context file created by 'fastpath bisect start' + (e.g. ./bisect_template.yaml).""", + ) + + verbp.add_argument( + "--pkgtype", + metavar="", + required=False, + default=None, + choices=[None, "RAW", "DEB"], + help="""Describes what "kernel" parameter points to. Either "RAW" (if a + raw kernel Image) or "DEB" (if a Debian package) If omitted, value + is inferred; "DEB" if kernel string ends with ".deb" or "RAW" + otherwise. All required drivers must be built-in for RAW image.""", + ) + + verbp.add_argument( + "--kernel", + metavar="", + required=True, + help="""Kernel package to install. Either a filesystem path or URL.""", + ) + + verbp.add_argument( + "--modules", + metavar="", + required=False, + default=None, + help="""When pkgtype is RAW, modules to install, provided as tarball. + Must be None for other pkgtypes. Either a filesystem path or + URL.""", + ) + + verbp.add_argument( + "--gitsha", + metavar="", + dest="gitsha", + required=True, + default=None, + help="""Git SHA of the kernel build.""", + ) + + return verb_name + + +def dispatch(args): + """ + Part of the command interface expected by fastpath.py. Called to execute the + subcommand, with the arguments the user passed on the command line. The + arguments comply with those requested in add_parser(). + """ + print("run.py dispatch function.") diff --git a/fastpath/commands/verbs/bisect/start.py b/fastpath/commands/verbs/bisect/start.py new file mode 100644 index 0000000..1199a6a --- /dev/null +++ b/fastpath/commands/verbs/bisect/start.py @@ -0,0 +1,163 @@ +# Copyright (c) 2025, Arm Limited. 
+# SPDX-License-Identifier: MIT + +import os +import yaml + +from fastpath.commands import cliutils +from fastpath.utils import resultstore as rs +from fastpath.utils import workspace +from fastpath.utils.table import Table, load_tables + + +verb_name = os.path.splitext(os.path.basename(__file__))[0] +verb_help = """prepare required inputs for kernel regression bisection""" +verb_desc = """Prepare the necessary inputs for running kernel regression bisection. + This command generates a plan.yaml template that includes the system under test (SUT) configuration, + the benchmark to run, and the known-good and known-bad kernel configurations. + It resolves the kernel commit SHAs for the provided good and bad configurations + and checks if they are different. If they are the same, it aborts the process. + The generated plan.yaml is then used to execute the plan on SUT.""" + + +def add_parser(parser, formatter, add_noun_args): + """ + Part of the command interface expected by fastpath.py. Adds the subcommand + to the parser, along with all options and documentation. Returns the + subcommand name. + """ + verbp = parser.add_parser( + verb_name, + formatter_class=formatter, + description=verb_desc, + help=verb_help, + epilog=cliutils.epilog, + add_help=False, + ) + + cliutils.add_generic_args(verbp) + add_noun_args(verbp) + + verbp.add_argument( + "--host", + metavar="", + required=True, + help="""Host name or IP address of the connection, or name of Host in + SSH config file.""", + ) + + verbp.add_argument( + "-u", + "--user", + metavar="", + required=False, + default=None, + help="""Login user for the remote connection. When None, SSH uses its default + configured user, which may be specified in the SSH config file if + host is the name of a Host in the SSH config file.""", + ) + + verbp.add_argument( + "-p", + "--port", + metavar="", + required=False, + default=None, + type=int, + help="""Remote port to connect to. 
When None, SSH uses its default configured + port, which may be specified in the SSH config file if host is the name + of a Host in the SSH config file.""", + ) + + verbp.add_argument( + "--keyfile", + metavar="", + required=False, + default=None, + help="""Path of private key to use for connection. When None, SSH uses its + default configured private key(s).""", + ) + + verbp.add_argument( + "--sut", + metavar="", + required=True, + help="""System under test (SUT) ID. Must match exactly the value shown in the + 'name' column in SUT.csv from resultstore. This identifies the target + hardware on which the benchmark results were collected and where the + bisection will be executed.""", + ) + + verbp.add_argument( + "--good-swprofile", + metavar="", + required=True, + help="""Known good swprofile ID from the 'name' column in SWPROFILE.csv from + resultstore. This profile must have matching cmdline/sysctl/bootscript + with the bad profile.""", + ) + + verbp.add_argument( + "--bad-swprofile", + metavar="", + required=True, + help="""Known bad swprofile ID from the 'name' column in SWPROFILE.csv from + resultstore. Must differ from the good profile only in kernel_git_sha.""", + ) + + verbp.add_argument( + "--benchmark", + metavar="", + required=True, + help="""Benchmark to bisect, identified by the 'suite/name' field in BENCHMARK.csv + from resultstore (e.g., 'sysbench/thread'). Ensure the benchmark has been run + with the good and bad swprofiles on the specified SUT.""", + ) + + verbp.add_argument( + "--resultclass", + metavar="", + required=True, + help="""Name of the result class used to judge performance. This should match the + 'name (unit)' column in RESULTCLASS.csv from resultstore + (e.g., 'sysbenchthread-110 (usec)'). 
This value is used to select the + appropriate resultclass row when comparing benchmark performance between + the good and bad swprofiles.""", + ) + + verbp.add_argument( + "--context", + metavar="", + required=True, + help="""Path to the output bisection context YAML file (e.g. ./bisection_context.yaml). + This file includes the kernel bisection plan and related inputs. The 'plan' + section of this file is consumed by the execution command running on the SUT.""", + ) + + default = workspace.getvar("default", "resultstore") + help_text = """URL encoded to describe a resultstore in either csv, sqlite or + mysql format. csv: csv:/// (although "csv:///" is + optional). sqlite: sqlite:///. mysql: + mysql://:@:/.""" + if default: + help_text += f" Defaults to {default}." + + verbp.add_argument( + "--resultstore", + metavar="", + required=(default is None), + default=default, + help=help_text, + ) + + return verb_name + + +def dispatch(args): + """ + Part of the command interface expected by fastpath.py. Called to execute the + subcommand, with the arguments the user passed on the command line. The + arguments comply with those requested in add_parser(). + Access SWPROFILE table and extract kernel_git_sha for good and bad swprofile names. + """ + print("start.py dispatch function.") diff --git a/fastpath/fastpath.py b/fastpath/fastpath.py index 15b5586..b60638b 100755 --- a/fastpath/fastpath.py +++ b/fastpath/fastpath.py @@ -22,6 +22,7 @@ from fastpath.commands import cliutils from fastpath.utils import workspace +from fastpath.commands.nouns import bisect from fastpath.commands.nouns import plan from fastpath.commands.nouns import preference from fastpath.commands.nouns import result @@ -78,6 +79,7 @@ def main(): # Register all the nouns. 
nouns = {} + nouns[bisect.add_parser(subparsers, formatter)] = bisect nouns[plan.add_parser(subparsers, formatter)] = plan nouns[preference.add_parser(subparsers, formatter)] = preference nouns[result.add_parser(subparsers, formatter)] = result -- GitLab From 15009e0837a2e99ba70217e8b9502cb591b83f44 Mon Sep 17 00:00:00 2001 From: Aishwarya TCV Date: Thu, 12 Jun 2025 17:36:59 +0100 Subject: [PATCH 2/5] cli: implement dispatch logic for bisect template generation Update dispatch() of 'start' verb for preparing bisection inputs. This routine loads resultstore tables and extracts kernel SHAs for the given good and bad SWPROFILEs. If both SHAs match, the process aborts early to avoid a meaningless bisection. The SUT name is resolved by tracing the good SWPROFILE through the RESULT and SUT tables. A check ensures the bad SWPROFILE was also run on the same SUT. If not, bisection is aborted to maintain consistency of execution environment. A bisection template is then generated using the resolved SUT, benchmark reference, and commit metadata. Signed-off-by: Aishwarya TCV --- fastpath/commands/verbs/bisect/start.py | 282 ++++++++++++++++++++++-- 1 file changed, 269 insertions(+), 13 deletions(-) diff --git a/fastpath/commands/verbs/bisect/start.py b/fastpath/commands/verbs/bisect/start.py index 1199a6a..6a1b06f 100644 --- a/fastpath/commands/verbs/bisect/start.py +++ b/fastpath/commands/verbs/bisect/start.py @@ -1,23 +1,31 @@ # Copyright (c) 2025, Arm Limited. 
# SPDX-License-Identifier: MIT +import argparse import os +import pandas as pd +import tempfile import yaml from fastpath.commands import cliutils from fastpath.utils import resultstore as rs from fastpath.utils import workspace -from fastpath.utils.table import Table, load_tables - +from fastpath.utils.table import ( + Table, + load_tables, + join_results, + filter_results, +) +from fastpath.commands.verbs.result import merge verb_name = os.path.splitext(os.path.basename(__file__))[0] verb_help = """prepare required inputs for kernel regression bisection""" verb_desc = """Prepare the necessary inputs for running kernel regression bisection. - This command generates a plan.yaml template that includes the system under test (SUT) configuration, - the benchmark to run, and the known-good and known-bad kernel configurations. - It resolves the kernel commit SHAs for the provided good and bad configurations - and checks if they are different. If they are the same, it aborts the process. - The generated plan.yaml is then used to execute the plan on SUT.""" + This command generates a bisection_context.yaml template that includes the system under + test (SUT) configuration, the benchmark to run, and the known-good and known-bad kernel + configurations. It resolves the kernel commit SHAs for the provided good and bad + configurations and checks if they are different. If they are the same, it aborts the + process. The generated bisection_context.yaml is then used to execute the plan on SUT.""" def add_parser(parser, formatter, add_noun_args): @@ -119,10 +127,10 @@ def add_parser(parser, formatter, add_noun_args): metavar="", required=True, help="""Name of the result class used to judge performance. This should match the - 'name (unit)' column in RESULTCLASS.csv from resultstore - (e.g., 'sysbenchthread-110 (usec)'). 
This value is used to select the - appropriate resultclass row when comparing benchmark performance between - the good and bad swprofiles.""", + 'name' column in RESULTCLASS.csv from resultstore (e.g., 'sysbenchthread-110'). + The unit is matched automatically and does not need to be included. This value + is used to select the appropriate resultclass row when comparing benchmark + performance between the good and bad swprofiles.""", ) verbp.add_argument( @@ -130,7 +138,7 @@ def add_parser(parser, formatter, add_noun_args): metavar="", required=True, help="""Path to the output bisection context YAML file (e.g. ./bisection_context.yaml). - This file includes the kernel bisection plan and related inputs. The 'plan' + This file includes the kernel bisection context and related inputs. The 'plan' section of this file is consumed by the execution command running on the SUT.""", ) @@ -160,4 +168,252 @@ def dispatch(args): arguments comply with those requested in add_parser(). Access SWPROFILE table and extract kernel_git_sha for good and bad swprofile names. 
""" - print("start.py dispatch function.") + tables = load_tables(args.resultstore, merge_similar=True) + results = join_results(tables) + result = filter_results( + results, + suts=[args.sut], + swprofiles=[args.good_swprofile, args.bad_swprofile], + benchmarks=[args.benchmark], + ) + + if len(result) == 0: + raise Exception("No results to display after filtering") + + good_sha, bad_sha, shared_fields = validate_swprofiles(args, tables) + + validate_bisection_inputs(result, args) + + benchmark_entry = build_benchmark_entry(args, tables) + + temp_resultstore = merge_baseline_results(args) + + bisection_context = build_bisection_context( + args, + good_sha, + bad_sha, + benchmark_entry, + shared_fields, + temp_resultstore, + ) + + with open(args.context, "w") as f: + yaml.dump( + bisection_context, f, sort_keys=False, default_flow_style=False + ) + + print(f"bisection template yaml written to: {args.context}") + + +def merge_baseline_results(args): + """ + Create a temporary resultstore directory and merge baseline results for good and bad swprofiles. + + Args: + args: Parsed command-line arguments. + + Returns: + Path to the temporary resultstore directory. + """ + temp_resultstore = tempfile.mkdtemp(prefix="bisect-resultstore-") + + # Prepare the merge arguments as expected by merge.py: + merge_args = argparse.Namespace( + resultstore=[args.resultstore], + sut=[args.sut], + swprofile=[args.good_swprofile, args.bad_swprofile], + benchmark=[args.benchmark], + output=temp_resultstore, + append=True, + ) + merge.dispatch(merge_args) + + return temp_resultstore + + +def validate_swprofiles(args, tables): + """ + Validate that both good and bad swprofiles exist and their cmdline, sysctl, and bootscript fields match. + + Args: + args: Parsed command-line arguments. + tables: Loaded tables from the resultstore. + + Returns: + Tuple of good_sha, bad_sha and swprofile shared_fields DataFrames. 
+ """ + swprofile = tables[Table.SWPROFILE] + good_row = swprofile[swprofile["unique"] == args.good_swprofile] + bad_row = swprofile[swprofile["unique"] == args.bad_swprofile] + if good_row.empty: + raise Exception( + f"Could not find swprofile row for good-swprofile: {args.good_swprofile}" + ) + if bad_row.empty: + raise Exception( + f"Could not find swprofile row for bad-swprofile: {args.bad_swprofile}" + ) + + shared_fields = {} + for field in ["cmdline", "sysctl", "bootscript"]: + good_val = good_row.iloc[0][field] + bad_val = bad_row.iloc[0][field] + if good_val != bad_val: + raise Exception( + f"Mismatch in swprofile field '{field}' between good and bad profiles. Aborting bisection." + ) + if isinstance(good_val, list): + shared_fields[field] = good_val + elif good_val is None: + shared_fields[field] = [] + else: + shared_fields[field] = [good_val] + + good_sha = good_row.iloc[0]["kernel_git_sha"] + bad_sha = bad_row.iloc[0]["kernel_git_sha"] + if good_sha == bad_sha: + raise Exception("Good and Bad SHAs are identical. Aborting bisection.") + + return good_sha, bad_sha, shared_fields + + +def validate_bisection_inputs(result, args): + """ + Validate that results for good and bad swprofiles exist and were run on the specified SUT. + Also validate that the specified resultclass exists in the filtered results. + + Args: + result: Filtered results DataFrame including both good and bad swprofiles. + args: Parsed command-line arguments. 
+ """ + good_results = result[result["swprofile"] == args.good_swprofile] + bad_results = result[result["swprofile"] == args.bad_swprofile] + + if good_results.empty: + raise Exception( + "No results found for the good swprofile after filtering" + ) + if bad_results.empty: + raise Exception( + "No results found for the bad swprofile after filtering" + ) + + # Ensure both good and bad results were run on the specified SUT + if not all(good_results["sut"] == args.sut): + raise Exception("Good swprofile was not executed on the specified SUT.") + if not all(bad_results["sut"] == args.sut): + raise Exception("Bad swprofile was not executed on the specified SUT.") + + # Validate that the specified resultclass exists in the filtered results + if args.resultclass not in result["resultclass"].values: + raise Exception( + f"Specified resultclass '{args.resultclass}' not found in the filtered results.\n" + ) + + +def build_benchmark_entry(args, tables): + """ + Build the benchmark entry dictionary from the benchmark table and associated params. + + Args: + args: Parsed command-line arguments. + tables: Loaded tables from the resultstore. + + Returns: + Dictionary containing all benchmark metadata fields: suite, name, type, image, params. 
+ """ + + benchmark_table = tables[Table.BENCHMARK] + param_table = tables[Table.PARAM] + benchmark_row = benchmark_table[benchmark_table["unique"] == args.benchmark] + + # Save benchmark_id before flattening row + benchmark_id = benchmark_row.index[0] + benchmark_row = benchmark_row.iloc[0] + # Gather params by joining PARAM table on benchmark_id + params = {} + for _, row in param_table[ + param_table["benchmark_id"] == benchmark_id + ].iterrows(): + params[row["name"]] = row["value"] + benchmark_entry = { + "suite": benchmark_row.get("suite"), + "name": benchmark_row.get("name"), + "type": benchmark_row.get("type"), + "image": benchmark_row.get("image"), + "params": params, + } + return benchmark_entry + + +def build_bisection_context( + args, good_sha, bad_sha, benchmark_entry, shared_fields, temp_resultstore +): + """ + Build the bisection_context data dictionary for YAML output. + + Args: + args: Parsed command-line arguments. + good_sha: Kernel git SHA for good swprofile. + bad_sha: Kernel git SHA for bad swprofile. + benchmark_entry: Benchmark entry dict. + temp_resultstore: Path to temporary resultstore directory. + + Returns: + Dictionary with bisection_context data. + """ + + # The ID uniquely refers to an object in the resultstore provided by the user. If multiple + # entries share the same name, the ID will be formed by appending the primary key to the name, + # i.e., ":". This ensures uniqueness but is only valid within the original + # resultstore. After copying objects into a new resultstore for bisection, we must determine + # their identities within this new context. + # + # Since only one SUT and one benchmark are copied, their names are guaranteed to be unique and + # remain stable without requiring suffixes. However, we copy two swprofiles (good and bad), and + # while it is theoretically possible they share the same name, this is highly unlikely. Therefore, + # we simplify by assuming swprofile names remain unique after copying. 
+    #
+    # Additionally, validate_swprofiles ensures that both good and bad swprofiles exist and that they
+    # share the same cmdline, sysctl, and bootscript parameters. This guarantees the kernel is the only
+    # differing variable during bisection.
+    def sanitize_id(id):
+        return id.split(":")[0]
+
+    sut_name = sanitize_id(args.sut)
+    good_swprofile_name = sanitize_id(args.good_swprofile)
+    bad_swprofile_name = sanitize_id(args.bad_swprofile)
+
+    bisection_context = {
+        "plan": {
+            "sut": {
+                "name": sut_name,
+                "connection": {
+                    "method": "SSH",
+                    "params": {
+                        "host": args.host,
+                        "user": args.user,
+                        "port": args.port,
+                        "keyfile": args.keyfile,
+                    },
+                },
+            },
+            "swprofiles": [shared_fields],
+            "benchmarks": [benchmark_entry],
+            "defaults": {
+                "benchmark": {
+                    "warmups": 1,
+                    "repeats": 3,
+                    "sessions": 2,
+                }
+            },
+        },
+        "good-swprofile": good_swprofile_name,
+        "good_sha": good_sha,
+        "bad-swprofile": bad_swprofile_name,
+        "bad_sha": bad_sha,
+        "resultclass": args.resultclass,
+        "resultstore": args.resultstore,
+        "output-resultstore": temp_resultstore,
+    }
+    return bisection_context
-- 
GitLab


From 16cc9591bd5ea3b2e395c45a6fe5f0149824f123 Mon Sep 17 00:00:00 2001
From: Aishwarya TCV
Date: Fri, 13 Jun 2025 13:56:43 +0100
Subject: [PATCH 3/5] cli: execute benchmark plan and implement result evaluation and verdict logic for bisect run

This patch includes the implementation of the `bisect run` subcommand in
the CLI. It resolves and updates the bisection template by injecting
kernel image and module paths, then executes the benchmark plan using
plan_exec to generate new result data.

After execution, the results are loaded and joined with metadata tables
to build a comprehensive dataset. The data is filtered by benchmark and
resultclass, with support for partial string matches and normalized
profiles. The latest run is assumed as the test profile. 
It uses show.pivot_results() to compute statistical summaries, and reduces the pivoted output using slicing to match the expectations of compute_change(). The change status is extracted and reported as regression, improvement, or no significant change. This enables automated result analysis and verdict detection as part of kernel regression bisection. Signed-off-by: Aishwarya TCV --- fastpath/commands/verbs/bisect/run.py | 176 +++++++++++++++++++++++++- 1 file changed, 170 insertions(+), 6 deletions(-) diff --git a/fastpath/commands/verbs/bisect/run.py b/fastpath/commands/verbs/bisect/run.py index cbe0c59..0f44a71 100644 --- a/fastpath/commands/verbs/bisect/run.py +++ b/fastpath/commands/verbs/bisect/run.py @@ -1,18 +1,27 @@ # Copyright (c) 2025, Arm Limited. # SPDX-License-Identifier: MIT +import argparse import os import sys -import yaml -import argparse -import pandas as pd +import tempfile + +import natsort as ns import numpy as np +import pandas as pd +import yaml from fastpath.commands import cliutils from fastpath.commands.verbs.plan import exec as plan_exec +from fastpath.commands.verbs.result import merge from fastpath.commands.verbs.result import show -from fastpath.utils.table import Table, load_tables -from fastpath.utils import workspace +from fastpath.utils import plan as plan_utils +from fastpath.utils.table import ( + Table, + load_tables, + join_results, + filter_results, +) verb_name = os.path.splitext(os.path.basename(__file__))[0] @@ -99,4 +108,159 @@ def dispatch(args): subcommand, with the arguments the user passed on the command line. The arguments comply with those requested in add_parser(). 
""" - print("run.py dispatch function.") + + # Load the bisection context YAML file + with open(args.context, "r") as f: + context = yaml.safe_load(f) + + # Extract SUT name, good swprofile, and resultclass name from context + sut = context["plan"]["sut"]["name"] + good_swprofile = context.get("good-swprofile") + resultclass = context.get("resultclass") + + # Extract benchmark suite and name, and construct benchmark identifier + benchmark_suite = context["plan"]["benchmarks"][0]["suite"] + benchmark_name = context["plan"]["benchmarks"][0]["name"] + benchmark = f"{benchmark_suite}/{benchmark_name}" + + # Prepare a temporary plan file with updated swprofile info from args + temp_plan_path = prepare_plan_from_context(context, args) + test_swprofile = context["plan"]["swprofiles"][0]["name"] + + # Prepare arguments for executing the plan + plan_args = argparse.Namespace( + plan=temp_plan_path, + output=context["output-resultstore"], + append=True, + ) + plan_exec.dispatch(plan_args) + + # Load results from the resultstore (merged tables) + resultstore = context["output-resultstore"] + tables = load_tables(resultstore, merge_similar=True) + + # Create a list of swprofiles to compare (good and test) + swprofiles = [good_swprofile, test_swprofile] + + # Filter results to relevant entries for the SUT, swprofiles, and benchmark + sut = context["plan"]["sut"]["name"] + results = join_results(tables) + results = filter_results( + results, + suts=[sut], + swprofiles=swprofiles, + benchmarks=[benchmark], + resultclasses=[resultclass], + ) + + # Analyze changes and report regression/improvement/no change + analyze_changes( + results, + sut, + swprofiles, + benchmark, + resultclass, + test_swprofile, + good_swprofile, + ) + + +def prepare_plan_from_context(context, args): + """ + Prepare a benchmark plan YAML file from the given bisection context and command-line arguments. 
+ + This function updates the first software profile in the context's plan with kernel-related + information provided via command-line arguments, then writes the updated plan to a temporary + YAML file. + + Args: + context (dict): The bisection context loaded from YAML. + args (Namespace): Parsed command-line arguments containing kernel information. + + Returns: + str: The file path to the temporary YAML plan file. + """ + plan = context.get("plan", {}) + swprofiles = plan["swprofiles"] + swprofile = swprofiles[0] + swprofile["name"] = f"bisect-{args.gitsha[:12]}" + swprofile["pkgtype"] = args.pkgtype + swprofile["kernel"] = args.kernel + swprofile["modules"] = args.modules + swprofile["gitsha"] = args.gitsha + + temp_plan_path = "plan.yaml" + + with open(temp_plan_path, "w") as temp_plan_file: + plan_utils.dump(plan, temp_plan_file) + return temp_plan_path + + +def analyze_changes( + results, + sut, + swprofiles, + benchmark, + resultclass, + test_swprofile, + good_swprofile, +): + """ + Analyze benchmark result changes between test and good software profiles and print outcome. + + This function pivots the results, computes changes, and determines if there is a regression, + improvement, or no significant change for the specified benchmark and result class. + + Args: + results (pandas.DataFrame): Filtered benchmark results DataFrame. + sut (str): Name of the system under test. + swprofiles (list): List of software profile names compared. + benchmark (str): Benchmark identifier string. + resultclass (str): Result class name prefix. + test_swprofile (str): Name of the test software profile. + good_swprofile (str): Name of the good (baseline) software profile. + + Returns: + None + + Exits: + Exits with code 1 if regression detected. + Exits with code 0 if improvement or no significant change detected. 
+ """ + pivoted = show.pivot_results(results, [sut], swprofiles) + pivoted = pivoted.xs(key=sut, level="sut", axis=1) + + class Args: + baseline = True + noise_threshold = 0.01 + ascii = False + relative = False + display = "all" + + compute_args = Args() + changed = show.compute_change(pivoted, compute_args) + + try: + row = changed.loc[(benchmark, resultclass)] + except KeyError: + print("ERROR: Could not find row for resultclass in pivoted results.") + sys.exit(2) + + change_val = row[("change", test_swprofile)] + change_val = change_val.iloc[0] + + if change_val == "regression": + print( + f"Result: REGRESSION detected for resultclass '{resultclass}' comparing '{test_swprofile}' vs '{good_swprofile}'." + ) + sys.exit(1) + elif change_val == "improvement": + print( + f"Result: IMPROVEMENT detected for resultclass '{resultclass}' comparing '{test_swprofile}' vs '{good_swprofile}'." + ) + sys.exit(0) + else: + print( + f"Result: NO SIGNIFICANT CHANGE detected for resultclass '{resultclass}' comparing '{test_swprofile}' vs '{good_swprofile}'." + ) + sys.exit(0) -- GitLab From 162a594727a87c4890c0f791cfca6e38f2dde8a4 Mon Sep 17 00:00:00 2001 From: Aishwarya TCV Date: Fri, 13 Jun 2025 14:57:38 +0100 Subject: [PATCH 4/5] scripts: add kernel build script using tuxmake for local Linux repo This script provides an example of how to build a Linux kernel using tuxmake, targeting both CI and local developer workflows. It assumes a locally available Linux source tree, the location of which can be configured using the LINUX_REPO_PATH environment variable. It sets default values for architecture, toolchain, config, and output paths, while allowing overrides for custom configurations. It selects the tuxmake runtime based on context: 'null' for CI systems, or 'docker' if available locally. The script ensures a clean build environment by removing untracked files, then invokes tuxmake to build the kernel, modules, and config. 
If an Image.gz is generated, it is uncompressed to produce an Image file suitable for booting. Signed-off-by: Aishwarya TCV --- scripts/build_local_kernel.sh | 73 +++++++++++++++++++++++++++++++++++ 1 file changed, 73 insertions(+) create mode 100755 scripts/build_local_kernel.sh diff --git a/scripts/build_local_kernel.sh b/scripts/build_local_kernel.sh new file mode 100755 index 0000000..3505670 --- /dev/null +++ b/scripts/build_local_kernel.sh @@ -0,0 +1,73 @@ +#!/bin/bash +# Copyright (c) 2025, Arm Limited. +# SPDX-License-Identifier: MIT + +# This script builds the Linux kernel using tuxmake. +# It builds the locally available Linux repo specified in LINUX_REPO_PATH. +set -e + +# Defaults +ARCH="${ARCH:-arm64}" +CONFIG="${CONFIG:-defconfig}" +TOOLCHAIN="${TOOLCHAIN:-gcc-12}" +EXTRA_CONFIG="${EXTRA_CONFIG:+${EXTRA_CONFIG};}$(dirname "$0")/../kconfigs/fastpath.frag" +LINUX_REPO_PATH="${LINUX_REPO_PATH:-$(pwd)}" +mkdir -p /tmp/bisect +OUT_DIR="${OUT_DIR:-$(mktemp -d /tmp/bisect/out.XXXXXXXX)}" +WORK_DIR="${WORK_DIR:-$(mktemp -d /tmp/bisect/work.XXXXXXXX)}" +if [ -n "${CI}" ]; then + RUNTIME="${RUNTIME:-null}" +else + RUNTIME="${RUNTIME:-docker}" +fi + +# Prepare directories +mkdir -p "${OUT_DIR}" "${WORK_DIR}" + +# Build tuxmake config +TUXMAKE_CONFIG="-k ${CONFIG}" +if [ -n "${EXTRA_CONFIG}" ]; then + TUXMAKE_CONFIG="${TUXMAKE_CONFIG} -K ${EXTRA_CONFIG}" +fi + +# Clean untracked files in the repo if present +if [ -d "${LINUX_REPO_PATH}" ]; then + pushd "${LINUX_REPO_PATH}" >/dev/null + git clean -d -x -f + popd >/dev/null +fi + +# Build kernel +echo "Building kernel with tuxmake..." +if ! tuxmake \ + -r "${RUNTIME}" \ + -a "${ARCH}" \ + --toolchain "${TOOLCHAIN}" \ + --directory "${LINUX_REPO_PATH}" \ + ${TUXMAKE_CONFIG} \ + --out "${OUT_DIR}" \ + -b "${WORK_DIR}" \ + config kernel modules; then + echo "Kernel build failed. Aborting." 
>&2 + exit 125 +fi + +# Write kernel and modules path, and kernel git SHA to .env file for sourcing by other scripts +ENV_FILE="$(dirname "$0")/.env" +# Retrieve current git commit SHA from the kernel source repo +if [ -d "${LINUX_REPO_PATH}" ]; then + pushd "${LINUX_REPO_PATH}" >/dev/null + GITSHA="$(git rev-parse HEAD)" + popd >/dev/null +else + GITSHA="" +fi + +echo "GITSHA=${GITSHA}" + +# Uncompress Image.gz to produce a bootable Image, if one was generated +if [ -f "${OUT_DIR}/Image.gz" ]; then + gunzip -c "${OUT_DIR}/Image.gz" > "${OUT_DIR}/Image" +fi + +cat > "$ENV_FILE" <<EOF +KERNEL_PATH=${OUT_DIR}/Image +MODULES_PATH=${OUT_DIR}/modules.tar.xz +GITSHA=${GITSHA} +EOF -- GitLab From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 From: Aishwarya TCV Date: Mon, 16 Jun 2025 23:52:22 +0100 Subject: [PATCH 5/5] scripts: add execute_bisection.sh for git bisect-based regression detection Adds a new helper script to support automated kernel regression bisection using 'git bisect run'. The script builds the kernel using build_local_kernel.sh, exports kernel image and modules path, then runs benchmarks using 'fastpath bisect run'. It captures and propagates the exit status from fastpath, enabling correct signaling to git bisect: 0 => improvement or no change (good) 1 => regression (bad) 125 => skip (no data or filter mismatch) This supports fast identification of performance regressions across kernel versions. Signed-off-by: Aishwarya TCV --- scripts/execute_bisection.sh | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) create mode 100755 scripts/execute_bisection.sh diff --git a/scripts/execute_bisection.sh b/scripts/execute_bisection.sh new file mode 100755 index 0000000..9961ab6 --- /dev/null +++ b/scripts/execute_bisection.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# Copyright (c) 2025, Arm Limited. +# SPDX-License-Identifier: MIT + +set -e + +# Build the kernel using existing script +echo "Building kernel for current commit..." +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +${SCRIPT_DIR}/build_local_kernel.sh + +# Source environment variables +source "${SCRIPT_DIR}/.env" + +# Run FastPath bisect evaluation +fastpath bisect run \ + --context ./bisect_template.yaml \ + --kernel "$KERNEL_PATH" \ + --modules "$MODULES_PATH" \ + --gitsha "${GITSHA}" +status=$?
+echo "fastpath bisect run exited with status $status" +exit $status \ No newline at end of file -- GitLab