diff --git a/lisa/_assets/kmodules/lisa/features.c b/lisa/_assets/kmodules/lisa/features.c index b4b2f9c92db9bb82b446fde2d4975a4c836416c0..5825e6e05763949f8517e7f1f4c5a052f326f3b0 100644 --- a/lisa/_assets/kmodules/lisa/features.c +++ b/lisa/_assets/kmodules/lisa/features.c @@ -5,6 +5,21 @@ #include "main.h" #include "features.h" +static int __reset_feature(struct feature* feature) { + mutex_lock(feature->lock); + + /* All features should have been deinitialized at this point, so this + * should be 0 + */ + BUG_ON(feature->__explicitly_enabled); + + feature->__enable_ret = 0; + feature->data = NULL; + + mutex_unlock(feature->lock); + return 0; +} + int __enable_feature(struct feature* feature) { int ret; @@ -15,7 +30,7 @@ int __enable_feature(struct feature* feature) { if (feature->enabled) { ret = feature->__enable_ret; } else { - pr_info("Enabling lisa legacy feature %s\n", feature->name); + pr_info("Starting legacy feature %s\n", feature->name); if (feature->enable) ret = feature->enable(feature); else @@ -23,7 +38,7 @@ int __enable_feature(struct feature* feature) { feature->__enable_ret = ret; if (ret) - pr_err("Failed to enable legacy feature %s: %i", feature->name, ret); + pr_err("Failed to start legacy feature %s: %i", feature->name, ret); } feature->enabled++; mutex_unlock(feature->lock); @@ -41,13 +56,13 @@ int __disable_feature(struct feature* feature) { } else { feature->enabled--; if (!feature->enabled) { - pr_info("Disabling lisa legacy feature %s\n", feature->name); + pr_info("Stopping legacy feature %s\n", feature->name); if (feature->disable) ret = feature->disable(feature); else ret = 0; if (ret) - pr_err("Failed to disable legacy feature %s: %i\n", feature->name, ret); + pr_err("Failed to stop legacy feature %s: %i\n", feature->name, ret); } else { ret = 0; } @@ -58,7 +73,7 @@ int __disable_feature(struct feature* feature) { typedef int (*feature_process_t)(struct feature*); -static int __process_features(char **selected, size_t selected_len, 
feature_process_t process) { +static int __process_features(const char *const *selected, size_t selected_len, feature_process_t process) { int ret = 0; if (selected) { @@ -104,8 +119,9 @@ static int __enable_feature_explicitly(struct feature* feature) { return __enable_feature(feature); } -int init_features(char **selected, size_t selected_len) { +int init_features(const char *const *selected, size_t selected_len) { BUG_ON(MAX_FEATURES < ((__lisa_features_stop - __lisa_features_start) / sizeof(struct feature))); + __process_features(NULL, 0, __reset_feature); pr_info("Available legacy features:"); __process_features(NULL, 0, __list_feature); @@ -116,12 +132,13 @@ static int __disable_explicitly_enabled_feature(struct feature* feature) { int ret = 0; mutex_lock(feature->lock); - int selected = feature->__explicitly_enabled; - mutex_unlock(feature->lock); - while (selected) { + while (feature->__explicitly_enabled) { + mutex_unlock(feature->lock); ret |= __disable_feature(feature); - selected--; + mutex_lock(feature->lock); + feature->__explicitly_enabled--; } + mutex_unlock(feature->lock); return ret; } @@ -129,6 +146,17 @@ int deinit_features(void) { return __process_features(NULL, 0, __disable_explicitly_enabled_feature); } +int init_feature(const char *feature) { + BUG_ON(MAX_FEATURES < ((__lisa_features_stop - __lisa_features_start) / sizeof(struct feature))); + const char *selected[] = {feature}; + return __process_features(selected, ARRAY_SIZE(selected), __enable_feature_explicitly); +} + +int deinit_feature(const char *feature) { + const char *selected[] = {feature}; + return __process_features(selected, ARRAY_SIZE(selected), __disable_explicitly_enabled_feature); +} + int __placeholder_init(struct feature *feature) { pr_err("Legacy feature not available: %s\n", feature->name); return 1; diff --git a/lisa/_assets/kmodules/lisa/features.h b/lisa/_assets/kmodules/lisa/features.h index 
b739d78bb18c4cf8494f00620fe8167fe2a55e24..cf7630bbb3ae5805c24bedfc639e1148c5dbcfdd 100644 --- a/lisa/_assets/kmodules/lisa/features.h +++ b/lisa/_assets/kmodules/lisa/features.h @@ -141,7 +141,7 @@ int __placeholder_deinit(struct feature *feature); * struct features * is built automatically by DEFINE_FEATURE() and does not * need to be passed. */ -int init_features(char **selected, size_t selected_len); +int init_features(const char *const *selected, size_t selected_len); /** * deinit_features() - De-initialize features @@ -151,6 +151,10 @@ int init_features(char **selected, size_t selected_len); */ int deinit_features(void); + +int init_feature(const char *feature); +int deinit_feature(const char *feature); + /** * feature_name() - Get feature's name * @feature: Pointer to feature to get the name of. diff --git a/lisa/_assets/kmodules/lisa/main.c b/lisa/_assets/kmodules/lisa/main.c index e696214a7c12d4701f9f2c9f3db00f1eb80f43c1..1a0d903f153b4079f3f61b95823569f56315f394 100644 --- a/lisa/_assets/kmodules/lisa/main.c +++ b/lisa/_assets/kmodules/lisa/main.c @@ -2,33 +2,18 @@ #include #include "main.h" -#include "generated/module_version.h" #include "rust/lisakmod/bindings.h" /* Import all the symbol namespaces that appear to be defined in the kernel * sources so that we won't trigger any warning */ #include "generated/symbol_namespaces.h" -static char* version = LISA_MODULE_VERSION; -module_param(version, charp, 0); -MODULE_PARM_DESC(version, "Module version defined as sha1sum of the module sources"); - static void modexit(void) { rust_mod_exit(); } static int __init modinit(void) { - pr_info("Loading Lisa module version %s\n", LISA_MODULE_VERSION); - if (strcmp(version, LISA_MODULE_VERSION)) { - pr_err("Lisa module version check failed. 
Got %s, expected %s\n", version, LISA_MODULE_VERSION); - return -EPROTO; - } - - int ret = rust_mod_init(); - if (ret) { - pr_err("Lisa module failed to initialize properly: %i\n", ret); - } - return ret; + return rust_mod_init(); } module_init(modinit); diff --git a/lisa/_assets/kmodules/lisa/process_rust.py b/lisa/_assets/kmodules/lisa/process_rust.py index 952dddb2604324f1862572c76381edfa1a801937..676235f0f73901e90e759e53063887a86ccae03f 100755 --- a/lisa/_assets/kmodules/lisa/process_rust.py +++ b/lisa/_assets/kmodules/lisa/process_rust.py @@ -29,6 +29,7 @@ from operator import itemgetter import textwrap import itertools import json +import re SEP = '\n ' @@ -145,7 +146,18 @@ def main(): self.logical_type = logical_type self.c_arg_type = c_arg_type self.c_arg_header = c_arg_header - self.c_field_type = c_field_type + self._c_field_type = c_field_type + + @property + def c_field_type(self): + # Avoid unnecessary wrapping with __typeof__() when it is only + # wrapping an identifier to increase compatibility with parsers + # that do not support __typeof__() syntax + return re.sub( + r'__typeof__\((([_0-9A-Za-z]+))\)', + r'\1', + self._c_field_type + ) @property def tp_struct_entry(self): @@ -156,7 +168,7 @@ def main(): elif typ == 'rust-string': # Add +1 for the null-terminator return f'__dynamic_array(char, {self.name}, {self.name}->len + 1)' - elif typ in ('u8', 's8', 'u16', 's16', 'u32', 's32', 'u64', 's64'): + elif typ in ('u8', 's8', 'u16', 's16', 'u32', 's32', 'u64', 's64', 'c-static-string'): return f'__field({self.c_field_type}, {self.name})' else: raise ValueError(f'Unsupported logical type: {typ}') @@ -176,7 +188,7 @@ def main(): memcpy(__get_dynamic_array({self.name}), {self.name}->data, {self.name}->len * sizeof(char)); ((char *)__get_dynamic_array({self.name}))[{self.name}->len] = 0; ''') - elif typ in ('u8', 's8', 'u16', 's16', 'u32', 's32', 'u64', 's64'): + elif typ in ('u8', 's8', 'u16', 's16', 'u32', 's32', 'u64', 's64', 'c-static-string'): return 
f'{self.entry} = *({self.name});' else: raise ValueError(f'Unsupported logical type: {typ}') @@ -193,6 +205,8 @@ def main(): return (f'{self.name}=%lld', [self.entry]) elif typ == 'u64': return (f'{self.name}=%llu', [self.entry]) + elif typ == 'c-static-string': + return (f'{self.name}=%s', [self.entry]) elif typ in ('rust-string', 'c-string'): return (f'{self.name}=%s', [f'__get_str({self.name})']) else: @@ -211,9 +225,18 @@ def main(): for field in entry['fields'] ] + def wrap_c_type(c_typ): + # Avoid unnecessary wrapping with __typeof__() to increase + # compatibility with parsers that do not support + # __typeof__() syntax + if c_typ.isidentifier(): + return c_typ + else: + return f'__typeof__({c_typ})' + nl = '\n ' proto = ', '.join( - f'__typeof__({field.c_arg_type}) {field.name}' + f'{wrap_c_type(field.c_arg_type)} {field.name}' for field in fields ) args = ', '.join( diff --git a/lisa/_assets/kmodules/lisa/rust/lisakmod-macros/Cargo.lock b/lisa/_assets/kmodules/lisa/rust/lisakmod-macros/Cargo.lock index 76baf9ec28ebf7afe037140838ae6f610cf557c3..675db00d536efb9d376fb8b6a2bc6ab9edebcda7 100644 --- a/lisa/_assets/kmodules/lisa/rust/lisakmod-macros/Cargo.lock +++ b/lisa/_assets/kmodules/lisa/rust/lisakmod-macros/Cargo.lock @@ -4,15 +4,15 @@ version = 4 [[package]] name = "bitflags" -version = "2.9.0" +version = "2.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c8214115b7bf84099f1309324e63141d4c5d7cc26862f97a0a857dbefe165bd" +checksum = "1b8e56985ec62d17e9c1001dc89c88ecd7dc08e47eba5ec7c29c7b5eeecde967" [[package]] name = "cfg-if" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +checksum = "9555578bc9e57714c812a1f84e4fc5b4d21fcb063490c624de019f7464c91268" [[package]] name = "either" @@ -22,9 +22,9 @@ checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" [[package]] name = 
"getrandom" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73fea8450eea4bac3940448fb7ae50d91f034f941199fcd9d909a5a07aa455f0" +checksum = "26145e563e54f2cadc477553f1ec5ee650b00862f0a58bcd12cbdc5f0ea2d2f4" dependencies = [ "cfg-if", "libc", @@ -43,9 +43,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.171" +version = "0.2.173" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c19937216e9d3aa9956d9bb8dfc0b0c8beb6058fc4f7a4dc4d850edf86a237d6" +checksum = "d8cfeafaffdbc32176b64fb251369d52ea9f0a8fbc6f8759edffef7b525d64bb" [[package]] name = "lisakmod_macros" @@ -74,9 +74,9 @@ checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" [[package]] name = "proc-macro2" -version = "1.0.94" +version = "1.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a31971752e70b8b2686d7e46ec17fb38dad4051d94024c88df49b667caea9c84" +checksum = "02b3e5e68a3a1a02aad3ec490a98007cbc13c37cbe84a3cd7b8e406d76e7f778" dependencies = [ "unicode-ident", ] @@ -92,15 +92,15 @@ dependencies = [ [[package]] name = "r-efi" -version = "5.2.0" +version = "5.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74765f6d916ee2faa39bc8e68e4f3ed8949b48cccdac59983d287a7cb71ce9c5" +checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" [[package]] name = "syn" -version = "2.0.100" +version = "2.0.103" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b09a44accad81e1ba1cd74a32461ba89dee89095ba17b32f5d03683b1b1fc2a0" +checksum = "e4307e30089d6fd6aff212f2da3a1f9e32f3223b1f010fb09b7c95f90f3ca1e8" dependencies = [ "proc-macro2", "quote", diff --git a/lisa/_assets/kmodules/lisa/rust/lisakmod-macros/Cargo.toml b/lisa/_assets/kmodules/lisa/rust/lisakmod-macros/Cargo.toml index 08af3dd0ff94d03af493e41e4e4011c195ef010c..14f1fce4537d049ed30e54977fcbc88cee7e9f35 100644 --- 
a/lisa/_assets/kmodules/lisa/rust/lisakmod-macros/Cargo.toml +++ b/lisa/_assets/kmodules/lisa/rust/lisakmod-macros/Cargo.toml @@ -6,3 +6,9 @@ edition = "2024" [dependencies] lisakmod_macros_proc = { path = "./macros" } paste = "1.0" + +[features] +# This feature is set by the lisakmod crate when compiling for tests. This way, +# it allows us to know we are being built for test and e.g. not create "unsafe +# extern "C"" functions. +test = [] diff --git a/lisa/_assets/kmodules/lisa/rust/lisakmod-macros/macros/Cargo.lock b/lisa/_assets/kmodules/lisa/rust/lisakmod-macros/macros/Cargo.lock index cf1c7160a25c6d9bd94fa392e392d7a160ba7d33..68a0edc7048b763305a31eecad960cb66c8164fc 100644 --- a/lisa/_assets/kmodules/lisa/rust/lisakmod-macros/macros/Cargo.lock +++ b/lisa/_assets/kmodules/lisa/rust/lisakmod-macros/macros/Cargo.lock @@ -4,15 +4,15 @@ version = 4 [[package]] name = "bitflags" -version = "2.9.0" +version = "2.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c8214115b7bf84099f1309324e63141d4c5d7cc26862f97a0a857dbefe165bd" +checksum = "1b8e56985ec62d17e9c1001dc89c88ecd7dc08e47eba5ec7c29c7b5eeecde967" [[package]] name = "cfg-if" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +checksum = "9555578bc9e57714c812a1f84e4fc5b4d21fcb063490c624de019f7464c91268" [[package]] name = "either" @@ -22,9 +22,9 @@ checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" [[package]] name = "getrandom" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73fea8450eea4bac3940448fb7ae50d91f034f941199fcd9d909a5a07aa455f0" +checksum = "26145e563e54f2cadc477553f1ec5ee650b00862f0a58bcd12cbdc5f0ea2d2f4" dependencies = [ "cfg-if", "libc", @@ -43,9 +43,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.171" +version = "0.2.173" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c19937216e9d3aa9956d9bb8dfc0b0c8beb6058fc4f7a4dc4d850edf86a237d6" +checksum = "d8cfeafaffdbc32176b64fb251369d52ea9f0a8fbc6f8759edffef7b525d64bb" [[package]] name = "lisakmod_macros_proc" @@ -60,9 +60,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.94" +version = "1.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a31971752e70b8b2686d7e46ec17fb38dad4051d94024c88df49b667caea9c84" +checksum = "02b3e5e68a3a1a02aad3ec490a98007cbc13c37cbe84a3cd7b8e406d76e7f778" dependencies = [ "unicode-ident", ] @@ -78,15 +78,15 @@ dependencies = [ [[package]] name = "r-efi" -version = "5.2.0" +version = "5.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74765f6d916ee2faa39bc8e68e4f3ed8949b48cccdac59983d287a7cb71ce9c5" +checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" [[package]] name = "syn" -version = "2.0.100" +version = "2.0.103" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b09a44accad81e1ba1cd74a32461ba89dee89095ba17b32f5d03683b1b1fc2a0" +checksum = "e4307e30089d6fd6aff212f2da3a1f9e32f3223b1f010fb09b7c95f90f3ca1e8" dependencies = [ "proc-macro2", "quote", diff --git a/lisa/_assets/kmodules/lisa/rust/lisakmod-macros/macros/src/inlinec.rs b/lisa/_assets/kmodules/lisa/rust/lisakmod-macros/macros/src/inlinec.rs index aa162b19876acd9912806bfd148650b812229336..bfc9bd1ce834bc7f2de565eb18d318ec379b1a93 100644 --- a/lisa/_assets/kmodules/lisa/rust/lisakmod-macros/macros/src/inlinec.rs +++ b/lisa/_assets/kmodules/lisa/rust/lisakmod-macros/macros/src/inlinec.rs @@ -137,8 +137,8 @@ fn make_c_func( )?; let c_out = [ - _dump_to_binstore(&format!("c.code.{}", c_name), c_out)?, - _dump_to_binstore(&format!("c.header.{}", c_name), c_header_out)?, + _dump_to_binstore(&format!("c.code.{c_name}"), c_out)?, + _dump_to_binstore(&format!("c.header.{c_name}"), c_header_out)?, 
]; Ok(quote! { @@ -299,10 +299,17 @@ fn _make_c_func( let rust_out = match rust_name { Some(rust_name) => quote! { + #[cfg(not(any(test, feature = "test")))] unsafe extern "C" { #[link_name = #c_name_str] fn #rust_name #f_generics(#rust_extern_args) -> <#f_ret_ty as ::lisakmod_macros::inlinec::FfiType>::FfiType #f_where; } + + #[cfg(any(test, feature = "test"))] + #[allow(unused)] + fn #rust_name #f_generics(#rust_extern_args) -> <#f_ret_ty as ::lisakmod_macros::inlinec::FfiType>::FfiType #f_where { + ::core::panic!("extern C function are not available during tests") + } }, None => quote! {}, }; @@ -784,7 +791,7 @@ pub fn cstatic(attrs: TokenStream, code: TokenStream) -> Result Result::Signed>, } +macro_rules! errno_codes { + ($($rust_name:ident: $c_name:literal),* $(,)?) => { + impl NegativeError + where + T: Unsigned, + ::Signed: From, + { + $( + #[allow(non_snake_case)] + pub fn $rust_name() -> Self { + let code: i8 = cconstant!("#include ", $c_name).unwrap(); + Self::new((-code).into()) + } + )* + } + } +} + +errno_codes!( + EINVAL: "EINVAL", + EFBIG: "EFBIG", +); + +impl NegativeError +where + T: Unsigned, +{ + pub const fn new(val: ::Signed) -> NegativeError { + NegativeError { + err: ErrorCode::new(val), + } + } +} + impl Clone for NegativeError where T: Unsigned, @@ -1222,7 +1262,7 @@ pub struct ErrorCode { impl ErrorCode { #[inline] - pub fn new(code: T) -> ErrorCode { + pub const fn new(code: T) -> ErrorCode { ErrorCode { code } } } @@ -1432,22 +1472,25 @@ impl PtrError { } pub fn from_ptr(ptr: *mut T) -> Result, PtrError> { - #[cfunc] - fn ptr_err_or_zero(ptr: *mut c_void) -> isize { - r#" - #include - "#; + #[cfg(not(test))] + { + #[cfunc] + fn ptr_err_or_zero(ptr: *mut c_void) -> isize { + r#" + #include + "#; - r#" - return PTR_ERR_OR_ZERO(ptr); - "# - } - if ptr.is_null() { - Err(PtrError::Null) - } else { - match ptr_err_or_zero(ptr as *mut c_void) { - 0 => Ok(NonNull::new(ptr).unwrap()), - err => Err(PtrError::Code(ErrorCode::new(err))), + r#" + 
return PTR_ERR_OR_ZERO(ptr); + "# + } + if ptr.is_null() { + Err(PtrError::Null) + } else { + match ptr_err_or_zero(ptr as *mut c_void) { + 0 => Ok(NonNull::new(ptr).unwrap()), + err => Err(PtrError::Code(ErrorCode::new(err))), + } } } } diff --git a/lisa/_assets/kmodules/lisa/rust/lisakmod/Cargo.lock b/lisa/_assets/kmodules/lisa/rust/lisakmod/Cargo.lock index e59dd9614ede2bad1ac010eabb02ad4934161ddc..d33bf59b7123b3c149ee33b67e782589c64ee820 100644 --- a/lisa/_assets/kmodules/lisa/rust/lisakmod/Cargo.lock +++ b/lisa/_assets/kmodules/lisa/rust/lisakmod/Cargo.lock @@ -4,21 +4,27 @@ version = 4 [[package]] name = "anyhow" -version = "1.0.97" +version = "1.0.98" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcfed56ad506cb2c684a14971b8861fdc3baaaae314b9e5f9bb532cbe3ba7a4f" +checksum = "e16d2d3311acee920a9eb8d33b8cbc1787ce4a264e85f964c2404b969bdcd487" [[package]] name = "bitflags" -version = "2.9.0" +version = "2.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c8214115b7bf84099f1309324e63141d4c5d7cc26862f97a0a857dbefe165bd" +checksum = "1b8e56985ec62d17e9c1001dc89c88ecd7dc08e47eba5ec7c29c7b5eeecde967" [[package]] name = "cfg-if" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +checksum = "9555578bc9e57714c812a1f84e4fc5b4d21fcb063490c624de019f7464c91268" + +[[package]] +name = "dyn-clone" +version = "1.0.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c7a8fb8a9fbf66c1f703fe16184d10ca0ee9d23be5b4436400408ba54a95005" [[package]] name = "either" @@ -34,9 +40,9 @@ checksum = "edd0f118536f44f5ccd48bcb8b111bdc3de888b58c74639dfb034a357d0f206d" [[package]] name = "getrandom" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"73fea8450eea4bac3940448fb7ae50d91f034f941199fcd9d909a5a07aa455f0" +checksum = "26145e563e54f2cadc477553f1ec5ee650b00862f0a58bcd12cbdc5f0ea2d2f4" dependencies = [ "cfg-if", "libc", @@ -53,26 +59,32 @@ dependencies = [ "either", ] +[[package]] +name = "itoa" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" + [[package]] name = "libc" -version = "0.2.171" +version = "0.2.174" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c19937216e9d3aa9956d9bb8dfc0b0c8beb6058fc4f7a4dc4d850edf86a237d6" +checksum = "1171693293099992e19cddea4e8b849964e9846f4acee11b3948bcc337be8776" [[package]] name = "linkme" -version = "0.3.32" +version = "0.3.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22d227772b5999ddc0690e733f734f95ca05387e329c4084fe65678c51198ffe" +checksum = "a1b1703c00b2a6a70738920544aa51652532cacddfec2e162d2e29eae01e665c" dependencies = [ "linkme-impl", ] [[package]] name = "linkme-impl" -version = "0.3.32" +version = "0.3.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71a98813fa0073a317ed6a8055dcd4722a49d9b862af828ee68449adb799b6be" +checksum = "04d55ca5d5a14363da83bf3c33874b8feaa34653e760d5216d7ef9829c88001a" dependencies = [ "proc-macro2", "quote", @@ -89,6 +101,9 @@ dependencies = [ "linkme", "lisakmod_macros", "pin-project", + "schemars", + "serde", + "serde_json", ] [[package]] @@ -110,6 +125,12 @@ dependencies = [ "syn", ] +[[package]] +name = "memchr" +version = "2.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32a282da65faaf38286cf3be983213fcf1d2e2a58700e808f83f4ea9a4804bc0" + [[package]] name = "paste" version = "1.0.15" @@ -138,9 +159,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.94" +version = "1.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a31971752e70b8b2686d7e46ec17fb38dad4051d94024c88df49b667caea9c84" +checksum = "02b3e5e68a3a1a02aad3ec490a98007cbc13c37cbe84a3cd7b8e406d76e7f778" dependencies = [ "unicode-ident", ] @@ -156,15 +177,109 @@ dependencies = [ [[package]] name = "r-efi" -version = "5.2.0" +version = "5.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" + +[[package]] +name = "ref-cast" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74765f6d916ee2faa39bc8e68e4f3ed8949b48cccdac59983d287a7cb71ce9c5" +checksum = "4a0ae411dbe946a674d89546582cea4ba2bb8defac896622d6496f14c23ba5cf" +dependencies = [ + "ref-cast-impl", +] + +[[package]] +name = "ref-cast-impl" +version = "1.0.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1165225c21bff1f3bbce98f5a1f889949bc902d3575308cc7b0de30b4f6d27c7" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "ryu" +version = "1.0.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" + +[[package]] +name = "schemars" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe8c9d1c68d67dd9f97ecbc6f932b60eb289c5dbddd8aa1405484a8fd2fcd984" +dependencies = [ + "dyn-clone", + "ref-cast", + "schemars_derive", + "serde", + "serde_json", +] + +[[package]] +name = "schemars_derive" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ca9fcb757952f8e8629b9ab066fc62da523c46c2b247b1708a3be06dd82530b" +dependencies = [ + "proc-macro2", + "quote", + "serde_derive_internals", + "syn", +] + +[[package]] +name = "serde" +version = "1.0.219" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6" 
+dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.219" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "serde_derive_internals" +version = "0.29.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "18d26a20a969b9e3fdf2fc2d9f21eda6c40e2de84c9408bb5d3b05d499aae711" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "serde_json" +version = "1.0.140" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373" +dependencies = [ + "itoa", + "memchr", + "ryu", + "serde", +] [[package]] name = "syn" -version = "2.0.100" +version = "2.0.104" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b09a44accad81e1ba1cd74a32461ba89dee89095ba17b32f5d03683b1b1fc2a0" +checksum = "17b6f705963418cdb9927482fa304bc562ece2fdd4f616084c50b7023b435a40" dependencies = [ "proc-macro2", "quote", diff --git a/lisa/_assets/kmodules/lisa/rust/lisakmod/Cargo.toml b/lisa/_assets/kmodules/lisa/rust/lisakmod/Cargo.toml index 869c113c81e8fbe2c827417f55b7fb7dbf8c5f7e..4f32f7180f628d11eade1100aaf2e44a29959534 100644 --- a/lisa/_assets/kmodules/lisa/rust/lisakmod/Cargo.toml +++ b/lisa/_assets/kmodules/lisa/rust/lisakmod/Cargo.toml @@ -14,6 +14,17 @@ linkme = "0.3.31" # hashbrown = "0.15" lisakmod_macros = { path = "../lisakmod-macros" } pin-project = "1.1" +schemars = { version = "1.0.1", default-features = false, features = ["derive"] } +serde = { version = "1.0", default-features = false, features = ["derive", "alloc"] } +serde_json = { version = "1.0", default-features = false, features = ["alloc"] } + +[dev-dependencies] +lisakmod_macros = { path = "../lisakmod-macros", features = ["test"] } + +[features] +# Useless 
feature that is only there so that lisakmod_macros can use +# cfg(feature = "test") in its generated code. +test = [] [profile.release] panic = 'abort' diff --git a/lisa/_assets/kmodules/lisa/rust/lisakmod/rust.lds b/lisa/_assets/kmodules/lisa/rust/lisakmod/rust.lds index 05f89cdca8409c79840c2b3309a0724674b96609..c1002e01cab99006e8239e621d5fe3038cf38864 100644 --- a/lisa/_assets/kmodules/lisa/rust/lisakmod/rust.lds +++ b/lisa/_assets/kmodules/lisa/rust/lisakmod/rust.lds @@ -2,6 +2,10 @@ PROVIDE(__builtin_copysignq = 0); SECTIONS { + __trace_printk_fmt : { + KEEP(*(__trace_printk_fmt)) + } + /* Remove binstore sections that we created in lisakmod_macros::inlinec Rust module */ /DISCARD/ : { *(.binstore.*) } } diff --git a/lisa/_assets/kmodules/lisa/rust/lisakmod/rustc_targets/arm64/target.json b/lisa/_assets/kmodules/lisa/rust/lisakmod/rustc_targets/arm64/target.json index 37a7d69d2a1ffe100ff59ea8d04485fb6936e944..36eb0fc9a1f440efdf53785f0781a58dad4f6fb3 100644 --- a/lisa/_assets/kmodules/lisa/rust/lisakmod/rustc_targets/arm64/target.json +++ b/lisa/_assets/kmodules/lisa/rust/lisakmod/rustc_targets/arm64/target.json @@ -1,7 +1,7 @@ { "abi": "softfloat", "arch": "aarch64", - "data-layout": "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128-Fn32", + "data-layout": "e-m:e-p270:32:32-p271:32:32-p272:64:64-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128-Fn32", "features": "+v8a,+strict-align,-neon,-fp-armv8", "llvm-target": "aarch64-unknown-none", "max-atomic-width": 128, diff --git a/lisa/_assets/kmodules/lisa/rust/lisakmod/src/error.rs b/lisa/_assets/kmodules/lisa/rust/lisakmod/src/error.rs index ab64edfb0c97a2ef66a9fdeddd086e86a8d28a68..6cae226914917236688cd9087513cf3ac9dbd67e 100644 --- a/lisa/_assets/kmodules/lisa/rust/lisakmod/src/error.rs +++ b/lisa/_assets/kmodules/lisa/rust/lisakmod/src/error.rs @@ -1,9 +1,10 @@ /* SPDX-License-Identifier: GPL-2.0 */ -use alloc::{sync::Arc, vec::Vec}; +use alloc::{borrow::Cow, format, string::String, sync::Arc, vec::Vec}; use 
core::{error::Error as StdError, fmt}; use anyhow; +use schemars::{JsonSchema, Schema, SchemaGenerator}; use crate::runtime::printk::pr_err; @@ -95,6 +96,49 @@ impl embedded_io::Error for Error { } } +impl serde::de::Error for Error { + #[inline] + fn custom(msg: T) -> Self + where + T: fmt::Display, + { + error!("{msg}") + } +} + +impl serde::Serialize for Error { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + serializer.serialize_str(&format!("{self:#}")) + } +} + +impl<'de> serde::Deserialize<'de> for Error { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + let s = String::deserialize(deserializer)?; + Ok(error!("{s}")) + } +} + +impl JsonSchema for Error { + fn schema_id() -> Cow<'static, str> { + concat!(module_path!(), "::Error").into() + } + + fn schema_name() -> Cow<'static, str> { + Self::schema_id() + } + + fn json_schema(gen_: &mut SchemaGenerator) -> Schema { + ::json_schema(gen_) + } +} + /// Mirror the anyhow::Context API, but returns the original Result type rather than /// Result pub trait ResultExt { @@ -215,22 +259,22 @@ impl fmt::Display for MultiError { options.alternate(true); let mut out = alloc::string::String::new(); - let idt = "\n "; - // Create a new formatter so we can add indentation to the formatted content. 
for err in &self.inner { - f.write_str(idt)?; + if self.inner.len() > 1 { + f.write_str("\n ")?; + } let item_f = &mut fmt::Formatter::new(&mut out, options); err.fmt(item_f)?; let mut is_first = true; - for chunk in out.split('\n') { + for line in out.split('\n') { if !is_first { - f.write_str(idt)?; + f.write_str("\n ")?; } is_first = false; - f.write_str(chunk)?; + f.write_str(line)?; } out.clear(); diff --git a/lisa/_assets/kmodules/lisa/rust/lisakmod/src/features/all.rs b/lisa/_assets/kmodules/lisa/rust/lisakmod/src/features/all.rs new file mode 100644 index 0000000000000000000000000000000000000000..03533ceb0612961dec2040dbc093366512497b9c --- /dev/null +++ b/lisa/_assets/kmodules/lisa/rust/lisakmod/src/features/all.rs @@ -0,0 +1,91 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +use alloc::{sync::Arc, vec::Vec}; + +use crate::{ + error::Error, + features::{ + DependenciesSpec, Feature, FeatureId, FeaturesService, Visibility, all_features, + register_feature, + }, + lifecycle::{LifeCycle, new_lifecycle}, + query::query_type, +}; + +pub struct AllFeatures; + +query_type! 
{ + #[derive(Clone)] + pub struct AllFeaturesConfig { + pub best_effort: bool, + } +} + +impl AllFeaturesConfig { + fn merge<'a, I>(mut iter: I) -> AllFeaturesConfig + where + I: Iterator, + { + AllFeaturesConfig { + best_effort: iter.all(|conf| conf.best_effort), + } + } +} + +impl AllFeatures { + pub const NAME: &'static str = "all"; +} + +fn default_features() -> impl Iterator> { + all_features().filter(|feat| { + (feat.name() != AllFeatures::NAME) && (feat.visibility() == Visibility::Public) + }) +} + +impl Feature for AllFeatures { + type Service = (); + type Config = AllFeaturesConfig; + + fn name(&self) -> &str { + Self::NAME + } + + fn id(&self) -> FeatureId { + FeatureId::new::() + } + + fn visibility(&self) -> Visibility { + Visibility::Public + } + + fn dependencies(&self) -> Vec { + default_features().map(|feat| feat.id()).collect() + } + + fn configure( + &self, + configs: &mut dyn Iterator, + ) -> Result< + ( + DependenciesSpec, + LifeCycle, Error>, + ), + Error, + > { + let config = AllFeaturesConfig::merge(configs); + let mut spec = DependenciesSpec::new(); + let mandatory = !config.best_effort; + for feat in default_features() { + feat.__push_no_config(&mut spec, mandatory)?; + } + + Ok(( + spec, + new_lifecycle!(|_| { + yield_!(Ok(Arc::new(()))); + Ok(()) + }), + )) + } +} +register_feature!(AllFeatures); diff --git a/lisa/_assets/kmodules/lisa/rust/lisakmod/src/features/events/mod.rs b/lisa/_assets/kmodules/lisa/rust/lisakmod/src/features/events/mod.rs index 08d95ee68203f41edffd852b1634325d4fa083b8..ea32a7d3cf1bc87963b259a9902042a7c33e41e4 100644 --- a/lisa/_assets/kmodules/lisa/rust/lisakmod/src/features/events/mod.rs +++ b/lisa/_assets/kmodules/lisa/rust/lisakmod/src/features/events/mod.rs @@ -1,20 +1 @@ /* SPDX-License-Identifier: GPL-2.0 */ - -macro_rules! define_event_feature { - (struct $type:ident, $event_name:literal) => { - $crate::features::define_feature! 
{ - struct $type, - name: concat!("event__", $event_name), - visibility: Public, - Service: (), - Config: (), - dependencies: [], - init: |_| { - Ok($crate::lifecycle::new_lifecycle!(|services| { - yield_!(Ok(::alloc::sync::Arc::new(()))); - Ok(()) - })) - }, - } - }; -} diff --git a/lisa/_assets/kmodules/lisa/rust/lisakmod/src/features/legacy.rs b/lisa/_assets/kmodules/lisa/rust/lisakmod/src/features/legacy.rs deleted file mode 100644 index 5e537bf7a9ff0cb7927ef102bed75393f62180dd..0000000000000000000000000000000000000000 --- a/lisa/_assets/kmodules/lisa/rust/lisakmod/src/features/legacy.rs +++ /dev/null @@ -1,125 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ - -use alloc::{sync::Arc, vec::Vec}; -use core::{ - ffi::{CStr, c_char, c_int, c_uint, c_void}, - ptr::NonNull, -}; - -use lisakmod_macros::inlinec::cfunc; - -use crate::{ - error::error, - features::{FeaturesConfig, define_feature}, - lifecycle::new_lifecycle, - runtime::printk::pr_err, -}; - -#[cfunc] -fn features_array() -> Option> { - r#" - #include - #include "features.h" - - static char *features[MAX_FEATURES]; - static unsigned int features_len = 0; - module_param_array(features, charp, &features_len, 0); - MODULE_PARM_DESC(features, "Comma-separated list of features to enable. Available features are printed when loading the module"); - "#; - - r#" - return features; - "# -} - -#[cfunc] -fn features_array_len() -> c_uint { - r#" - static unsigned int features_len; - "#; - - r#" - return features_len; - "# -} - -pub fn module_param_features() -> Option> { - let len = match features_array_len() { - 0 => None, - x => Some(x), - }?; - - let ptr = features_array()?; - let ptr = ptr.as_ptr() as *const *const c_char; - - let slice = unsafe { core::slice::from_raw_parts(ptr, len as usize) }; - Some(slice.iter().map(|s| { - unsafe { CStr::from_ptr(*s) } - .to_str() - .expect("Invalid UTF-8 in feature name") - })) -} - -define_feature! 
{ - struct LegacyFeatures, - name: "__legacy_features", - visibility: Private, - Service: (), - Config: (), - dependencies: [], - init: |configs| { - Ok(( - FeaturesConfig::new(), - new_lifecycle!(|services| { - #[cfunc] - fn list_kernel_features() { - r#" - #include - #include - #include "introspection.h" - "#; - - r#" - pr_info("Kernel features detected. This will impact the module features that are available:\n"); - const char *kernel_feature_names[] = {__KERNEL_FEATURE_NAMES}; - const bool kernel_feature_values[] = {__KERNEL_FEATURE_VALUES}; - for (size_t i=0; i < ARRAY_SIZE(kernel_feature_names); i++) { - pr_info(" %s: %s\n", kernel_feature_names[i], kernel_feature_values[i] ? "enabled" : "disabled"); - } - "# - } - - #[cfunc] - fn start(features: Option>, len: c_uint) -> Result<(), c_int> { - r#" - #include "features.h" - "#; - - r#" - return init_features(len ? features : NULL , len); - "# - } - - - #[cfunc] - fn stop() -> Result<(), c_int> { - r#" - #include "features.h" - "#; - - r#" - return deinit_features(); - "# - } - - list_kernel_features(); - - // We must not bail out here, otherwise we will never run stop(), which will leave - // tracepoint probes installed after modexit, leading to a kernel crash - if let Err(code) = start(features_array(), features_array_len()) { pr_err!("Failed to start legacy C features: {code}") } - yield_!(Ok(Arc::new(()))); - stop().map_err(|code| error!("Failed to stop legacy C features: {code}")) - }) - )) - }, -} diff --git a/lisa/_assets/kmodules/lisa/rust/lisakmod/src/features/mod.rs b/lisa/_assets/kmodules/lisa/rust/lisakmod/src/features/mod.rs index 0f28f38e202d5c8e56836590612901f24f84ce72..7819bcc4659cc3279ba92615ccff59acf324bbb3 100644 --- a/lisa/_assets/kmodules/lisa/rust/lisakmod/src/features/mod.rs +++ b/lisa/_assets/kmodules/lisa/rust/lisakmod/src/features/mod.rs @@ -1,40 +1,105 @@ /* SPDX-License-Identifier: GPL-2.0 */ +pub mod all; pub mod events; -pub mod legacy; pub mod pixel6; +pub mod pmu; pub mod tests; 
-pub mod tracepoint; +pub mod thermal; pub mod wq; -use alloc::{sync::Arc, vec::Vec}; +use alloc::{ + collections::{BTreeMap, BTreeSet}, + ffi::CString, + string::String, + sync::Arc, + vec::Vec, +}; use core::{ any::{Any, TypeId, type_name}, - ffi::c_int, + ffi::{CStr, c_char, c_int}, + fmt, fmt::Debug, + ptr::NonNull, }; use linkme::distributed_slice; +use lisakmod_macros::inlinec::{c_realchar, cfunc}; +use schemars::{JsonSchema, Schema, SchemaGenerator}; +use serde::{Deserialize, Serialize}; use crate::{ - error::{Error, MultiResult}, + error::{Error, MultiResult, error}, graph::{Cursor, DfsPostTraversal, Graph, TraversalDirection}, lifecycle::{self, FinishedKind, LifeCycle, new_lifecycle}, + query::query_type, runtime::{ - printk::{pr_debug, pr_err, pr_info}, + printk::{pr_debug, pr_info}, sync::Lock, }, typemap, }; -typemap::make_index!(pub FeaturesConfigIndex, Send); -impl typemap::KeyOf for Feat +pub enum DependencySpec +where + Feat: Feature, +{ + Optional { + configs: Vec<::Config>, + }, + Mandatory { + configs: Vec<::Config>, + }, + Disabled, +} + +impl Clone for DependencySpec +where + Feat: Feature, + ::Config: Clone, +{ + fn clone(&self) -> DependencySpec { + match self { + DependencySpec::Optional { configs } => DependencySpec::Optional { + configs: configs.clone(), + }, + DependencySpec::Mandatory { configs } => DependencySpec::Mandatory { + configs: configs.clone(), + }, + DependencySpec::Disabled => DependencySpec::Disabled, + } + } +} + +impl fmt::Debug for DependencySpec +where + Feat: Feature, + ::Config: Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + DependencySpec::Optional { configs } => f + .debug_struct("DependencySpec::Optional") + .field("configs", configs) + .finish(), + DependencySpec::Mandatory { configs } => f + .debug_struct("DependencySpec::Mandatory") + .field("configs", configs) + .finish(), + DependencySpec::Disabled => f.debug_struct("Disabled").finish(), + } + } +} + 
+typemap::make_index!(pub DependenciesSpecIndex, Send); +impl typemap::KeyOf for Feat where Feat: 'static + Feature, + ::Config: Debug + Clone, { - type Value = Vec<::Config>; + type Value = DependencySpec; } -pub type FeaturesConfig = typemap::TypeMap; +pub type DependenciesSpec = typemap::TypeMap; typemap::make_index!(pub FeaturesServiceIndex, Send); impl typemap::KeyOf for Feat @@ -93,34 +158,74 @@ pub enum Visibility { Private, } +#[derive(PartialOrd, Ord, PartialEq, Eq, Debug)] +enum FeatureIdInner { + Normal { + type_id: TypeId, + type_name: &'static str, + }, + Legacy { + name: &'static str, + }, +} + #[derive(PartialOrd, Ord, PartialEq, Eq, Debug)] pub struct FeatureId { - type_id: TypeId, - type_name: &'static str, + inner: FeatureIdInner, } impl FeatureId { fn new() -> Self { FeatureId { - type_id: TypeId::of::(), - type_name: type_name::(), + inner: FeatureIdInner::Normal { + type_id: TypeId::of::(), + type_name: type_name::(), + }, } } } +query_type! { + #[derive(Default, Serialize)] + pub struct ProvidedFeatureResources { + ftrace_events: BTreeSet, + } +} + +query_type! { + #[derive(Default, Serialize)] + pub struct FeatureResources { + provided: ProvidedFeatureResources, + } +} + +pub type GenericConfig = serde_json::Value; + mod private { use alloc::{boxed::Box, format, sync::Arc, vec::Vec}; use super::*; use crate::{ error::{Error, ResultExt as _, error}, - runtime::sync::{LockdepClass, Mutex}, + runtime::sync::{Mutex, new_static_lockdep_class}, }; pub struct LiveFeature { lifecycle: Mutex>, feature: Arc, - children_config: FeaturesConfig, + children_config: DependenciesSpec, + // configs are a type-erased Box::Config>> + config: Box, + } + + pub enum SelectedReason { + ByUser, + AsDep, + } + + pub enum Selected { + Selected(T, SelectedReason), + NotSelected(Arc), } macro_rules! 
lock_lifecycle { @@ -140,15 +245,30 @@ mod private { )] #[doc(hidden)] pub trait BlanketFeature { + fn __push_no_config( + &self, + configs: &mut DependenciesSpec, + optional: bool, + ) -> Result<(), Error>; + + fn __push_user_config( + &self, + configs: &mut DependenciesSpec, + config: GenericConfig, + ) -> Result<(), Error>; + fn __configure( self: Arc, - parents: &mut dyn Iterator, Error>>, - from_user: &FeaturesConfig, + parents: &mut dyn Iterator, Error>>, + from_user: &DependenciesSpec, ) -> Result; fn __start( &self, live: &LiveFeature, + parent: Option<&LiveFeature>, + config: &DependenciesSpec, + selected_reason: &SelectedReason, parents_service: &mut FeaturesService, start_children: &mut dyn FnMut(&mut FeaturesService) -> Result<(), Error>, ) -> Result<(), Error>; @@ -159,104 +279,192 @@ mod private { stop_parents: &mut dyn FnMut() -> Result<(), Error>, ) -> Result<(), Error>; - fn __id(&self) -> FeatureId; + fn __config_schema(&self, gen_: &mut SchemaGenerator) -> Schema; } impl BlanketFeature for Feat where Feat: 'static + Feature + Send + Sync, + ::Config: Debug + for<'a> Deserialize<'a> + Send + Clone, { + fn __config_schema(&self, gen_: &mut SchemaGenerator) -> Schema { + gen_.subschema_for::<::Config>() + } + + fn __push_no_config( + &self, + configs: &mut DependenciesSpec, + mandatory: bool, + ) -> Result<(), Error> { + let spec = match mandatory { + true => DependencySpec::Mandatory { + configs: Vec::new(), + }, + false => DependencySpec::Optional { + configs: Vec::new(), + }, + }; + configs.insert::(spec); + Ok(()) + } + + fn __push_user_config( + &self, + configs: &mut DependenciesSpec, + config: GenericConfig, + ) -> Result<(), Error> { + // Always allow "null" as config, which will translate to no config being pushed on the + // stack. It is then up to the feature to deal with an empty set of configs and reject + // it if it does not make sense. 
+ let config: Option<::Config> = serde_json::from_value(config) + .map_err(|err| error!("Could not load config of feature {}: {err}", self.name()))?; + match configs.get_mut::() { + None => { + configs.insert::(DependencySpec::Mandatory { + configs: config.as_slice().into(), + }); + } + Some(spec) => match spec { + DependencySpec::Optional { + configs: feat_configs, + } + | DependencySpec::Mandatory { + configs: feat_configs, + } => { + if let Some(config) = config { + feat_configs.push(config) + } + } + DependencySpec::Disabled => {} + }, + } + Ok(()) + } fn __configure( self: Arc, - parents: &mut dyn Iterator, Error>>, - from_user: &FeaturesConfig, + parents: &mut dyn Iterator, Error>>, + from_user: &DependenciesSpec, ) -> Result { let name = self.name(); let mut from_parents = Vec::new(); for parent in parents { match parent { - Ok(Some(parent)) => { - if let Some(configs) = parent.children_config.get::() { - from_parents.extend(configs.iter()) + Ok(Selected::Selected(parent, _)) => { + if let Some(spec) = parent.children_config.get::() { + match spec { + DependencySpec::Mandatory { configs } + | DependencySpec::Optional { configs } => { + from_parents.extend(configs.iter().cloned()) + } + DependencySpec::Disabled => {} + } } } - Ok(None) => {} + Ok(Selected::NotSelected(_)) => {} Err(_) => { return Err(error!("Could not configure parent of feature {name}")); } } } - let empty = Vec::new(); - let from_user = from_user.get::().unwrap_or(&empty); - let mut for_us = from_user.iter().chain(from_parents); + let from_user = match from_user.get::() { + Some( + DependencySpec::Mandatory { configs } | DependencySpec::Optional { configs }, + ) => configs, + _ => &Vec::new(), + }; + let for_us: Vec<::Config> = + from_user.iter().cloned().chain(from_parents).collect(); let (children_config, lifecycle) = self - .configure(&mut for_us) + .configure(&mut for_us.iter()) .with_context(|| format!("Failed to configure feature {name}"))?; let lifecycle = 
FeatureLifeCycle::::new(lifecycle); + new_static_lockdep_class!(LIVE_FEATURE_LIFECYCLE_LOCKDEP_CLASS); let lifecycle = Mutex::new( Box::new(lifecycle) as Box, - LockdepClass::new(), + LIVE_FEATURE_LIFECYCLE_LOCKDEP_CLASS.clone(), ); Ok(LiveFeature { feature: self as Arc, lifecycle, children_config, + config: Box::new(for_us), }) } - fn __id(&self) -> FeatureId { - FeatureId::new::() - } fn __start( &self, live: &LiveFeature, + parent: Option<&LiveFeature>, + config: &DependenciesSpec, + selected_reason: &SelectedReason, parents_service: &mut FeaturesService, start_children: &mut dyn FnMut(&mut FeaturesService) -> Result<(), Error>, ) -> Result<(), Error> { - lock_lifecycle!(Feat, live, guard, lifecycle); - - let mut register_service = |service: &_| { - parents_service.insert::(Arc::clone(service)); - }; - - match lifecycle.state() { - lifecycle::State::Init | lifecycle::State::Finished(_, Ok(_)) => { - let mut children_service = FeaturesService::new(); - - // Release the lock before recursing in the children so that lockdep does not - // get upset - drop(guard); - start_children(&mut children_service)?; - - // INVARIANT: if a child failed to start, then we do not attempt starting the - // current level. That means that a started node is always reachable from the - // leaves by following a path of other started nodes. Therefore, a - // partially-started graph can always be stopped by traversing it from the - // leaves. 
- - lock_lifecycle!(Feat, live, guard, lifecycle); - - pr_info!("Starting feature {}", self.name()); - let service = lifecycle - .start(children_service) - .with_context(|| format!("Failed to start feature {}", self.name()))?; - pr_debug!("Started feature {}", self.name()); - - register_service(service); - Ok(()) - } - lifecycle::State::Started(Ok(service)) => { - register_service(service); - Ok(()) + let mut start = move || { + lock_lifecycle!(Feat, live, guard, lifecycle); + + let mut register_service = |service: &_| { + parents_service.insert::(Arc::clone(service)); + }; + + match lifecycle.state() { + lifecycle::State::Init | lifecycle::State::Finished(_, Ok(())) => { + let mut children_service = FeaturesService::new(); + + // Release the lock before recursing in the children so that lockdep does not + // get upset + drop(guard); + start_children(&mut children_service)?; + + // INVARIANT: if a child failed to start, then we do not attempt starting the + // current level. That means that a started node is always reachable from the + // leaves by following a path of other started nodes. Therefore, a + // partially-started graph can always be stopped by traversing it from the + // leaves. + + lock_lifecycle!(Feat, live, guard, lifecycle); + + pr_info!("Starting feature {}", self.name()); + let service = lifecycle + .start(children_service) + .with_context(|| format!("Failed to start feature {}", self.name()))?; + pr_debug!("Started feature {}", self.name()); + + register_service(service); + Ok(()) + } + lifecycle::State::Started(Ok(service)) => { + register_service(service); + Ok(()) + } + lifecycle::State::Started(Err(err)) => Err(err), + // Disallow re-starting a feature that failed to finish properly, as external + // resources may be in an unknown state. 
+ lifecycle::State::Finished(_, Err(err)) => Err(err.context(format!( + "Cannot restart feature {} as it may be in a broken state", + self.name() + ))), } - lifecycle::State::Started(Err(err)) => Err(err), - // Disallow re-starting a feature that failed to finish properly, as external - // resources may be in an unknown state. - lifecycle::State::Finished(_, Err(err)) => Err(err.context(format!( - "Cannot restart feature {} as it may be in a broken state", - self.name() - ))), + }; + match (selected_reason, config.get::()) { + (SelectedReason::ByUser, _) + | (_, Some(DependencySpec::Mandatory { .. }) | None) => start(), + (_, Some(DependencySpec::Optional { .. })) => match start() { + Ok(()) => Ok(()), + Err(err) => { + pr_info!( + "Error while starting {} as an optional dependency of {}: {err:#}", + &live.feature.name(), + match parent { + Some(parent) => parent.feature.name(), + None => "", + } + ); + Ok(()) + } + }, + (_, Some(DependencySpec::Disabled)) => Ok(()), } } @@ -296,30 +504,46 @@ mod private { } } - pub fn start_features(graph: &Graph>) -> Result<(), Error> { + pub fn start_features(graph: &Graph>) -> Result<(), Error> { fn process( - cursor: &Cursor<'_, Option>, + parent: Option<&LiveFeature>, + cursor: &Cursor<'_, Selected>, parents_service: &mut FeaturesService, ) -> Result<(), Error> { match cursor.value() { - Some(live) => { + Selected::Selected(live, reason) => { let mut start_children = |children_service: &mut _| { - cursor - .children() - .try_for_each(|child| process(&child, children_service)) - .with_context(|| { - format!( - "Error while starting children of feature {}", - &live.feature.name() - ) - }) + cursor.children().try_for_each(|child_cursor| { + process(Some(live), &child_cursor, children_service).with_context( + || match child_cursor.value() { + Selected::Selected(_, SelectedReason::ByUser) => { + "Error while starting user-requested feature".into() + } + _ => format!( + "Error while starting children of feature {}", + 
&live.feature.name() + ), + }, + ) + }) }; - live.feature - .__start(live, parents_service, &mut start_children) + let empty_config = DependenciesSpec::new(); + let config = match parent { + Some(parent) => &parent.children_config, + None => &empty_config, + }; + live.feature.__start( + live, + parent, + config, + reason, + parents_service, + &mut start_children, + ) } - None => cursor + Selected::NotSelected(_) => cursor .children() - .map(|child| process(&child, &mut FeaturesService::new())) + .map(|child| process(parent, &child, &mut FeaturesService::new())) .collect::>() .into_result(), } @@ -328,15 +552,15 @@ mod private { let mut parents_service = FeaturesService::new(); graph .roots() - .map(|root| process(&root, &mut parents_service)) + .map(|root| process(None, &root, &mut parents_service)) .collect::>() .into_result() } - pub fn stop_features(graph: &Graph>) -> Result<(), Error> { - fn process(cursor: Cursor>) -> Result<(), Error> { + pub fn stop_features(graph: &Graph>) -> Result<(), Error> { + fn process(cursor: Cursor>) -> Result<(), Error> { match cursor.value() { - Some(live) => { + Selected::Selected(live, _) => { let mut stop_parents = || { cursor .parents() @@ -354,7 +578,7 @@ mod private { }; live.feature.__stop(live, &mut stop_parents) } - None => cursor + Selected::NotSelected(_) => cursor .parents() .map(process) .collect::>() @@ -375,21 +599,14 @@ pub trait Feature: Send + Sync + private::BlanketFeature { type Service: Send + Sync + Debug where Self: Sized; - type Config: Send + type Config: Send + JsonSchema where Self: Sized; fn name(&self) -> &str; fn visibility(&self) -> Visibility; - // This associated method allows getting a FeatureId when specifying the dependencies without - // having a live instance of the feature. 
- fn id() -> FeatureId - where - Self: 'static + Sized, - { - FeatureId::new::() - } + fn id(&self) -> FeatureId; #[allow(clippy::type_complexity)] fn configure( @@ -397,7 +614,7 @@ pub trait Feature: Send + Sync + private::BlanketFeature { configs: &mut dyn Iterator, ) -> Result< ( - FeaturesConfig, + DependenciesSpec, LifeCycle, Error>, ), Error, @@ -405,62 +622,113 @@ pub trait Feature: Send + Sync + private::BlanketFeature { where Self: Sized; + #[inline] fn dependencies(&self) -> Vec { Vec::new() } + + #[inline] + fn resources(&self) -> FeatureResources { + Default::default() + } } -pub fn features_lifecycle( + select: Select, + base_config: DependenciesSpec, + configs: Vec>, +) -> Result, Error> where Select: Fn(&dyn Feature) -> bool, { + let features: Vec<_> = all_features().collect(); let graph: Graph> = Graph::new( - __FEATURES - .into_iter() - .map(|f| f()) - .map(|feature| (feature.__id(), feature.dependencies(), feature)), + features + .iter() + .map(|feature| (feature.id(), feature.dependencies(), Arc::clone(feature))), ); let graph = graph.dfs_map(DfsPostTraversal::new( TraversalDirection::FromLeaves, - |value: Arc, mut parents: &mut dyn Iterator>| { - // Select a feature if it has been directly selected or if any of its parent has been - // selected. 
- let selected = (&mut parents).any(|parent| parent.is_some()) || select(&*value); - if selected { Some(value) } else { None } + |value: Arc, mut parents: &mut dyn Iterator>| { + if select(&*value) { + private::Selected::Selected(value, private::SelectedReason::ByUser) + } else if (&mut parents) + .any(|parent| matches!(parent, private::Selected::Selected(_, _))) + { + private::Selected::Selected(value, private::SelectedReason::AsDep) + } else { + private::Selected::NotSelected(value) + } }, )); - let from_user = &FeaturesConfig::new(); - let configure = - |feature: Option>, - parents: &mut dyn Iterator, Error>>| { - match feature { - Some(feature) => feature.__configure(parents, from_user).map(Some), - None => Ok(None), - } - }; + let mut stacked_configs = base_config; + push_user_configs(&features, &mut stacked_configs, configs)?; + let configure = |feature: private::Selected>, + parents: &mut dyn Iterator< + Item = &Result, Error>, + >| { + match feature { + private::Selected::Selected(feature, reason) => feature + .__configure(parents, &stacked_configs) + .map(|live| private::Selected::Selected(live, reason)), + private::Selected::NotSelected(feature) => Ok(private::Selected::NotSelected(feature)), + } + }; let graph = graph.dfs_map(DfsPostTraversal::new( TraversalDirection::FromLeaves, configure, )); - let graph = Into::>::into(graph); - let graph = graph.expect("Error while configuring features"); + let graph = Into::>::into(graph)?; - new_lifecycle!(|_| { - if let Err(err) = private::start_features(&graph) { - pr_err!("Error while starting features: {err:#}"); - } - - yield_!(Ok(())); + Ok(new_lifecycle!(|_| { + // If this fail, we simply yield the error instead of doing an early return. This way, we + // ensure the 2nd part of the lifecycle will execute, so we get to stop the partially + // started feature graph (some features may have not failed to start and need to be + // stopped). 
+ yield_!(private::start_features(&graph)); + private::stop_features(&graph) + })) +} - if let Err(err) = private::stop_features(&graph) { - pr_err!("Error while stopping features: {err:#}"); - } - Ok(()) - }) +macro_rules! register_feature { + ($type:ident) => { + const _: () = { + #[::linkme::distributed_slice($crate::features::__FEATURES)] + fn register() -> ::alloc::sync::Arc { + ::alloc::sync::Arc::new($type) + } + }; + }; } +#[allow(unused_imports)] +pub(crate) use register_feature; macro_rules! define_feature { ( @@ -470,30 +738,41 @@ macro_rules! define_feature { Service: $service:ty, Config: $config:ty, dependencies: [$($dep:ty),* $(,)?], + resources: $resources:expr, init: $init:expr, ) => { $vis struct $type; + impl $type { + pub const NAME: &'static str = $name; + pub const VISBILITY: $crate::features::Visibility = + $crate::features::Visibility::$visibility; + } + $crate::features::register_feature!($type); - const _: () = { - #[::linkme::distributed_slice($crate::features::__FEATURES)] - fn register() -> ::alloc::sync::Arc { - ::alloc::sync::Arc::new($type) - } - }; - - impl $crate::features::Feature for $type { + impl $crate::features::Feature for $type { type Service = $service; type Config = $config; fn name(&self) -> &str { - $name + Self::NAME } + fn visibility(&self) -> $crate::features::Visibility { - $crate::features::Visibility::$visibility + Self::VISBILITY + } + + fn id(&self) -> $crate::features::FeatureId { + $crate::features::FeatureId::new::() } fn dependencies(&self) -> Vec<$crate::features::FeatureId> { - [$(<$dep>::id()),*].into() + [$( + $crate::features::FeatureId::new::<$dep>() + ),*].into() + } + + fn resources(&self) -> $crate::features::FeatureResources { + ($resources)() } fn configure( @@ -501,7 +780,7 @@ macro_rules! 
define_feature { configs: &mut dyn ::core::iter::Iterator, ) -> ::core::result::Result< ( - $crate::features::FeaturesConfig, + $crate::features::DependenciesSpec, $crate::lifecycle::LifeCycle< $crate::features::FeaturesService, ::alloc::sync::Arc, @@ -521,3 +800,141 @@ pub(crate) use define_feature; #[distributed_slice] pub static __FEATURES: [fn() -> Arc]; + +pub fn all_features() -> impl Iterator> { + __FEATURES.into_iter().map(|f| f()).chain(legacy_features()) +} + +struct LegacyFeature { + name: &'static str, +} + +impl Feature for LegacyFeature { + type Service = (); + type Config = (); + + fn name(&self) -> &str { + self.name + } + fn visibility(&self) -> Visibility { + Visibility::Public + } + + fn id(&self) -> FeatureId { + FeatureId { + inner: FeatureIdInner::Legacy { name: self.name }, + } + } + + #[inline] + fn resources(&self) -> FeatureResources { + match self.name.strip_prefix("event__") { + Some(event) => FeatureResources { + provided: ProvidedFeatureResources { + ftrace_events: [event].into_iter().map(Into::into).collect(), + }, + }, + None => Default::default(), + } + } + + #[allow(clippy::type_complexity)] + fn configure( + &self, + _configs: &mut dyn Iterator, + ) -> Result< + ( + DependenciesSpec, + LifeCycle, Error>, + ), + Error, + > { + let name = self.name; + let name = CString::new(name) + .map_err(|err| error!("Could not convert feature name to CString: {err}"))?; + Ok(( + Default::default(), + new_lifecycle!(|_| { + #[cfunc] + fn start(feature: *const c_realchar) -> Result<(), c_int> { + r#" + #include "features.h" + "#; + + r#" + return init_feature(feature); + "# + } + + #[cfunc] + fn stop(feature: *const c_realchar) -> Result<(), c_int> { + r#" + #include "features.h" + "#; + + r#" + return deinit_feature(feature); + "# + } + + yield_!({ + // SAFETY: We must not return early here, otherwise we will never run stop(), + // which will leave tracepoint probes installed after modexit, leading to a + // kernel crash + match 
start(name.as_ptr() as *const c_realchar) { + Err(code) => { + Err(error!("Failed to start legacy C features {name:?}: {code}")) + } + Ok(()) => Ok(Arc::new(())), + } + }); + + stop(name.as_ptr() as *const c_realchar) + .map_err(|code| error!("Failed to stop legacy C feature {name:?}: {code}")) + }), + )) + } +} + +fn legacy_features() -> impl Iterator> { + fn names() -> impl Iterator { + #[cfunc] + fn nth(i: &mut usize) -> Option> { + r#" + #include "features.h" + "#; + + r#" + const struct feature* base = __lisa_features_start; + const struct feature* stop = __lisa_features_stop; + size_t len = stop - base; + + while (1) { + if (*i >= len) { + return NULL; + } else { + const struct feature* nth = base + *i; + *i += 1; + if (nth->__internal) { + continue; + } else { + return nth->name; + } + } + } + "# + } + + let mut i: usize = 0; + core::iter::from_fn(move || { + nth(&mut i).map(|ptr| { + let ptr = ptr.as_ptr(); + unsafe { CStr::from_ptr(ptr as *const c_char) } + .to_str() + .expect("Invalid UTF-8") + }) + }) + } + + names().map(|name| Arc::new(LegacyFeature { name }) as Arc) +} diff --git a/lisa/_assets/kmodules/lisa/rust/lisakmod/src/features/pixel6.rs b/lisa/_assets/kmodules/lisa/rust/lisakmod/src/features/pixel6.rs index 06792aae09224ab5e366ca204486fbef0b5ae383..7da6b2165447f3847af2632d2a8479512a13ec8e 100644 --- a/lisa/_assets/kmodules/lisa/rust/lisakmod/src/features/pixel6.rs +++ b/lisa/_assets/kmodules/lisa/rust/lisakmod/src/features/pixel6.rs @@ -14,12 +14,17 @@ use alloc::{ }; use embedded_io::{Seek as _, Write as _}; +use itertools::Itertools as _; use crate::{ error::{Error, ResultExt as _, error}, - features::{FeaturesConfig, FeaturesService, define_feature, wq::WqFeature}, + features::{ + DependenciesSpec, FeatureResources, FeaturesService, ProvidedFeatureResources, + define_feature, wq::WqFeature, + }, lifecycle::new_lifecycle, parsec::{self, ClosureParser, ParseResult, Parser}, + query::query_type, runtime::{ fs::{File, OpenFlags}, 
traceevent::new_event, @@ -109,6 +114,7 @@ where struct Device { value_file: File, + #[allow(dead_code)] rate_file: File, config: DeviceConfig, } @@ -126,54 +132,76 @@ impl Device { } } -struct DeviceConfig { - id: DeviceId, - folder: String, - hardware_sampling_rate_hz: u64, +query_type! { + #[derive(Clone)] + struct DeviceConfig { + id: DeviceId, + folder: String, + hardware_sampling_rate_hz: u64, + } } -struct Pixel6EmeterConfig { - devices: Vec, +query_type! { + #[derive(Clone)] + struct Pixel6EmeterConfig { + devices: Vec, + } } -// /* There is no point in setting this value to less than 8 times what is written -// * in usec to POWER_METER_RATE_FILE -// */ -// const POWER_METER_SAMPLING_RATE_MS: u64 = 50; +impl Pixel6EmeterConfig { + fn merge<'a, I>(iter: I) -> Pixel6EmeterConfig + where + I: Iterator, + { + let devices: Vec<_> = iter.flat_map(|config| &config.devices).cloned().collect(); + + // If no device was specified, use default devices for backward compatibility + let devices = if devices.is_empty() { + // 250 Hz works for pixel 6, 7, 8 and 9 so we just use that. + let hardware_sampling_rate_hz = 250; + vec![ + DeviceConfig { + id: 0, + folder: "/sys/bus/iio/devices/iio:device0/".into(), + hardware_sampling_rate_hz, + }, + DeviceConfig { + id: 1, + folder: "/sys/bus/iio/devices/iio:device1/".into(), + hardware_sampling_rate_hz, + }, + ] + } else { + devices + }; + Pixel6EmeterConfig { devices } + } +} define_feature! 
{ struct Pixel6Emeter, - name: "event__lisa__pixel6_emeter", + name: "pixel6_emeter", visibility: Public, Service: (), - Config: (), + Config: Pixel6EmeterConfig, dependencies: [WqFeature], + resources: || { + FeatureResources { + provided: ProvidedFeatureResources { + ftrace_events: ["lisa__pixel6_emeter".into()].into() + } + } + }, init: |configs| { + let config = Pixel6EmeterConfig::merge(configs); Ok(( - FeaturesConfig::new(), + DependenciesSpec::new(), new_lifecycle!(|services| { let services: FeaturesService = services; let wq = services.get::() .expect("Could not get service for WqFeature") .wq(); - // 250 Hz works for pixel 6, 7, 8 and 9 so we just use that. - let hardware_sampling_rate_hz = 250; - let config = Pixel6EmeterConfig { - devices: vec![ - DeviceConfig { - id: 0, - folder: "/sys/bus/iio/devices/iio:device0/".into(), - hardware_sampling_rate_hz, - }, - DeviceConfig { - id: 1, - folder: "/sys/bus/iio/devices/iio:device1/".into(), - hardware_sampling_rate_hz, - }, - ] - }; - let mut devices = config.devices.into_iter().map(|device_config| { let value_file = File::open( &(device_config.folder.to_string() + "/energy_value"), @@ -194,7 +222,7 @@ define_feature! { // confused by the partial write. rate_file.write(content.as_bytes()) .map_err(|err| error!("Could not write \"{sampling_rate}\" to sampling_rate file: {err}"))?; - rate_file.flush(); + rate_file.flush()?; let device = Device { value_file, @@ -204,7 +232,8 @@ define_feature! { Ok(device) }).collect::, Error>>()?; - let emit = new_event! { + #[allow(non_snake_case)] + let trace_lisa__pixel6_emeter = new_event! { lisa__pixel6_emeter, fields: { ts: u64, @@ -215,29 +244,44 @@ define_feature! { } }?; - let hardware_sampling_period_us = 1_000_000 / hardware_sampling_rate_hz; - // There is no point in setting this value to less than 8 times what is written in - // usec to the sampling_rate file, as the hardware will only expose a new value - // every 8 hardware periods. 
- let software_sampling_rate_us = hardware_sampling_period_us * 8; - - let work_item = wq::new_work_item!(wq, move |work| { - let process_device = |device: &mut Device| -> Result<(), Error> { - let device_id = device.config.id; - device.parse_samples(&mut |sample: Sample| { - emit(sample.ts, device_id, sample.chan, &sample.chan_name, sample.value); - Ok(()) - })?; + + let process_device = move |device: &mut Device| -> Result<(), Error> { + let device_id = device.config.id; + device.parse_samples(&mut |sample: Sample| { + trace_lisa__pixel6_emeter(sample.ts, device_id, sample.chan, &sample.chan_name, sample.value); Ok(()) - }; - for device in &mut devices { - process_device(device) - .with_context(|| format!("Could not read samples from device {}", device.config.id)) - .print_err(); - } - work.enqueue(software_sampling_rate_us); - }); - work_item.enqueue(software_sampling_rate_us); + })?; + Ok(()) + }; + let key = |device: &Device| device.config.hardware_sampling_rate_hz; + devices.sort_by_key(key); + + let mut works: Vec<_> = devices + .into_iter() + .chunk_by(key) + .into_iter() + .map(|(hardware_sampling_rate_hz, devices)| { + let hardware_sampling_period_us = 1_000_000 / hardware_sampling_rate_hz; + // There is no point in setting this value to less than 8 times what is written in + // usec to the sampling_rate file, as the hardware will only expose a new value + // every 8 hardware periods. 
+ let software_sampling_rate_us = hardware_sampling_period_us * 8; + + let mut devices: Vec<_> = devices.collect(); + let process_device = &process_device; + Ok(wq::new_work_item!(wq, move |work| { + for device in &mut devices { + process_device(device) + .with_context(|| format!("Could not read sample from device {}", device.config.id)) + .print_err(); + } + work.enqueue(software_sampling_rate_us); + })) + }).collect::>()?; + + for work in &mut works { + work.enqueue(0); + } yield_!(Ok(Arc::new(()))); Ok(()) }), diff --git a/lisa/_assets/kmodules/lisa/rust/lisakmod/src/features/pmu.rs b/lisa/_assets/kmodules/lisa/rust/lisakmod/src/features/pmu.rs new file mode 100644 index 0000000000000000000000000000000000000000..55da924cbbeac716d76bfd662bc5e08e31609482 --- /dev/null +++ b/lisa/_assets/kmodules/lisa/rust/lisakmod/src/features/pmu.rs @@ -0,0 +1,598 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +// use crate::{ +// features::{Feature, Visibility}, +// lifecycle::new_lifecycle, +// }; +// +use alloc::{ + collections::{BTreeMap, BTreeSet}, + string::String, + sync::Arc, + vec::Vec, +}; +use core::{ + cell::UnsafeCell, + ffi::{CStr, c_int, c_uint}, + pin::Pin, + ptr::NonNull, + sync::atomic::{AtomicU64, Ordering}, +}; + +use itertools::Itertools as _; +use lisakmod_macros::inlinec::{NegativeError, PtrError, cconstant, cfunc, opaque_type}; + +use crate::{ + error::{Error, error}, + features::{DependenciesSpec, FeatureResources, ProvidedFeatureResources, define_feature}, + lifecycle::new_lifecycle, + query::query_type, + runtime::{ + cpumask::{CpuId, active_cpus, smp_processor_id}, + irqflags::local_irq_save, + printk::pr_err, + traceevent::{TracepointString, new_event, new_tracepoint_string}, + tracepoint::{Tracepoint, new_probe}, + version::kernel_version, + }, +}; + +type EventTyp = u32; + +opaque_type!( + struct CTaskStruct, + "struct task_struct", + "linux/sched.h" +); + +opaque_type!( + struct _CPerfEvent, + "struct perf_event", + "linux/perf_event.h" +); + 
+query_type! { + #[derive(Clone)] + struct PmuConfig { + events: BTreeSet, + } +} + +impl PmuConfig { + fn merge<'a, I>(iter: I) -> PmuConfig + where + I: Iterator, + { + let events: BTreeSet<_> = iter.flat_map(|config| &config.events).cloned().collect(); + PmuConfig { events } + } +} + +query_type! { + #[derive(Clone, PartialEq, Eq, PartialOrd, Ord)] + struct RawPerfEventId { + id: u64, + pmu_name: String, + } +} + +query_type! { + #[derive(Clone, PartialEq, Eq, PartialOrd, Ord)] + struct GenericPerfEventId { + name: String, + } +} + +query_type! { + #[derive(Clone, PartialEq, Eq, PartialOrd, Ord)] + enum PerfEventId { + Raw(RawPerfEventId), + Generic(GenericPerfEventId), + } +} + +// Information dumped in the ftrace event to identify what event we are using. The +// PerfEventAttr.id is not a great choice as it can also encode the PMU type when +// PERF_TYPE_HARDWARE is used and if the user wants the generic event to be enabled on a specific +// PMU. +enum PerfEventRuntimeId { + Raw { + id: u64, + pmu_name: String, + }, + Generic { + // Use a TracepointString so that we only pay for a pointer-sized field rather than the + // full name copied inside the event. + name: TracepointString, + }, +} + +// Our version of struct perf_event_attr, that can then be turned into a set of values used to +// populate an actual perf_event_attr. +struct PerfEventAttr { + typ: EventTyp, + id: u64, + runtime_id: PerfEventRuntimeId, +} + +impl PerfEventId { + fn attr(&self) -> Result { + match self { + PerfEventId::Generic(id) => { + macro_rules! match_event_data { + ($lookup:expr, $($name:expr => $id:literal),* $(,)?) => {{ + let lookup = $lookup; + match lookup { + $( + $name => Ok(( + new_tracepoint_string!($name), + cconstant!("#include ", $id).unwrap() + )), + )* + _ => Err(error!("Could not find perf event: {lookup}")) + } + }} + } + + let (runtime_name, id) = match_event_data! 
{ + &*id.name, + "cpu-cycles" => "PERF_COUNT_HW_CPU_CYCLES", + "instructions" => "PERF_COUNT_HW_INSTRUCTIONS", + "cache-references" => "PERF_COUNT_HW_CACHE_REFERENCES", + "cache-misses" => "PERF_COUNT_HW_CACHE_MISSES", + "branch-instructions" => "PERF_COUNT_HW_BRANCH_INSTRUCTIONS", + "branch-misses" => "PERF_COUNT_HW_BRANCH_MISSES", + "bus-cycles" => "PERF_COUNT_HW_BUS_CYCLES", + "stalled-cycles-frontend" => "PERF_COUNT_HW_STALLED_CYCLES_FRONTEND", + "stalled-cycles-backend" => "PERF_COUNT_HW_STALLED_CYCLES_BACKEND", + "ref-cycles" => "PERF_COUNT_HW_REF_CPU_CYCLES", + }?; + + const PERF_TYPE_HARDWARE: u32 = + match cconstant!("#include <linux/perf_event.h>", "PERF_TYPE_HARDWARE") { + Some(x) => x, + None => 0, + }; + + Ok(PerfEventAttr { + id, + typ: PERF_TYPE_HARDWARE, + runtime_id: PerfEventRuntimeId::Generic { name: runtime_name }, + }) + } + PerfEventId::Raw(id) => { + const PERF_TYPE_RAW: u32 = + match cconstant!("#include <linux/perf_event.h>", "PERF_TYPE_RAW") { + Some(x) => x, + None => 0, + }; + Ok(PerfEventAttr { + id: id.id, + // FIXME: we need to pass the actual PMU type matching the id.pmu_name string. + // Otherwise, we will end up with an event on _some_ PMU, that may or may not + // be the one we want. This would fail the pmu_name check down the line. + // + // Note that when we do that, we will also need to deal with the fact a given + // PMU may not support all CPUs, so we won't necessarily be able to register such + // event on all CPUs. + // + typ: PERF_TYPE_RAW, + runtime_id: PerfEventRuntimeId::Raw { + id: id.id, + pmu_name: id.pmu_name.clone(), + }, + }) + } + } + } +} + +query_type! { + #[derive(Clone, PartialEq, Eq, PartialOrd, Ord)] + enum PerfEventTriggerTracepoint { + #[serde(rename = "sched_switch")] + SchedSwitch, + } +} + +query_type! { + #[derive(Clone, PartialEq, Eq, PartialOrd, Ord)] + enum PerfEventTrigger { + Tracepoint(PerfEventTriggerTracepoint), + } +} + +query_type!
{ + #[derive(Clone, PartialEq, Eq, PartialOrd, Ord)] + struct PerfEventDesc { + id: PerfEventId, + // FIXME: make use of that + triggers: Vec, + } +} + +#[repr(transparent)] +struct CPerfEventPtr(NonNull>); +unsafe impl Send for CPerfEventPtr {} +unsafe impl Sync for CPerfEventPtr {} + +struct PerfEvent { + desc: Arc, + c_event: CPerfEventPtr, + enabled: AtomicU64, + cpu: CpuId, +} + +impl Drop for PerfEvent { + fn drop(&mut self) { + #[cfunc] + fn release( + event: NonNull>, + ) -> Result> { + r#" + #include + #include "introspection.h" + "#; + r#" + # + #if HAS_SYMBOL(perf_event_release_kernel) + return perf_event_release_kernel(event); + #else + return -ENOSYS; + #endif + "# + } + + let enabled = self.enabled.load(Ordering::Relaxed); + assert!( + enabled == 0, + "Cannot release perf event while not all PerfEventEnableGuard have been dropped" + ); + + release(self.c_event.0).expect("Could not release perf event"); + } +} + +impl PerfEvent { + fn from_desc(desc: Arc, cpu: CpuId) -> Result { + // FIXME: add overflow handler + #[cfunc] + fn perf_event_create_kernel_counter( + id: u64, + // typ is enum perf_type_id + typ: EventTyp, + cpu: c_int, + ) -> Result>, PtrError> { + r#" + #include + #include "introspection.h" + "#; + r#" + #if HAS_SYMBOL(perf_event_create_kernel_counter) + struct perf_event_attr attr = { + .type = typ, + .size = sizeof(struct perf_event_attr), + .pinned = 1, + .disabled = 1, + .config = id, + }; + + return perf_event_create_kernel_counter(&attr, cpu, NULL, NULL, NULL); + #else + return PTR_ERR(-ENOSYS); + #endif + "# + } + + let attr = desc.id.attr()?; + + let cpu_signed: c_int = cpu.try_into().unwrap(); + let c_event = + perf_event_create_kernel_counter(attr.id, attr.typ, cpu_signed).map_err(|err| { + error!( + "Could not allocate PMU counter for perf event {id:?}: {err}", + id = desc.id + ) + })?; + + #[cfunc] + unsafe fn get_pmu_name<'a>(event: NonNull>) -> &'a CStr { + r#" + #include + "#; + r#" + return event->pmu->name; + "# + } + + 
match &desc.id { + PerfEventId::Raw(id) => { + let expected = &id.pmu_name; + // SAFETY: we don't hold onto the returned string beyond the life of the struct + // perf_event it was taken from. + let real = unsafe { get_pmu_name(c_event) }; + match real.to_str() { + Ok(real) => { + if real == expected { + Ok(()) + } else { + Err(error!( + "Expected PMU type {expected} for raw perf event {id:?} but found: {real}" + )) + } + } + Err(_) => Err(error!("Could not convert PMU type {real:?} to Rust string")), + } + } + _ => Ok(()), + }?; + + Ok(PerfEvent { + desc, + cpu, + c_event: CPerfEventPtr(c_event), + enabled: AtomicU64::new(0), + }) + } + + fn enable<'a>(&'a self) -> Result, Error> { + let c_event = self.c_event().get_ref(); + + #[cfunc] + fn enable(event: &UnsafeCell<_CPerfEvent>) -> Result> { + r#" + #include + #include "introspection.h" + "#; + r#" + #if HAS_SYMBOL(perf_event_enable) + perf_event_enable(event); + return 0; + #else + return -ENOSYS; + #endif + "# + } + + #[cfunc] + fn check_active(event: &UnsafeCell<_CPerfEvent>) -> bool { + r#" + #include + "#; + r#" + return event->state == PERF_EVENT_STATE_ACTIVE; + "# + } + + // If there are multiple threads involved, the &Self we are working with here must have + // been transmitted somehow, and that transmission channel should come with its own + // synchronization barriers, so Ordering::Relaxed is enough. + self.enabled.fetch_add(1, Ordering::Relaxed); + enable(c_event).map_err(|err| error!("Could not enable perf event: {err}"))?; + + // Create the guard in all cases, so that perf_event_disable() is still called in case we + // return an error. 
+ let guard = PerfEventEnableGuard { event: self }; + + if check_active(c_event) { + Ok(guard) + } else { + Err(error!("Perf event {id:?} is not active", id = self.desc.id)) + } + } + + fn c_event(&self) -> Pin<&UnsafeCell<_CPerfEvent>> { + unsafe { Pin::new_unchecked(self.c_event.0.as_ref()) } + } +} + +struct PerfEventEnableGuard<'a> { + event: &'a PerfEvent, +} + +impl<'a> PerfEventEnableGuard<'a> { + fn read(&self) -> Result { + // Disable IRQs so we don't risk re-entering on that CPU. Since we will only read a given + // event on the CPU it was registered on, this means we can read that counter in any + // context without fear. The only bad thing that could happen is if we call that function + // from a context that could have interrupted an on-going pmu->read() call. + let _guard = local_irq_save(); + + if self.event.cpu == smp_processor_id() { + // The approach taken is a *semi*-safe one as: + // - the execution context is one as of the caller + // (__schedule) with preemption and interrupts being + // disabled + // - the events being traced are per-CPU ones only + // - kernel counter so no inheritance (no child events) + // - counter is being read on/for a local CPU + #[cfunc] + fn read(event: &UnsafeCell<_CPerfEvent>) -> u64 { + r#" + #include + #include + #include + "#; + r#" + // Refresh the count value + event->pmu->read(event); + + // Read the now-refreshed value + return (uint64_t)local64_read(&event->count); + "# + } + Ok(read(self.event.c_event().get_ref())) + } else { + Err(error!("Cannot read a perf event for a remote CPU")) + } + } +} + +impl<'a> Drop for PerfEventEnableGuard<'a> { + fn drop(&mut self) { + #[cfunc] + fn disable(event: NonNull>) { + r#" + #include + #include "introspection.h" + "#; + r#" + #if HAS_SYMBOL(perf_event_disable) + return perf_event_disable(event); + #endif + "# + } + + disable(self.event.c_event.0); + // If there are multiple threads involved, the &Self we are working with here must have + // been transmitted somehow, 
and that transmission channel should come with its own + // synchronization barriers, so Ordering::Relaxed is enough. We won't attempt to destroy + // the object until the reference is dropped. + self.event.enabled.fetch_sub(1, Ordering::Relaxed); + } +} + +define_feature! { + struct PmuFeature, + name: "pmu", + visibility: Public, + Service: (), + Config: PmuConfig, + dependencies: [], + resources: || { + FeatureResources { + provided: ProvidedFeatureResources { + ftrace_events: [ + "lisa__perf_event_raw".into(), + "lisa__perf_event_generic".into(), + ].into() + } + } + }, + init: |configs| { + let config = PmuConfig::merge(configs); + Ok(( + DependenciesSpec::new(), + new_lifecycle!(|_| { + let cpus: Vec = active_cpus().collect(); + let events = config.events + .into_iter() + .map(|desc| { + let desc = Arc::new(desc); + + let events = cpus + .iter() + .copied() + .map(|cpu| PerfEvent::from_desc(desc.clone(), cpu)) + .collect::, _>>()?; + Ok(events) + }) + .collect::, Error>>()?; + + if events.is_empty() { + Err(error!("No perf event was requested")) + } else { + Ok(()) + }?; + + let guards = events + .iter() + .flatten() + .map(|event| { + Ok(( + event.desc.id.attr()?, + event.enable()? + )) + }) + .collect::, Error>>()?; + + let guard_cpu = |(_, guard): &(_, PerfEventEnableGuard<'_>)| guard.event.cpu; + let per_cpu_guards = guards + .into_iter() + .sorted_by_key(guard_cpu) + .chunk_by(guard_cpu) + .into_iter() + .map(|(cpu, group)| (cpu, group.collect::>())) + .collect::>>(); + + // Before 5.18, there is no prev_state last parameter, so we need to deal with that + // as Pixel 6 kernel is 5.10 + if kernel_version() < (5, 18, 0) { + Err(error!("Kernels prior to v5.18.0 are not supported as the sched_switch tracepoint probe had a different signature")) + } else { + Ok(()) + }?; + + if per_cpu_guards.is_empty() { + yield_!(Ok(Arc::new(()))); + } else { + + #[allow(non_snake_case)] + let trace_lisa__perf_event_raw = new_event! 
{ + lisa__perf_event_raw, + fields: { + // FIXME: once we have something like a dynamically-allocated + // TracepointString, use it for PMU name + pmu_name: &str, + event_id: u64, + value: u64, + } + }?; + + #[allow(non_snake_case)] + let trace_lisa__perf_event_generic = new_event! { + lisa__perf_event_generic, + fields: { + // FIXME: Do we really want to have that string? That makes the event + // 24 bytes long (+4 of buffer record header). If we used a u8 instead, + // it saves 7 bytes (+4 as well). That's a 25% space saving. + event_name: TracepointString, + value: u64, + } + }?; + + let probe = new_probe!( + (_preempt: bool, _prev: *mut CTaskStruct, _next:* mut CTaskStruct, _prev_state: c_uint) { + let cpu = smp_processor_id(); + if let Some(guards) = per_cpu_guards.get(&cpu) { + for (attr, guard) in guards { + let value = guard.read(); + match value { + Ok(value) => { + match &attr.runtime_id { + PerfEventRuntimeId::Raw{pmu_name, id} => { + trace_lisa__perf_event_raw(pmu_name, *id, value); + } + PerfEventRuntimeId::Generic{name} => { + trace_lisa__perf_event_generic(*name, value); + } + } + } + Err(err) => { + pr_err!("{err:#}"); + } + } + } + } + } + ); + + // SAFETY: sched_switch tracepoint has a static lifetime as it cannot suddenly + // disappear.
+ let tp = unsafe { + Tracepoint::<(bool, *mut CTaskStruct, *mut CTaskStruct, c_uint)>::lookup("sched_switch") + }.ok_or(error!("Could not find sched_switch tracepoint to attach to"))?; + + let registered = tp.register_probe(&probe); + + yield_!(Ok(Arc::new(()))); + + drop(registered); + drop(probe); + } + + Ok(()) + }), + )) + }, +} diff --git a/lisa/_assets/kmodules/lisa/rust/lisakmod/src/features/tests.rs b/lisa/_assets/kmodules/lisa/rust/lisakmod/src/features/tests.rs index 9f00bead89e3beac1814ff1d32830893c11cc09b..59a9746ee003fdc6c59ee7200673fd19aa854555 100644 --- a/lisa/_assets/kmodules/lisa/rust/lisakmod/src/features/tests.rs +++ b/lisa/_assets/kmodules/lisa/rust/lisakmod/src/features/tests.rs @@ -1,13 +1,16 @@ /* SPDX-License-Identifier: GPL-2.0 */ use alloc::{collections::BTreeMap, sync::Arc, vec, vec::Vec}; -use core::ffi::CStr; +use core::{ + ffi::CStr, + sync::atomic::{AtomicU32, Ordering}, +}; use lisakmod_macros::inlinec::{cconstant, ceval, cfunc}; use crate::{ error::Error, - features::{FeaturesConfig, define_feature}, + features::{DependenciesSpec, define_feature}, lifecycle::new_lifecycle, runtime::{ kbox::KernelKBox, @@ -15,10 +18,6 @@ use crate::{ }, }; -unsafe extern "C" { - fn myc_callback(x: u64) -> u64; -} - macro_rules! test { ($name:ident, $block:block) => { #[inline(never)] @@ -33,8 +32,15 @@ macro_rules! test { test! { test1, { - let x = unsafe { myc_callback(42) }; - assert_eq!(x, 43); + #[cfg(not(test))] + { + unsafe extern "C" { + fn myc_callback(x: u64) -> u64; + } + + let x = unsafe { myc_callback(42) }; + assert_eq!(x, 43); + } } } @@ -209,7 +215,7 @@ test! { { use core::ops::{Deref, DerefMut}; - use crate::runtime::sync::{Lock, LockdepClass, Mutex, SpinLock}; + use crate::runtime::sync::{Lock, LockdepClass, Mutex, SpinLock}; { new_static_mutex!(STATIC_MUTEX, u32, 42); macro_rules! test_lock { @@ -240,14 +246,14 @@ test! 
{ }}; } - test_lock!(SpinLock::new(42, LockdepClass::new())); - test_lock!(Mutex::new(42, LockdepClass::new())); + test_lock!(>::new(42, LockdepClass::new("test_spinlock"))); + test_lock!(>::new(42, LockdepClass::new("test_mutex"))); test_lock!(&STATIC_MUTEX); } { use crate::runtime::sync::Rcu; - let rcu = Rcu::new(42, LockdepClass::new()); + let rcu = >::new(42, LockdepClass::new("test_rcu")); assert_eq!(*rcu.lock(), 42); rcu.update(43); assert_eq!(*rcu.lock(), 43); @@ -266,16 +272,70 @@ test! { let mut kobject = KObject::new(kobj_type.clone()); let mut kobject2 = KObject::new(kobj_type.clone()); - kobject.add(Some(&root), "foo"); - let kobject = kobject.finalize().expect("Could not finalize kobject"); + kobject.add(Some(&root), "foo") + .expect("Could not add kobject to sysfs"); + let kobject = kobject.publish().expect("Could not publish kobject"); - kobject2.add(Some(&kobject), "bar"); - let kobject2 = kobject2.finalize().expect("Could not finalize kobject"); + kobject2.add(Some(&kobject), "bar") + .expect("Could not add kobject to sysfs"); + let kobject2 = kobject2.publish().expect("Could not publish kobject"); drop(kobject2); } } +test! { + test8, + { + use crate::runtime::sysfs::{BinFile, BinRWContent, Folder}; + + let mut root = Folder::sysfs_module_root(); + let _ = BinFile::new(&mut root, "file1", 0o644, 1024*1024, BinRWContent::new()); + } +} + +test! 
{ + test9, + { + use crate::runtime::wq::{Wq, new_work_item}; + + + let wq = Wq::new("lisa_test").expect("Could not create workqueue"); + + + let x = AtomicU32::new(0); + let barrier = AtomicU32::new(0); + + let work = new_work_item!(&wq, { + let x = &x; + let barrier = &barrier; + move |work| { + let x_ = x.fetch_add(1, Ordering::SeqCst); + if x_ == 2 { + barrier.store(1, Ordering::SeqCst); + } else { + work.enqueue(1); + } + } + }); + work.enqueue(0); + + #[cfunc] + fn msleep(x: u64) { + "#include "; + "msleep(x);" + } + + // Low-effort barrier + while barrier.load(Ordering::SeqCst) != 1 { + // Sleep a bit, otherwise we keep loading the atomic and that starves the writer (on my + // x86 laptop at least). + msleep(1); + } + assert!(x.load(Ordering::SeqCst) == 3); + } +} + pub fn init_tests() -> Result<(), Error> { // All of those functions are #[inline(never)] to ensure their name appear in backtraces if any // issue happens. @@ -286,6 +346,8 @@ pub fn init_tests() -> Result<(), Error> { test5(); test6(); test7(); + test8(); + test9(); pr_info!("Rust tests finished"); Ok(()) @@ -298,9 +360,10 @@ define_feature! 
{ Service: (), Config: (), dependencies: [], - init: |configs| { + resources: Default::default, + init: |_| { Ok(( - FeaturesConfig::new(), + DependenciesSpec::new(), new_lifecycle!(|_| { init_tests()?; yield_!(Ok(Arc::new(()))); diff --git a/lisa/_assets/kmodules/lisa/rust/lisakmod/src/features/thermal.rs b/lisa/_assets/kmodules/lisa/rust/lisakmod/src/features/thermal.rs new file mode 100644 index 0000000000000000000000000000000000000000..ccceb3c3f285fb2106e12399ec1da7d38ce96df6 --- /dev/null +++ b/lisa/_assets/kmodules/lisa/rust/lisakmod/src/features/thermal.rs @@ -0,0 +1,245 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +// use crate::{ +// features::{Feature, Visibility}, +// lifecycle::new_lifecycle, +// }; +// +use alloc::{ffi::CString, format, string::String, sync::Arc, vec::Vec}; +use core::{ + cell::UnsafeCell, + ffi::{CStr, c_int, c_uint}, + fmt, + ptr::NonNull, +}; + +use itertools::Itertools as _; +use lisakmod_macros::inlinec::{NegativeError, PtrError, cfunc, incomplete_opaque_type}; + +use crate::{ + error::{Error, ResultExt as _, error}, + features::{ + DependenciesSpec, FeatureResources, FeaturesService, ProvidedFeatureResources, + define_feature, wq::WqFeature, + }, + lifecycle::new_lifecycle, + query::query_type, + runtime::{traceevent::new_event, wq}, +}; + +query_type! { + #[derive(Clone)] + struct ThermalZoneConfig { + name: String, + sampling_period_us: u64, + } +} + +query_type! 
{ + #[derive(Clone)] + struct ThermalConfig { + zones: Vec, + } +} + +impl ThermalConfig { + fn merge<'a, I>(iter: I) -> ThermalConfig + where + I: Iterator, + { + let zones: Vec<_> = iter.flat_map(|config| &config.zones).cloned().collect(); + ThermalConfig { zones } + } +} + +type ThermalZoneId = u32; +type Temperature = i32; + +incomplete_opaque_type!( + struct _CThermalZoneDevice, + "struct thermal_zone_device", + "linux/thermal.h" +); + +#[repr(transparent)] +struct CThermalZoneDevice(UnsafeCell<_CThermalZoneDevice>); + +impl CThermalZoneDevice { + fn from_unsafe_cell_ref(tzd: &UnsafeCell<_CThermalZoneDevice>) -> &Self { + // SAFETY: We can safely transmute between CThermalZoneDevice and UnsafeCell<_CThermalZoneDevice> as + // they have the same layout thanks to repr(transparent) + unsafe { + core::mem::transmute::<&UnsafeCell<_CThermalZoneDevice>, &CThermalZoneDevice>(tzd) + } + } +} + +unsafe impl Send for CThermalZoneDevice {} +// SAFETY: struct thermal_zone_device has its lock infrastructure to be usable as Sync +unsafe impl Sync for CThermalZoneDevice {} + +struct ThermalZone { + id: ThermalZoneId, + tzd: &'static CThermalZoneDevice, + config: ThermalZoneConfig, +} + +impl fmt::Debug for ThermalZone { + #[inline] + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("ThermalZone") + .field("id", &self.id) + .finish_non_exhaustive() + } +} + +impl ThermalZone { + fn from_config(config: ThermalZoneConfig) -> Result { + #[cfunc] + fn by_name(name: &CStr) -> Result>, PtrError> { + r#" + #include + "#; + + r#" + return thermal_zone_get_zone_by_name(name); + "# + } + + #[cfunc] + fn get_id(tzd: &UnsafeCell<_CThermalZoneDevice>) -> c_uint { + r#" + #include + #include "introspection.h" + "#; + + r#" + // From kernel v6.4, we have that function + #if HAS_SYMBOL(thermal_zone_device_id) + return thermal_zone_device_id(tzd); + // In earlier versions, struct thermal_zone_device is defined in the public header. 
+ #else + return tzd->id; + #endif + "# + } + + let c_name = CString::new(&*config.name) + .map_err(|err| error!("Could not convert thermal zone name to C string: {err}"))?; + let tzd = by_name(c_name.as_c_str()) + .map_err(|err| error!("Could not resolve thermal zone: {err}"))?; + + // FIXME: is that assumption on thermal lifetime true ? + // SAFETY: The thermal zone lives forever + let tzd: &'static UnsafeCell<_CThermalZoneDevice> = unsafe { tzd.as_ref() }; + let id = get_id(tzd); + // SAFETY: We can safely transmute between CThermalZoneDevice and UnsafeCell<_CThermalZoneDevice> as + // they have the same layout thanks to repr(transparent) + let tzd = CThermalZoneDevice::from_unsafe_cell_ref(tzd); + Ok(ThermalZone { config, tzd, id }) + } + + fn get_temperature(&self) -> Result { + #[cfunc] + fn get_temperature( + tzd: &UnsafeCell<_CThermalZoneDevice>, + temperature: &mut c_int, + ) -> Result> { + r#" + #include + "#; + + r#" + return thermal_zone_get_temp(tzd, temperature); + "# + } + + let mut temperature: c_int = 0; + match get_temperature(&self.tzd.0, &mut temperature) { + Err(err) => Err(error!("{err}")), + Ok(0) => Ok(()), + Ok(_) => unreachable!(), + }?; + Ok(temperature) + } +} + +define_feature! 
{ + struct ThermalFeature, + name: "thermal", + // FIXME: set to Public once the lifetime of the pointer returned by + // thermal_zone_get_zone_by_name() is figured out (probably needs some refcounting) + visibility: Private, + Service: (), + Config: ThermalConfig, + dependencies: [WqFeature], + resources: || { + FeatureResources { + provided: ProvidedFeatureResources { + ftrace_events: ["lisa__thermal".into()].into() + } + } + }, + init: |configs| { + let config = ThermalConfig::merge(configs); + Ok(( + DependenciesSpec::new(), + new_lifecycle!(|services| { + let services: FeaturesService = services; + let wq = services.get::() + .expect("Could not get service for WqFeature") + .wq(); + + let mut zones = config.zones.into_iter() + .map(ThermalZone::from_config) + .collect::, Error>>()?; + + if zones.is_empty() { + Err(error!("No thermal zone was requested")) + } else { + Ok(()) + }?; + + #[allow(non_snake_case)] + let trace_lisa__thermal = new_event! { + lisa__thermal, + fields: { + id: ThermalZoneId, + name: &str, + temp: Temperature, + } + }?; + + let process_zone = move |zone: &mut ThermalZone| -> Result<(), Error> { + trace_lisa__thermal(zone.id, &zone.config.name, zone.get_temperature()?); + Ok(()) + }; + + let key = |zone: &ThermalZone| zone.config.sampling_period_us; + zones.sort_by_key(key); + let mut works: Vec<_> = zones + .into_iter() + .chunk_by(key) + .into_iter() + .map(|(sampling_period_us, zones)| { + let mut zones: Vec<_> = zones.collect(); + let process_zone = &process_zone; + Ok(wq::new_work_item!(wq, move |work| { + for zone in &mut zones { + process_zone(zone) + .with_context(|| format!("Could not read temperature from zone {}", zone.config.name)) + .print_err(); + } + work.enqueue(sampling_period_us); + })) + }).collect::>()?; + + for work in &mut works { + work.enqueue(0); + } + yield_!(Ok(Arc::new(()))); + Ok(()) + }), + )) + }, +} diff --git a/lisa/_assets/kmodules/lisa/rust/lisakmod/src/features/tracepoint.rs 
b/lisa/_assets/kmodules/lisa/rust/lisakmod/src/features/tracepoint.rs deleted file mode 100644 index 2ed02baccafba648dfa6bd2a8a23bdcf3631100d..0000000000000000000000000000000000000000 --- a/lisa/_assets/kmodules/lisa/rust/lisakmod/src/features/tracepoint.rs +++ /dev/null @@ -1,44 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ - -use alloc::{sync::Arc, vec::Vec}; - -pub use crate::runtime::tracepoint::*; -use crate::{ - features::{FeaturesConfig, define_feature}, - lifecycle::new_lifecycle, -}; - -#[derive(Debug)] -pub struct TracepointService { - dropper: ProbeDropper, -} - -impl TracepointService { - fn new() -> TracepointService { - TracepointService { - dropper: ProbeDropper::new(), - } - } - - pub fn probe_dropper(&self) -> &ProbeDropper { - &self.dropper - } -} - -define_feature! { - pub struct TracepointFeature, - name: "__tp", - visibility: Private, - Service: TracepointService, - Config: (), - dependencies: [], - init: |configs| { - Ok(( - FeaturesConfig::new(), - new_lifecycle!(|services| { - yield_!(Ok(Arc::new(TracepointService::new()))); - Ok(()) - }) - )) - }, -} diff --git a/lisa/_assets/kmodules/lisa/rust/lisakmod/src/features/wq.rs b/lisa/_assets/kmodules/lisa/rust/lisakmod/src/features/wq.rs index 5b50f3a9d96ad4af42b9506e6ffc38ddedd89010..dee9a6bf6f6fe7c696ad95f35da3b0ee9be4983e 100644 --- a/lisa/_assets/kmodules/lisa/rust/lisakmod/src/features/wq.rs +++ b/lisa/_assets/kmodules/lisa/rust/lisakmod/src/features/wq.rs @@ -4,7 +4,8 @@ use alloc::{sync::Arc, vec::Vec}; pub use crate::runtime::wq::*; use crate::{ - features::{FeaturesConfig, define_feature}, + error::Error, + features::{DependenciesSpec, define_feature}, lifecycle::new_lifecycle, }; @@ -14,8 +15,10 @@ pub struct WqService { } impl WqService { - fn new() -> WqService { - WqService { wq: Wq::new() } + fn new() -> Result { + Ok(WqService { + wq: Wq::new("lisa_features")?, + }) } pub fn wq(&self) -> &Wq { @@ -30,11 +33,12 @@ define_feature! 
{ Service: WqService, Config: (), dependencies: [], - init: |configs| { + resources: Default::default, + init: |_| { Ok(( - FeaturesConfig::new(), - new_lifecycle!(|services| { - yield_!(Ok(Arc::new(WqService::new()))); + DependenciesSpec::new(), + new_lifecycle!(|_| { + yield_!(Ok(Arc::new(WqService::new()?))); Ok(()) }) )) diff --git a/lisa/_assets/kmodules/lisa/rust/lisakmod/src/fmt.rs b/lisa/_assets/kmodules/lisa/rust/lisakmod/src/fmt.rs index 7e60263004b698ac590e088f45a6376e72cd14bd..1cb49c1842ea8be5c845013661066a9e9c8ef82e 100644 --- a/lisa/_assets/kmodules/lisa/rust/lisakmod/src/fmt.rs +++ b/lisa/_assets/kmodules/lisa/rust/lisakmod/src/fmt.rs @@ -1,15 +1,9 @@ /* SPDX-License-Identifier: GPL-2.0 */ -use core::{ - cmp::{max, min}, - mem::MaybeUninit, -}; +use core::cmp::{max, min}; use crate::runtime::{alloc::KernelAlloc, kbox::KBox}; - -// FIXME: when we get config and values from Makefile available somehow, this should be set to -// KBUILD_MODNAME -pub const PRINT_PREFIX: &str = "lisa: "; +pub use crate::version::print_prefix; // pub struct SliceWriter { // inner: W, @@ -150,7 +144,7 @@ where let mut new = KBox::::try_new_uninit_slice_in(new_size, *self.inner.allocator()) .map_err(|_| core::fmt::Error)?; new[..cur_size].write_copy_of_slice(&self.inner); - MaybeUninit::fill(&mut new[cur_size..], 0); + new[cur_size..].write_filled(0); // SAFETY: We initialized both the first part of the buffer from the old data and the extra // area with 0. 
let new = unsafe { new.assume_init() }; diff --git a/lisa/_assets/kmodules/lisa/rust/lisakmod/src/graph.rs b/lisa/_assets/kmodules/lisa/rust/lisakmod/src/graph.rs index 51c55e1d27a200a5ff2d1cff5b7c3b9116fac5b8..11c37fe3700e05c2bf7c3ddf73d26fc00a95faca 100644 --- a/lisa/_assets/kmodules/lisa/rust/lisakmod/src/graph.rs +++ b/lisa/_assets/kmodules/lisa/rust/lisakmod/src/graph.rs @@ -304,67 +304,6 @@ impl Graph { pub fn leaves(&self) -> impl Iterator> { self.links.leaves().map(|idx| self.cursor(idx)) } - - // FIXME: remove it if implementation is not finished - pub fn children_of(self, filter: F) -> Self - where - F: Fn(&T) -> bool, - { - let select = |value, parents: &mut dyn Iterator| { - let selected = parents - .collect::>() - .into_iter() - .any(|(selected, _)| *selected) - || filter(&value); - (selected, value) - }; - - let graph = self.dfs_map(DfsPostTraversal::new( - TraversalDirection::FromLeaves, - select, - )); - - let filter_links = |links: &Vec>| { - links - .iter() - .map(|nodes_idx| { - nodes_idx - .iter() - .copied() - .filter(|idx: &usize| { - let (selected, _) = &graph.nodes[*idx]; - *selected - }) - .collect::>() - }) - .collect::>() - }; - - let parents_idx = filter_links(&graph.links.parents); - let children_idx = filter_links(&graph.links.children); - - let links = Arc::new(Links { - parents: parents_idx, - children: children_idx, - }); - - // graph.nodes.into_iter().map(|(selected, value)|).collect(), - // for node in graph.nodes.into_iter() { - - // } - - // Graph { - // links: self.links, - // nodes: self.nodes.into_iter().map(f).collect(), - // } - - // for node in self. 
- - // for node in self.roots() { - // if filter(node.value()) - // } - todo!() - } } impl From>> for Result, MultiError> { diff --git a/lisa/_assets/kmodules/lisa/rust/lisakmod/src/init.rs b/lisa/_assets/kmodules/lisa/rust/lisakmod/src/init.rs index 45b34ba6f2b67328722fe3d0f2dda495e75093b6..ff533cd38b888d6856aa4917505cf88f584f75bc 100644 --- a/lisa/_assets/kmodules/lisa/rust/lisakmod/src/init.rs +++ b/lisa/_assets/kmodules/lisa/rust/lisakmod/src/init.rs @@ -1,24 +1,64 @@ /* SPDX-License-Identifier: GPL-2.0 */ -use alloc::collections::BTreeSet; -use core::ffi::c_int; +use alloc::{ + boxed::Box, + collections::{BTreeMap, btree_map}, + string::String, + sync::Arc, + vec, + vec::Vec, +}; +use core::{ + ffi::c_int, + ops::DerefMut, + pin::Pin, + sync::atomic::{AtomicU64, Ordering}, +}; + +use lisakmod_macros::inlinec::cfunc; use crate::{ - features::{Feature, Visibility, features_lifecycle}, - lifecycle::LifeCycle, + error::{Error, error}, + features::{ + DependenciesSpec, DependencySpec, + all::{AllFeatures, AllFeaturesConfig}, + features_lifecycle, + }, + lifecycle::{LifeCycle, new_lifecycle}, + query::{ConfigStackItem, QueryService, QuerySession, SessionId}, + runtime::{ + printk::{pr_err, pr_info}, + sync::{Lock as _, Mutex, new_static_lockdep_class}, + sysfs::Folder, + wq::Wq, + }, + version::module_version, }; -// FIXME: clean that up -// use alloc::{collections::BTreeSet, sync::Arc, vec::Vec}; -// use core::ffi::{c_int, c_ulong, c_void}; +// use alloc::{collections::BTreeMap, string::String, sync::Arc, vec, vec::Vec}; +// use core::{ +// ffi::{c_int, c_ulong, c_void}, +// ops::DerefMut, +// }; +// +// use lisakmod_macros::inlinec::cfunc; // // use crate::{ +// error::{Error, error}, // features::{ -// Feature, FeaturesConfig, FeaturesService, Visibility, define_feature, features_lifecycle, -// tracepoint::TracepointFeature, wq::WqFeature, +// DependenciesSpec, DependencySpec, Feature, FeaturesService, GenericConfig, +// all::{AllFeatures, AllFeaturesConfig}, +// 
define_feature, features_lifecycle, +// tracepoint::TracepointFeature, +// wq::WqFeature, // }, // lifecycle::{LifeCycle, new_lifecycle}, -// runtime::printk::pr_info, +// query::QueryService, +// runtime::{ +// printk::{pr_err, pr_info}, +// sync::{Lock as _, LockdepClass, Mutex}, +// }, +// version::module_version, // }; // define_feature! { // struct Feature1, @@ -29,7 +69,7 @@ use crate::{ // dependencies: [Feature2, TracepointFeature, WqFeature], // init: |configs| { // Ok(( -// FeaturesConfig::new(), +// DependenciesSpec::new(), // new_lifecycle!(|services| { // let services: FeaturesService = services; // let dropper = services.get::() @@ -141,6 +181,23 @@ use crate::{ // run(c"/trace-cmd extract -at > stdout.extract 2>&1"); // pr_info!("TRACING STOPED"); // +// +// use crate::runtime::sysfs::{KObjType, KObject}; +// +// let root = KObject::sysfs_module_root(); +// +// let kobj_type = Arc::new(KObjType::new()); +// let mut kobject = KObject::new(kobj_type.clone()); +// let mut kobject2 = KObject::new(kobj_type.clone()); +// +// kobject.add(Some(&root), "folder1") +// .expect("Could not add kobject to sysfs"); +// let kobject = kobject.publish().expect("Could not publish kobject"); +// +// kobject2.add(Some(&kobject), "folder2") +// .expect("Could not add kobject to sysfs"); +// let kobject2 = kobject2.publish().expect("Could not publish kobject"); +// // let feature2_service = services.get::(); // pr_info!("FEATURE2 service: {feature2_service:?}"); // yield_!(Ok(Arc::new(()))); @@ -154,7 +211,7 @@ use crate::{ // )) // }, // } -// + // #[derive(Debug)] // struct Feature2Service { // a: u32, @@ -169,7 +226,7 @@ use crate::{ // dependencies: [], // init: |configs| { // Ok(( -// FeaturesConfig::new(), +// DependenciesSpec::new(), // new_lifecycle!(|services| { // pr_info!("FEATURE2 start"); // let service = Feature2Service { a: 42 }; @@ -182,23 +239,217 @@ use crate::{ // } // define_event_feature!(struct myevent); -pub fn module_main() -> LifeCycle<(), (), 
c_int> { - // Always enable the C legacy features, as it is just a proxy to enable the C features. - let select_legacy = |feature: &dyn Feature| feature.name() == "__legacy_features"; - let select_public = |feature: &dyn Feature| { - select_legacy(feature) || feature.visibility() == Visibility::Public - }; - - let to_enable = crate::features::legacy::module_param_features(); - match to_enable { - None => features_lifecycle(select_public), - Some(features) => { - let features: BTreeSet<_> = features.collect(); - let select = |feature: &dyn Feature| { - select_legacy(feature) - || (select_public(feature) && features.contains(feature.name())) - }; - features_lifecycle(select) +pub struct State { + lifecycle: Mutex>>, + config_stack: Mutex>, + sessions: Mutex>, + session_id: AtomicU64, + wq: Pin>, +} + +fn pop_configs(mut stack: S, n: usize) -> Result +where + S: DerefMut>, +{ + let len = stack.len(); + if n > len { + Err(error!( + "Cannot pop {n} configs as only {len} are left in the stack" + )) + } else { + let new_len = len.saturating_sub(n); + stack.truncate(new_len); + Ok(new_len) + } +} + +impl State { + fn new() -> Result { + new_static_lockdep_class!(STATE_LIFECYCLE_LOCKDEP_CLASS); + new_static_lockdep_class!(STATE_CONFIG_STACK_LOCKDEP_CLASS); + new_static_lockdep_class!(STATE_SESSIONS_LOCKDEP_CLASS); + Ok(State { + lifecycle: Mutex::new(None, STATE_LIFECYCLE_LOCKDEP_CLASS.clone()), + config_stack: Mutex::new(Vec::new(), STATE_CONFIG_STACK_LOCKDEP_CLASS.clone()), + sessions: Mutex::new(BTreeMap::new(), STATE_SESSIONS_LOCKDEP_CLASS.clone()), + session_id: AtomicU64::new(0), + wq: Box::pin(Wq::new("lisa_state")?), + }) + } + + pub fn config_stack(&self) -> Result, Error> { + Ok(self.config_stack.lock().clone()) + } + + pub fn push_config(&self, query: ConfigStackItem) -> Result<(), Error> { + self.config_stack.lock().push(query); + Ok(()) + } + + pub fn pop_configs(&self, n: usize) -> Result { + let stack = self.config_stack.lock(); + pop_configs(stack, n) + } + 
+ pub fn pop_all_configs(&self) -> Result { + let stack = self.config_stack.lock(); + let n = stack.len(); + pop_configs(stack, n) + } + + pub fn restart(&self, f: F) -> Result<(), Error> + where + F: FnOnce() -> Result, Error>, + { + let mut lifecycle = self.lifecycle.lock(); + match &mut *lifecycle { + None => Ok(()), + Some(lifecycle) => lifecycle.stop(), + }?; + *lifecycle = Some(f()?); + lifecycle.as_mut().unwrap().start(()).cloned() + } + + pub fn stop(&self) -> Result<(), Error> { + match &mut *self.lifecycle.lock() { + None => Ok(()), + Some(lifecycle) => lifecycle.stop(), } } + + pub fn new_session(&self, root: &mut Folder, state: Arc) -> Result { + let id = self.session_id.fetch_add(1, Ordering::Relaxed); + let session = QuerySession::new(root, state, id)?; + match self.sessions.lock().entry(id) { + btree_map::Entry::Vacant(entry) => Ok(entry.insert(session).name()), + _ => Err(error!("Session ID {id} already exists")), + } + } + + pub fn with_session(&self, id: SessionId, f: F) -> Result + where + F: FnOnce(&mut QuerySession) -> T, + { + match self.sessions.lock().get_mut(&id) { + Some(session) => Ok(f(session)), + None => Err(error!("Could not find session ID {id}")), + } + } + + pub fn close_session(&self, id: SessionId) { + // Ensure we drop the lock guard before dropping the removed value, otherwise we can get a + // deadlock as we are taking session lock first then sysfs lock. When accessing control + // files, the kernel takes the sysfs lock and then we take the session lock + drop({ + let mut guard = self.sessions.lock(); + let x = guard.remove(&id); + drop(guard); + x + }) + } + + pub fn finalize(&self) { + // This is needed in order to break the Arc cycle: + // State -> QuerySession -> State + // which would otherwise prevent deallocation of the State. 
+ *self.sessions.lock() = BTreeMap::new(); + self.wq.clear_owned_work(); + } + + pub fn wq(&self) -> Pin<&Wq> { + self.wq.as_ref() + } +} + +#[cfunc] +fn enable_all_param() -> bool { + r#" + #include + + static bool ___param_enable_all_features = true; + module_param(___param_enable_all_features, bool, 0); + MODULE_PARM_DESC(___param_enable_all_features, "If true, make a best effort attempt to enable all the features with no specific configuration upon module load."); + "#; + + r#" + return ___param_enable_all_features; + "# +} + +#[cfunc] +fn list_kernel_features() { + r#" + #include + #include + #include "introspection.h" + "#; + + r#" + pr_info("Kernel features detected. This will impact the module features that are available:\n"); + const char *kernel_feature_names[] = {__KERNEL_FEATURE_NAMES}; + const bool kernel_feature_values[] = {__KERNEL_FEATURE_VALUES}; + for (size_t i=0; i < ARRAY_SIZE(kernel_feature_names); i++) { + pr_info(" %s: %s\n", kernel_feature_names[i], kernel_feature_values[i] ? "enabled" : "disabled"); + } + "# +} + +pub fn module_main() -> LifeCycle<(), (), c_int> { + new_lifecycle!(|_| { + let version = module_version(); + pr_info!("Loading Lisa module version {version}"); + + let state = Arc::new(State::new().map_err(|err| { + pr_err!("Error while creating the state workqueue: {err:#}"); + 1 + })?); + let query_service = QueryService::new(Arc::clone(&state)).map_err(|err| { + pr_err!("Error while creating the query service: {err:#}"); + 1 + })?; + + list_kernel_features(); + + let enable_all = enable_all_param(); + + // Legacy behavior: when loading the module without specifying any parameter, we load all + // the features on a best-effort basis. This allows a basic user to get all what they can + // that does not strictly require configuration out of the module. 
+ if enable_all { + let mut spec = DependenciesSpec::new(); + spec.insert::(DependencySpec::Mandatory { + configs: vec![AllFeaturesConfig { best_effort: true }], + }); + + let make_lifecycle = + || features_lifecycle(|feat| feat.name() == AllFeatures::NAME, spec, Vec::new()); + + // AFTER THIS POINT, NO MORE EARLY RETURN + // Since we are going to create a LifeCycle, it would need to be stop()-ed before exiting, + // so we don't want to do an early return and just drop it. + + yield_!(match state.restart(make_lifecycle) { + Ok(()) => Ok(()), + Err(err) => { + pr_err!("Error while starting features: {err:#}"); + // Best-effort basis, so we don't return an error code. If we did so, this + // would prevent from unloading the module. + Ok(()) + } + }); + } else { + yield_!(Ok(())); + } + + drop(query_service); + state.stop().map_err(|err| { + pr_err!("Error while stopping features: {err:#}"); + // This is ignored as the module_exit() returns void + 0 + })?; + + state.finalize(); + pr_info!("Unloaded Lisa module version {version}"); + Ok(()) + }) } diff --git a/lisa/_assets/kmodules/lisa/rust/lisakmod/src/lib.rs b/lisa/_assets/kmodules/lisa/rust/lisakmod/src/lib.rs index ad240b9e0c2ed426244addc1512a9e9ca5a38ebc..20b85048b3dab9d6512dd5307b57c813dcff685f 100644 --- a/lisa/_assets/kmodules/lisa/rust/lisakmod/src/lib.rs +++ b/lisa/_assets/kmodules/lisa/rust/lisakmod/src/lib.rs @@ -6,12 +6,12 @@ #![feature(coerce_unsized)] #![feature(unsize)] #![feature(maybe_uninit_write_slice)] -#![feature(type_alias_impl_trait)] #![feature(arbitrary_self_types_pointers)] #![feature(formatting_options)] #![feature(try_trait_v2)] -#![feature(fn_traits)] +#![feature(let_chains)] #![feature(unboxed_closures)] +#![feature(type_alias_impl_trait)] extern crate alloc; @@ -29,6 +29,7 @@ pub mod mem; pub mod misc; pub mod parsec; pub mod prelude; -pub mod registry; +pub mod query; pub mod runtime; pub mod typemap; +pub mod version; diff --git a/lisa/_assets/kmodules/lisa/rust/lisakmod/src/mem.rs 
b/lisa/_assets/kmodules/lisa/rust/lisakmod/src/mem.rs index 2ca9c7c880d444a683dfd5e035dc5f75c37f65f8..7dbccb129c53894820e1dd269c388d96441b5855 100644 --- a/lisa/_assets/kmodules/lisa/rust/lisakmod/src/mem.rs +++ b/lisa/_assets/kmodules/lisa/rust/lisakmod/src/mem.rs @@ -1,5 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ +use core::marker::PhantomData; + macro_rules! container_of { ($container:ty, $member:ident, $ptr:expr) => {{ let ptr = $ptr; @@ -22,6 +24,10 @@ macro_rules! mut_container_of { pub(crate) use mut_container_of; pub trait FromContained { + /// # Safety + /// + /// The returned *const Self must be a pointer valid for reads derived from contained. The + /// input "contained" must be valid for reads. unsafe fn from_contained(contained: *const Contained) -> *const Self; } @@ -70,3 +76,24 @@ macro_rules! destructure { } #[allow(unused_imports)] pub(crate) use destructure; + +pub struct NotSend { + _phantom: PhantomData<*const ()>, +} +impl Default for NotSend { + fn default() -> Self { + Self::new() + } +} + +impl NotSend { + pub fn new() -> NotSend { + NotSend { + _phantom: PhantomData, + } + } +} + +#[repr(transparent)] +pub struct UnsafeSync(pub T); +unsafe impl Sync for UnsafeSync {} diff --git a/lisa/_assets/kmodules/lisa/rust/lisakmod/src/parsec.rs b/lisa/_assets/kmodules/lisa/rust/lisakmod/src/parsec.rs index 7672e7b99e58224727b0911765f53989f40536b7..a0ebff1f3a05f40413699e5bda4b0ea683e18fca 100644 --- a/lisa/_assets/kmodules/lisa/rust/lisakmod/src/parsec.rs +++ b/lisa/_assets/kmodules/lisa/rust/lisakmod/src/parsec.rs @@ -357,19 +357,19 @@ where } }); let out = iter.collect(); - if let Some(min) = min { - if len < min { - return ParseResult::Failure { - err: E::from_msg("Not enough items consumed"), - }; - } + if let Some(min) = min + && len < min + { + return ParseResult::Failure { + err: E::from_msg("Not enough items consumed"), + }; } - if let Some(max) = max { - if len > max { - return ParseResult::Failure { - err: E::from_msg("Too many items 
consumed"), - }; - } + if let Some(max) = max + && len > max + { + return ParseResult::Failure { + err: E::from_msg("Too many items consumed"), + }; } ParseResult::Success { remainder: saved, diff --git a/lisa/_assets/kmodules/lisa/rust/lisakmod/src/query.rs b/lisa/_assets/kmodules/lisa/rust/lisakmod/src/query.rs new file mode 100644 index 0000000000000000000000000000000000000000..7d2505886553112fa2da461dbd5abd6eaeee1c2d --- /dev/null +++ b/lisa/_assets/kmodules/lisa/rust/lisakmod/src/query.rs @@ -0,0 +1,366 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +use alloc::{ + collections::{BTreeMap, BTreeSet}, + format, + string::String, + sync::Arc, + vec::Vec, +}; + +use schemars::{Schema, SchemaGenerator, json_schema, schema_for}; +use serde::Serialize; + +use crate::{ + error::{Error, ResultExt as _, error}, + features::{ + DependenciesSpec, Feature, FeatureResources, GenericConfig, Visibility, all_features, + features_lifecycle, + }, + init::State, + runtime::{ + sync::{Lock as _, Mutex, new_static_lockdep_class}, + sysfs::{BinFile, BinROContent, BinRWContent, Folder}, + wq::new_attached_work_item, + }, + version::module_version, +}; + +macro_rules! query_type { + ($($tt:tt)*) => { + #[derive(Debug, ::serde::Deserialize, ::schemars::JsonSchema)] + #[serde(rename_all = "kebab-case")] + $($tt)* + } +} +pub(crate) use query_type; + +query_type! 
{ + enum PopConfigN { + All, + #[serde(untagged)] + N(usize), + } +} + +fn push_config_schema(gen_: &mut SchemaGenerator) -> Schema { + let mut schema = json_schema!({ + "type": "object", + "additionalProperties": false, + "properties": {} + }); + match schema.get_mut("properties") { + Some(serde_json::Value::Object(properties)) => { + let features = all_features().filter(|feat| feat.visibility() == Visibility::Public); + for feature in features { + let val = feature.__config_schema(gen_); + let val = serde_json::to_value(val).unwrap(); + properties.insert(feature.name().into(), val); + } + } + _ => unreachable!(), + } + schema +} + +query_type! { + #[derive(Clone, Serialize)] + pub struct ConfigStackItem { + #[schemars(schema_with = "push_config_schema")] + config: BTreeMap, + enable_features: BTreeSet, + } +} + +query_type! { + enum Query { + PushFeaturesConfig(ConfigStackItem), + PopFeaturesConfig { n: PopConfigN }, + GetFeaturesConfig, + GetVersion, + GetResources, + GetSchemas, + StartFeatures, + StopFeatures, + CloseSession, + } +} + +impl Query { + fn execute( + self, + state: &Arc, + session: &mut QuerySession, + ) -> Result { + match self { + Query::CloseSession => { + // The eof_read() handler of the BinROContent will take care of destroying the + // session when the userspace has finished reading the file + session.finalized = true; + Ok(QuerySuccess::None) + } + Query::GetVersion => Ok(QuerySuccess::GetVersion { + checksum: module_version().into(), + }), + Query::PushFeaturesConfig(query) => { + state.push_config(query).map(|()| QuerySuccess::None) + } + Query::PopFeaturesConfig { n } => match n { + PopConfigN::N(n) => state.pop_configs(n), + PopConfigN::All => state.pop_all_configs(), + } + .map(|i| QuerySuccess::PopFeaturesConfig { remaining: i }), + Query::StartFeatures => { + let stack = state.config_stack()?; + + let allowed_features: BTreeMap = all_features() + .map(|feature| (feature.name().into(), feature)) + .collect(); + let features: 
BTreeSet<_> = stack + .iter() + .flat_map(|config| config.enable_features.clone()) + .map(|name| match allowed_features.get(&name) { + Some(feature) => { + if feature.visibility() == Visibility::Public { + Ok(name) + } else { + Err(error!("Cannot enable private feature: {name}")) + } + } + None => Err(error!("Cannot enable inexistent feature: {name}")), + }) + .collect::>()?; + + let configs: Vec<_> = stack.iter().map(|config| config.config.clone()).collect(); + + let base_config = DependenciesSpec::new(); + let select = |feature: &dyn Feature| features.contains(feature.name()); + state.restart(move || features_lifecycle(select, base_config, configs))?; + Ok(QuerySuccess::None) + } + Query::StopFeatures => state.stop().map(|()| QuerySuccess::None), + Query::GetFeaturesConfig => { + let stack = state.config_stack()?; + Ok(QuerySuccess::GetFeaturesConfig(stack)) + } + Query::GetResources => Ok(QuerySuccess::GetResources { + features: all_features() + .map(|feature| (feature.name().into(), feature.resources())) + .collect(), + }), + Query::GetSchemas => Ok(QuerySuccess::GetSchemas { + query: schema_for!(Query), + }), + } + } +} + +query_type! { + #[derive(Serialize)] + enum QuerySuccess { + PopFeaturesConfig { + remaining: usize, + }, + GetFeaturesConfig(Vec), + GetVersion { + checksum: String, + }, + GetResources { + features: BTreeMap, + }, + GetSchemas { + query: Schema, + }, + #[serde(untagged)] + None, + } +} + +query_type! { + #[derive(Serialize)] + enum QueryResult { + Success(QuerySuccess), + Error(Error), + } +} + +query_type! 
{ + #[derive(Serialize)] + enum QueriesResult { + Executed(Vec), + Error(E), + } +} + +impl QueriesResult { + fn to_json_string(&self) -> String + where + E: Serialize, + { + let mut s = match serde_json::to_string(self) { + Ok(s) => s, + // This should always serialize without any error + Err(err) => serde_json::to_string(&QueriesResult::Error(format!( + "Could not serialize the query result to JSON: {err}" + ))) + // Last resort + .expect("Could not serialize the query result to JSON"), + }; + s.push('\n'); + s + } +} + +pub type SessionId = u64; + +// Do not allow even reading for anyone else than the owner, as reading triggers the +// execution of the query. +const MODE: u16 = 0o600; + +pub struct QuerySession { + #[allow(dead_code)] + root: Folder, + #[allow(dead_code)] + execute_file: BinFile, + id: SessionId, + finalized: bool, +} + +impl QuerySession { + pub fn new(root: &mut Folder, state: Arc, id: SessionId) -> Result { + let name = Self::__name(id); + let mut root = Folder::new(root, &name)?; + let query_file = BinFile::new(&mut root, "query", 0o644, 1024 * 1024, BinRWContent::new())?; + + let execute = { + let state = Arc::clone(&state); + move || { + let res: Result, Error> = query_file.ops().with_content(|query| { + let query = core::str::from_utf8(query) + .map_err(|err| error!("Could not interpret query as UTF8: {err}"))?; + let queries: Vec = serde_json::from_str(query) + .map_err(|err| error!("Could not parse query: {err}"))?; + Ok(queries + .into_iter() + .map(|query| { + match state.with_session(id, |session| { + if session.finalized { + QueryResult::Error(error!( + "Session ID {id} was already finalized" + )) + } else { + match query.execute(&state, session) { + Ok(x) => QueryResult::Success(x), + Err(err) => QueryResult::Error(err), + } + } + }) { + Err(err) => QueryResult::Error(err), + Ok(res) => res, + } + }) + .collect()) + }); + let res = match res { + Ok(xs) => QueriesResult::Executed(xs), + Err(err) => QueriesResult::Error(err), + }; + 
let s = res.to_json_string(); + s.into_bytes() + } + }; + let execute_file = BinFile::new( + &mut root, + "execute", + // Do not allow even reading for anyone else than the owner, as reading triggers the + // execution of the query. + MODE, + 1024 * 1024, + BinROContent::new(execute, move || { + match state.with_session(id, |session| session.finalized) { + Ok(true) => { + let wq = state.wq(); + let state = Arc::clone(&state); + // It is not possible to remove kobjects from within the read handler (show() sysfs + // callback). All that could be done inline here is make the sysfs file/folder + // invisible but deallocation must take place somewhere else, so we use a workqueue + // for that. + new_attached_work_item!( + wq, + move |_work| { + state.close_session(id); + }, + |work| { + work.enqueue(0); + } + ); + } + Ok(false) => {} + err @ Err(_) => err.print_err(), + } + }), + )?; + + Ok(QuerySession { + root, + execute_file, + id, + finalized: false, + }) + } + + fn __name(id: SessionId) -> String { + format!("{id}") + } + + pub fn name(&self) -> String { + Self::__name(self.id) + } +} + +pub struct QueryService { + #[allow(dead_code)] + root: Arc>, + #[allow(dead_code)] + new_session_file: BinFile, + #[allow(dead_code)] + state: Arc, +} + +impl QueryService { + pub fn new(state: Arc) -> Result { + let mut root = Folder::sysfs_module_root(); + new_static_lockdep_class!(QUERIES_FOLDER_LOCKDEP_CLASS); + let root = Arc::new(Mutex::new( + Folder::new(&mut root, "queries")?, + QUERIES_FOLDER_LOCKDEP_CLASS.clone(), + )); + + let new_session = { + let root = Arc::clone(&root); + let state = Arc::clone(&state); + move || { + let name = state + .new_session(&mut root.lock(), Arc::clone(&state)) + .expect("Could not create query session"); + name.into_bytes() + } + }; + let max_size = 1024 * 1024; + let new_session_file = BinFile::new( + &mut root.lock(), + "new_session", + // Do not allow even reading for anyone else than the owner, as reading triggers the + // execution of 
the query. + MODE, + max_size, + BinROContent::new(new_session, || {}), + )?; + + Ok(QueryService { + root, + new_session_file, + state, + }) + } +} diff --git a/lisa/_assets/kmodules/lisa/rust/lisakmod/src/registry.rs b/lisa/_assets/kmodules/lisa/rust/lisakmod/src/registry.rs deleted file mode 100644 index b05860ce53673061c0d69e942a25f73beda02d41..0000000000000000000000000000000000000000 --- a/lisa/_assets/kmodules/lisa/rust/lisakmod/src/registry.rs +++ /dev/null @@ -1,107 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ - -// FIXME: remove this module since we have linkme working - -/// # Safety -/// -/// The start() and stop() methods must return pointers to boundaries of the section in which the -/// values of type T are stored. -pub unsafe trait Registry { - type T: 'static; - const NAME: &'static str; - - fn start() -> *const Self::T; - fn stop() -> *const Self::T; - - fn iter() -> impl Iterator { - RegistryIter { - start: Self::start(), - stop: Self::stop(), - curr: None, - } - } -} - -struct RegistryIter { - start: *const T, - stop: *const T, - curr: Option<*const T>, -} - -impl Iterator for RegistryIter -where - T: 'static, -{ - type Item = &'static T; - - fn next(&mut self) -> Option { - let next = |ptr: *const T| { - if ptr >= self.stop { - (ptr, None) - } else { - let value: &'static T = unsafe { &*ptr }; - (unsafe { ptr.offset(1) }, Some(value)) - } - }; - - match self.curr { - None => { - let (curr, value) = next(self.start); - self.curr = Some(curr); - value - } - Some(curr) => { - let (curr, value) = next(curr); - self.curr = Some(curr); - value - } - } - } -} - -macro_rules! 
define_registry { - ($name:ident, $ty:ty) => { - pub struct $name; - const _: () = { - unsafe extern "C" { - #[link_name = concat!("__start_", "rust_registry_", stringify!($name))] - static START: (); - - #[link_name = concat!("__stop_", "rust_registry_", stringify!($name))] - static STOP: (); - } - - unsafe impl $crate::registry::Registry for $name { - type T = $ty; - const NAME: &'static str = stringify!($name); - - fn start() -> *const Self::T { - unsafe { &START as *const () as *const Self::T } - } - fn stop() -> *const Self::T { - unsafe { &STOP as *const () as *const Self::T } - } - } - - unsafe impl Send for $name where $ty: Send {} - unsafe impl Sync for $name where $ty: Sync {} - }; - }; -} - -macro_rules! add_to_registry { - ($name:ident, $value:expr) => { - const _: () = { - const NAME: &[u8] = stringify!($name).as_bytes(); - match <$name as $crate::registry::Registry>::NAME.as_bytes() { - NAME => {}, - _ => panic!("The original name as passed to define_registry!(, ...) of the registry must be used", - ) - } - - #[unsafe(link_section = concat!("rust_registry_", stringify!($name)))] - #[used] - static ENTRY: <$name as $crate::registry::Registry>::T = $value; - }; - }; -} diff --git a/lisa/_assets/kmodules/lisa/rust/lisakmod/src/runtime/cpumask.rs b/lisa/_assets/kmodules/lisa/rust/lisakmod/src/runtime/cpumask.rs new file mode 100644 index 0000000000000000000000000000000000000000..ef23dae12e22568f9d0c6f0794114d2b763c0970 --- /dev/null +++ b/lisa/_assets/kmodules/lisa/rust/lisakmod/src/runtime/cpumask.rs @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +use core::ffi::{c_int, c_uint}; + +use lisakmod_macros::inlinec::cfunc; + +pub type CpuId = c_uint; + +#[cfunc] +pub fn smp_processor_id() -> CpuId { + r#" + #include + "#; + r#" + return smp_processor_id(); + "# +} + +pub fn active_cpus() -> impl Iterator { + #[cfunc] + fn next(cpu: &mut c_int) -> bool { + r#" + #include + "#; + r#" + *cpu = cpumask_next(*cpu, cpu_active_mask); + return *cpu < 
nr_cpu_ids; + "# + } + + let mut cpu = -1; + core::iter::from_fn(move || { + if next(&mut cpu) { + let cpu = cpu.try_into().expect("Invalid CPU ID"); + Some(cpu) + } else { + None + } + }) +} diff --git a/lisa/_assets/kmodules/lisa/rust/lisakmod/src/runtime/fs.rs b/lisa/_assets/kmodules/lisa/rust/lisakmod/src/runtime/fs.rs index 327c556ca11c47b42c37d600833b1d7d4fa4645b..56bce32fc1495d859ae5ce5665dec3f0bee6787b 100644 --- a/lisa/_assets/kmodules/lisa/rust/lisakmod/src/runtime/fs.rs +++ b/lisa/_assets/kmodules/lisa/rust/lisakmod/src/runtime/fs.rs @@ -15,6 +15,8 @@ use crate::{ runtime::printk::pr_err, }; +pub type FsMode = u16; + pub enum OpenFlags { ReadOnly, WriteOnly, @@ -30,7 +32,7 @@ impl From for c_int { } opaque_type!( - struct CFile, + pub struct CFile, "struct file", "linux/fs.h", ); @@ -42,12 +44,12 @@ pub struct File { } impl File { - pub fn open(path: &str, flags: OpenFlags, mode: u32) -> Result { + pub fn open(path: &str, flags: OpenFlags, mode: FsMode) -> Result { // kernel_file_open() would be more appropriate for in-kernel use, as filp_open() opens in // the context of the current userspace thread. It's somewhat ok since we only open files // during module init, and this runs as root anyway. 
#[cfunc] - fn filp_open(path: &CStr, flags: c_int, mode: u32) -> Result, PtrError> { + fn filp_open(path: &CStr, flags: c_int, mode: FsMode) -> Result, PtrError> { r#" #include #include "introspection.h" diff --git a/lisa/_assets/kmodules/lisa/rust/lisakmod/src/runtime/irqflags.rs b/lisa/_assets/kmodules/lisa/rust/lisakmod/src/runtime/irqflags.rs new file mode 100644 index 0000000000000000000000000000000000000000..ca0e72fdb2d1dd482d4c71b00fdc22d7e0fdf06c --- /dev/null +++ b/lisa/_assets/kmodules/lisa/rust/lisakmod/src/runtime/irqflags.rs @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +use core::ffi::c_ulong; + +use lisakmod_macros::inlinec::cfunc; + +pub struct LocalIrqDisabledGuard { + flags: c_ulong, +} + +pub fn local_irq_save() -> LocalIrqDisabledGuard { + #[cfunc] + pub fn local_irq_save() -> c_ulong { + r#" + #include + "#; + r#" + unsigned long flags; + local_irq_save(flags); + return flags; + "# + } + + LocalIrqDisabledGuard { + flags: local_irq_save(), + } +} + +impl Drop for LocalIrqDisabledGuard { + fn drop(&mut self) { + #[cfunc] + pub fn local_irq_restore(flags: c_ulong) { + r#" + #include + "#; + r#" + // In C, "unsigned long *" is incompatible with "unsigned long long*" even when they + // are both the same size. On 64bits platform, Rust c_ulong is u64, which translates + // over FFI as uint64_t, which is "unsigned long long". + // + // To avoid type check warnings, provide an actual "unsigned long" to + // local_irq_restore(). 
+ unsigned long _flags = flags; + local_irq_restore(_flags); + "# + } + + local_irq_restore(self.flags) + } +} diff --git a/lisa/_assets/kmodules/lisa/rust/lisakmod/src/runtime/mod.rs b/lisa/_assets/kmodules/lisa/rust/lisakmod/src/runtime/mod.rs index 89e038de84fb271e6ef3560675db44b01e8539d8..747e4383934b13dda577d1b53a96ce5be8e5c61c 100644 --- a/lisa/_assets/kmodules/lisa/rust/lisakmod/src/runtime/mod.rs +++ b/lisa/_assets/kmodules/lisa/rust/lisakmod/src/runtime/mod.rs @@ -1,7 +1,9 @@ /* SPDX-License-Identifier: GPL-2.0 */ pub mod alloc; +pub mod cpumask; pub mod fs; +pub mod irqflags; pub mod kbox; pub mod module; pub mod panic; @@ -10,6 +12,7 @@ pub mod sync; pub mod sysfs; pub mod traceevent; pub mod tracepoint; +pub mod version; pub mod wq; // TODO: remove ? diff --git a/lisa/_assets/kmodules/lisa/rust/lisakmod/src/runtime/panic.rs b/lisa/_assets/kmodules/lisa/rust/lisakmod/src/runtime/panic.rs index 23880cd0fcdb396a4cdce53fa23c1b1788527f58..5bdd60c6a55820768dc1623be57b51b2e31b07bc 100644 --- a/lisa/_assets/kmodules/lisa/rust/lisakmod/src/runtime/panic.rs +++ b/lisa/_assets/kmodules/lisa/rust/lisakmod/src/runtime/panic.rs @@ -5,7 +5,7 @@ use core::fmt::Write; use lisakmod_macros::inlinec::cfunc; use crate::{ - fmt::{KBoxWriter, PRINT_PREFIX}, + fmt::{KBoxWriter, print_prefix}, runtime::alloc::{GFPFlags, KmallocAllocator}, }; @@ -24,15 +24,15 @@ fn _panic(info: &core::panic::PanicInfo) -> ! { let msg = info.message(); match msg.as_str() { - Some(s) => panic(PRINT_PREFIX.as_bytes(), s.as_bytes()), + Some(s) => panic(print_prefix().as_bytes(), s.as_bytes()), None => { KBoxWriter::, _>::with_writer( - PRINT_PREFIX, + print_prefix(), "[...]", 128, |mut writer| { // Not much we can do with a write error here since we already are panicking. 
- let _ = write!(writer, "{}", msg); + let _ = write!(writer, "{msg}"); panic(&b""[..], writer.written()) }, ); diff --git a/lisa/_assets/kmodules/lisa/rust/lisakmod/src/runtime/printk.rs b/lisa/_assets/kmodules/lisa/rust/lisakmod/src/runtime/printk.rs index 974c08ae576ce0e7bd6357b8504f631eaa032ec7..449d95504952d5cc250f078027e78a7390042ca4 100644 --- a/lisa/_assets/kmodules/lisa/rust/lisakmod/src/runtime/printk.rs +++ b/lisa/_assets/kmodules/lisa/rust/lisakmod/src/runtime/printk.rs @@ -5,7 +5,7 @@ use core::fmt::Write as _; use lisakmod_macros::inlinec::cfunc; use crate::{ - fmt::{KBoxWriter, PRINT_PREFIX}, + fmt::{KBoxWriter, print_prefix}, runtime::alloc::{GFPFlags, KmallocAllocator}, }; @@ -58,10 +58,10 @@ pub fn __pr_level_impl(level: DmesgLevel, fmt: core::fmt::Arguments<'_>) -> core match fmt.as_str() { // If the format is just a plain string, we can simply display it directly, no need to // allocate anything. - Some(s) => write_dmesg(level, PRINT_PREFIX, s), + Some(s) => write_dmesg(level, print_prefix(), s), None => { KBoxWriter::, _>::with_writer( - PRINT_PREFIX, + print_prefix(), "[...]", 128, |mut writer| { diff --git a/lisa/_assets/kmodules/lisa/rust/lisakmod/src/runtime/sync.rs b/lisa/_assets/kmodules/lisa/rust/lisakmod/src/runtime/sync.rs index 174db95e41dead99eae252520b76c593ca740a19..8f8b396958b80b8c59b8406430aba0f578f8ccf2 100644 --- a/lisa/_assets/kmodules/lisa/rust/lisakmod/src/runtime/sync.rs +++ b/lisa/_assets/kmodules/lisa/rust/lisakmod/src/runtime/sync.rs @@ -1,9 +1,9 @@ /* SPDX-License-Identifier: GPL-2.0 */ -use alloc::{boxed::Box, sync::Arc}; +use alloc::{boxed::Box, ffi::CString, sync::Arc}; use core::{ cell::{Cell, UnsafeCell}, - ffi::c_void, + ffi::{CStr, c_uint, c_void}, marker::PhantomData, ops::{Deref, DerefMut}, pin::Pin, @@ -11,31 +11,39 @@ use core::{ use lisakmod_macros::inlinec::{cfunc, opaque_type}; -use crate::{mem::impl_from_contained, runtime::kbox::KernelKBox}; +use crate::{ + mem::{NotSend, impl_from_contained}, + 
runtime::kbox::KernelKBox, +}; -opaque_type!(struct CLockClassKey, "struct lock_class_key", "linux/lockdep.h"); +opaque_type!(pub struct CLockClassKey, "struct lock_class_key", "linux/lockdep.h"); // Inner type that we can make Sync, as UnsafeCell is !Sync struct InnerLockdepClass { c_key: UnsafeCell, + name: CString, } -unsafe impl Send for InnerLockdepClass {} -unsafe impl Sync for InnerLockdepClass {} - -pub struct LockdepClass { +#[derive(Clone)] +enum AllocatedLockdepClass { // SAFETY: InnerLockdepClass needs to be pinned, as CLockClassKey is part of some linked lists. - inner: Pin>, + Dyn(Pin>), + Static { + key: &'static CLockClassKey, + name: &'static CStr, + }, } -impl Default for LockdepClass { - fn default() -> Self { - Self::new() - } +unsafe impl Send for AllocatedLockdepClass {} +unsafe impl Sync for AllocatedLockdepClass {} + +#[derive(Clone)] +pub struct LockdepClass { + inner: AllocatedLockdepClass, } impl LockdepClass { - pub fn new() -> LockdepClass { + pub fn new(name: &str) -> LockdepClass { #[cfunc] unsafe fn lockdep_register_key(key: *mut CLockClassKey) { r#" @@ -52,8 +60,9 @@ impl LockdepClass { .expect("Could not allocate lock_class_key"), ); + let name = CString::new(name).expect("Cannot convert lockdep class name to C string"); let this = LockdepClass { - inner: Arc::pin(InnerLockdepClass { c_key }), + inner: AllocatedLockdepClass::Dyn(Arc::pin(InnerLockdepClass { c_key, name })), }; unsafe { lockdep_register_key(this.get_key()); @@ -61,8 +70,27 @@ impl LockdepClass { this } + pub const fn __internal_from_ref( + key: &'static CLockClassKey, + name: &'static CStr, + ) -> LockdepClass { + LockdepClass { + inner: AllocatedLockdepClass::Static { key, name }, + } + } + fn get_key(&self) -> *mut CLockClassKey { - UnsafeCell::get(&self.inner.c_key) + match &self.inner { + AllocatedLockdepClass::Dyn(inner) => UnsafeCell::get(&inner.c_key), + AllocatedLockdepClass::Static { key, .. 
} => *key as *const _ as *mut _, + } + } + + fn c_name(&self) -> &CStr { + match &self.inner { + AllocatedLockdepClass::Dyn(inner) => inner.name.as_c_str(), + AllocatedLockdepClass::Static { name, .. } => name, + } } } @@ -78,28 +106,83 @@ impl Drop for LockdepClass { lockdep_unregister_key(key); "# } - unsafe { - lockdep_unregister_key(self.get_key()); + + match self.inner { + AllocatedLockdepClass::Dyn(_) => unsafe { + lockdep_unregister_key(self.get_key()); + }, + AllocatedLockdepClass::Static { .. } => {} } } } +macro_rules! new_static_lockdep_class { + ($vis:vis $name:ident) => { + $vis static $name: $crate::runtime::sync::LockdepClass = $crate::runtime::sync::LockdepClass::__internal_from_ref( + { + #[::lisakmod_macros::inlinec::cstatic] + static STATIC_LOCKDEP_CLASS_KEY: $crate::runtime::sync::CLockClassKey = ( + "#include ", + "struct lock_class_key STATIC_VARIABLE;" + ); + unsafe { + &STATIC_LOCKDEP_CLASS_KEY + } + }, + match ::core::ffi::CStr::from_bytes_with_nul( + ::core::concat!(::core::stringify!($name),"\0").as_bytes() + ) { + Ok(s) => s, + Err(_) => unreachable!(), + } + ); + }; +} +#[allow(unused_imports)] +pub(crate) use new_static_lockdep_class; + +pub trait LockdepSubclass: Default { + fn to_u32(&self) -> u32; +} + +impl LockdepSubclass for u32 { + #[inline] + fn to_u32(&self) -> u32 { + *self + } +} + +impl LockdepSubclass for () { + #[inline] + fn to_u32(&self) -> u32 { + 0 + } +} + +type DefaultSubclass = (); + pub trait LockGuard<'guard> where Self: Deref, { type T; + type Subclass; } pub trait Lock where Self: Sync, { + type Subclass: LockdepSubclass; type T; - type Guard<'a>: LockGuard<'a, T = Self::T> + type Guard<'a>: LockGuard<'a, Subclass = Self::Subclass, T = Self::T> where Self: 'a; - fn lock(&self) -> Self::Guard<'_>; + #[inline] + fn lock(&self) -> Self::Guard<'_> { + self.lock_nested(::default()) + } + fn lock_nested(&self, subclass: Self::Subclass) -> Self::Guard<'_>; #[inline] fn with_lock U>(&self, f: F) -> U { @@ -158,14 
+241,14 @@ where /// it is possible to just copy it, and such reference can then be used to gain access to a ``&mut /// T``. pub unsafe trait PinnableLock: Lock {} -unsafe impl PinnableLock for Mutex {} -unsafe impl PinnableLock for SpinLock {} +unsafe impl PinnableLock for Mutex {} +unsafe impl PinnableLock for SpinLock {} pub struct PinnedLock { inner: L, } -impl_from_contained!((L) PinnedLock, inner: L); +impl_from_contained!((L,) PinnedLock, inner: L); impl PinnedLock where @@ -178,25 +261,37 @@ where #[inline] pub fn lock<'a>(self: Pin<&'a Self>) -> PinnedGuard<'a, L> { + self.lock_nested(<::Subclass as Default>::default()) + } + + #[inline] + pub fn lock_nested<'a>( + self: Pin<&'a Self>, + subclass: ::Subclass, + ) -> PinnedGuard<'a, L> { // SAFETY: As per PinnableLock guarantees, there is nothing else that can leak an &mut T, // so we can safely create a PinnedGuard that will allow getting an Pin<&mut T> PinnedGuard { - guard: self.get_ref().inner.lock(), + guard: self.get_ref().inner.lock_nested(subclass), } } } opaque_type!(struct CSpinLock, "spinlock_t", "linux/spinlock.h"); -pub struct SpinLock { +pub struct SpinLock { data: UnsafeCell, // The Rust For Linux binding pins the spinlock binding, so do the same here to avoid any // problems. c_lock: Pin>>, + // SAFETY: This needs to be freed after the CSpinLock that makes use of it as part of its + // lockdep map. 
+ #[allow(dead_code)] lockdep_class: LockdepClass, + _phantom: PhantomData, } -impl SpinLock { +impl SpinLock { #[inline] pub fn new(x: T, lockdep_class: LockdepClass) -> Self { #[cfunc] @@ -223,6 +318,7 @@ impl SpinLock { c_lock, lockdep_class, data: x.into(), + _phantom: PhantomData, } } @@ -240,7 +336,7 @@ impl SpinLock { } } -impl Drop for SpinLock { +impl Drop for SpinLock { #[inline] fn drop(&mut self) { if self.is_locked() { @@ -249,12 +345,14 @@ impl Drop for SpinLock { } } -pub struct SpinLockGuard<'a, T> { - lock: &'a SpinLock, +pub struct SpinLockGuard<'a, T, Subclass> { + lock: &'a SpinLock, flags: u64, + // The kernel API forces us to unlock a lock in the same task that locked it. + _notsend: NotSend, } -impl SpinLockGuard<'_, T> { +impl SpinLockGuard<'_, T, Subclass> { #[inline] #[allow(clippy::mut_from_ref)] fn get_mut(&self) -> &mut T { @@ -270,7 +368,7 @@ impl SpinLockGuard<'_, T> { } } -impl Deref for SpinLockGuard<'_, T> { +impl Deref for SpinLockGuard<'_, T, Subclass> { type Target = T; #[inline] fn deref(&self) -> &T { @@ -278,14 +376,14 @@ impl Deref for SpinLockGuard<'_, T> { } } -impl DerefMut for SpinLockGuard<'_, T> { +impl DerefMut for SpinLockGuard<'_, T, Subclass> { #[inline] fn deref_mut(&mut self) -> &mut T { self.get_mut() } } -impl Drop for SpinLockGuard<'_, T> { +impl Drop for SpinLockGuard<'_, T, Subclass> { #[inline] fn drop(&mut self) { #[cfunc] @@ -300,61 +398,83 @@ impl Drop for SpinLockGuard<'_, T> { } } -impl<'guard, T> LockGuard<'guard> for SpinLockGuard<'guard, T> { +impl<'guard, T, Subclass> LockGuard<'guard> for SpinLockGuard<'guard, T, Subclass> { type T = T; + type Subclass = Subclass; } -unsafe impl Sync for SpinLock {} -unsafe impl Send for SpinLock {} +unsafe impl Sync for SpinLock {} +unsafe impl Send for SpinLock {} -impl Lock for SpinLock { +impl Lock for SpinLock +where + T: Send, + Subclass: LockdepSubclass, +{ + type Subclass = Subclass; type T = T; type Guard<'a> - = SpinLockGuard<'a, Self::T> + = 
SpinLockGuard<'a, Self::T, Self::Subclass> where Self: 'a; #[inline] - fn lock(&self) -> Self::Guard<'_> { + fn lock_nested(&self, subclass: Self::Subclass) -> Self::Guard<'_> { #[cfunc] - fn spinlock_lock(lock: &UnsafeCell) -> u64 { + fn spinlock_lock(lock: &UnsafeCell, subclass: c_uint) -> u64 { "#include "; r#" unsigned long flags; - spin_lock_irqsave(lock, flags); + spin_lock_irqsave_nested(lock, flags, subclass); return (uint64_t)flags; "# } - let flags = spinlock_lock(&self.c_lock); + let flags = spinlock_lock(&self.c_lock, subclass.to_u32()); - SpinLockGuard { lock: self, flags } + SpinLockGuard { + lock: self, + flags, + _notsend: NotSend::new(), + } } } -impl<'a, T: 'a + Send> _LockMut<'a> for SpinLock {} +impl<'a, T, Subclass> _LockMut<'a> for SpinLock +where + T: 'a + Send, + Subclass: 'a + LockdepSubclass, +{ +} opaque_type!(pub struct CMutex, "struct mutex", "linux/mutex.h"); enum AllocatedCMutex { - KBox(Pin>>, LockdepClass), - Static(Pin<&'static UnsafeCell>), + KBox { + c_mutex: Pin>>, + #[allow(dead_code)] + lockdep_class: LockdepClass, + }, + Static { + c_mutex: Pin<&'static UnsafeCell>, + }, } impl AllocatedCMutex { #[inline] fn as_pin_ref(&self) -> Pin<&UnsafeCell> { match self { - AllocatedCMutex::KBox(c_mutex, _) => c_mutex.as_ref(), - AllocatedCMutex::Static(c_mutex) => *c_mutex, + AllocatedCMutex::KBox { c_mutex, .. } => c_mutex.as_ref(), + AllocatedCMutex::Static { c_mutex, .. } => *c_mutex, } } } macro_rules! new_static_mutex { ($vis:vis $name:ident, $ty:ty, $data:expr) => { - $vis static $name: $crate::runtime::sync::Mutex<$ty> = - $crate::runtime::sync::Mutex::__internal_from_ref($data, { + $vis static $name: $crate::runtime::sync::Mutex<$ty> = $crate::runtime::sync::Mutex::__internal_from_ref( + $data, + { #[::lisakmod_macros::inlinec::cstatic] static STATIC_MUTEX: $crate::runtime::sync::CMutex = ( "#include ", @@ -375,23 +495,28 @@ macro_rules! 
new_static_mutex { &*( &STATIC_MUTEX as *const _ as *const ::core::cell::UnsafeCell<_>) ) } - }); + } + ); }; } #[allow(unused_imports)] pub(crate) use new_static_mutex; -pub struct Mutex { +pub struct Mutex { data: UnsafeCell, // struct mutex contains a list_head, so it must be pinned. c_mutex: AllocatedCMutex, + _phantom: PhantomData, } -impl Mutex { +impl Mutex { #[inline] pub fn new(x: T, lockdep_class: LockdepClass) -> Self { #[cfunc] - fn mutex_alloc(lockdep_key: *mut CLockClassKey) -> Pin>> { + fn mutex_alloc( + lockdep_key: *mut CLockClassKey, + name: &CStr, + ) -> Pin>> { r#" #include #include @@ -402,23 +527,74 @@ impl Mutex { struct mutex *mutex = kzalloc(sizeof(struct mutex), GFP_KERNEL); if (mutex) { mutex_init(mutex); - lockdep_set_class(mutex, lockdep_key); + lockdep_set_class_and_name(mutex, lockdep_key, name); } return mutex; "# } - let c_mutex = mutex_alloc(lockdep_class.get_key()); + let c_mutex = mutex_alloc(lockdep_class.get_key(), lockdep_class.c_name()); Mutex { - c_mutex: AllocatedCMutex::KBox(c_mutex, lockdep_class), + c_mutex: AllocatedCMutex::KBox { + c_mutex, + lockdep_class, + }, data: x.into(), + _phantom: PhantomData, } } + // FIXME: this can only be done before the lock is locked for the first time, so disable this + // code for now as there is no immediate need. 
+ // pub fn set_lockdep_class(&mut self, lockdep_class: LockdepClass) { + // assert!( + // !self.is_locked(), + // "Changing lockdep class while lock is taken will lead to a lock imbalance warning" + // ); + // #[cfunc] + // fn set_lockdep_class( + // mutex: Pin<&UnsafeCell>, + // lockdep_key: *mut CLockClassKey, + // name: &CStr, + // ) { + // r#" + // #include + // #include + // #include + // "#; + + // r#" + // lockdep_set_class_and_name(mutex, lockdep_key, name); + // "# + // } + // set_lockdep_class( + // self.c_mutex.as_pin_ref(), + // lockdep_class.get_key(), + // lockdep_class.c_name(), + // ); + // } + + // pub fn disable_lockdep(&mut self) { + // #[cfunc] + // fn lockdep_set_novalidate_class(mutex: Pin<&UnsafeCell>) { + // r#" + // #include + // #include + // #include + // "#; + + // r#" + // lockdep_set_novalidate_class(mutex); + // "# + // } + // lockdep_set_novalidate_class(self.c_mutex.as_pin_ref()); + // } + #[inline] pub const fn __internal_from_ref(x: T, c_mutex: Pin<&'static UnsafeCell>) -> Self { Mutex { - c_mutex: AllocatedCMutex::Static(c_mutex), + c_mutex: AllocatedCMutex::Static { c_mutex }, data: UnsafeCell::new(x), + _phantom: PhantomData, } } @@ -436,9 +612,9 @@ impl Mutex { } } -impl_from_contained!((T) Mutex, data: T); +impl_from_contained!((T, Subclass) Mutex, data: T); -impl Drop for Mutex { +impl Drop for Mutex { #[inline] fn drop(&mut self) { if self.is_locked() { @@ -447,11 +623,13 @@ impl Drop for Mutex { } } -pub struct MutexGuard<'a, T> { - mutex: &'a Mutex, +pub struct MutexGuard<'a, T, Subclass> { + mutex: &'a Mutex, + // The kernel API forces us to unlock a lock in the same task that locked it. 
+ _notsend: NotSend, } -impl MutexGuard<'_, T> { +impl MutexGuard<'_, T, Subclass> { #[inline] #[allow(clippy::mut_from_ref)] fn get_mut(&self) -> &mut T { @@ -467,7 +645,7 @@ impl MutexGuard<'_, T> { } } -impl Deref for MutexGuard<'_, T> { +impl Deref for MutexGuard<'_, T, Subclass> { type Target = T; #[inline] fn deref(&self) -> &T { @@ -475,14 +653,14 @@ impl Deref for MutexGuard<'_, T> { } } -impl DerefMut for MutexGuard<'_, T> { +impl DerefMut for MutexGuard<'_, T, Subclass> { #[inline] fn deref_mut(&mut self) -> &mut T { self.get_mut() } } -impl Drop for MutexGuard<'_, T> { +impl Drop for MutexGuard<'_, T, Subclass> { #[inline] fn drop(&mut self) { #[cfunc] @@ -497,39 +675,52 @@ impl Drop for MutexGuard<'_, T> { } } -impl<'guard, T> LockGuard<'guard> for MutexGuard<'guard, T> { +impl<'guard, T, Subclass> LockGuard<'guard> for MutexGuard<'guard, T, Subclass> { type T = T; + type Subclass = Subclass; } -unsafe impl Send for Mutex {} -unsafe impl Sync for Mutex {} +unsafe impl Send for Mutex {} +unsafe impl Sync for Mutex {} -impl Lock for Mutex { +impl Lock for Mutex +where + Subclass: LockdepSubclass, +{ + type Subclass = Subclass; type T = T; type Guard<'a> - = MutexGuard<'a, Self::T> + = MutexGuard<'a, Self::T, Self::Subclass> where Self: 'a; #[inline] - fn lock(&self) -> Self::Guard<'_> { + fn lock_nested(&self, subclass: Self::Subclass) -> Self::Guard<'_> { #[cfunc] - fn mutex_lock(mutex: &UnsafeCell) { + fn mutex_lock_nested(mutex: &UnsafeCell, subclass: c_uint) { "#include "; r#" - return mutex_lock(mutex); + return mutex_lock_nested(mutex, subclass); "# } - mutex_lock(&self.c_mutex.as_pin_ref()); + mutex_lock_nested(&self.c_mutex.as_pin_ref(), subclass.to_u32()); - MutexGuard { mutex: self } + MutexGuard { + mutex: self, + _notsend: NotSend::new(), + } } } -impl<'a, T: 'a + Send> _LockMut<'a> for Mutex {} +impl<'a, T, Subclass> _LockMut<'a> for Mutex +where + T: 'a + Send, + Subclass: 'a + LockdepSubclass, +{ +} -pub struct Rcu { +pub struct Rcu { // 
This pointer is actually a Box in disguise obtained with Box::into_raw(). We keep it as a // raw pointer so that we can use C's rcu_assign_pointer() on it. // @@ -539,10 +730,13 @@ pub struct Rcu { data: Cell<*const T>, // Mutex used to protect the writers. - writer_mutex: Mutex<()>, + writer_mutex: Mutex<(), Subclass>, } -impl Rcu { +impl Rcu +where + Subclass: LockdepSubclass, +{ pub fn new(data: T, lockdep_class: LockdepClass) -> Self { Rcu { data: Cell::new(Box::into_raw(Box::new(data))), @@ -604,16 +798,19 @@ impl Rcu { } } -pub struct RcuGuard<'a, T> { +pub struct RcuGuard<'a, T, Subclass> { data: *const T, - _phantom: PhantomData<&'a T>, + _phantom: PhantomData<(&'a T, Subclass)>, + // The kernel API forces us to unlock a lock in the same task that locked it. + _notsend: NotSend, } -impl<'guard, T> LockGuard<'guard> for RcuGuard<'guard, T> { +impl<'guard, T, Subclass> LockGuard<'guard> for RcuGuard<'guard, T, Subclass> { type T = T; + type Subclass = Subclass; } -impl Deref for RcuGuard<'_, T> { +impl Deref for RcuGuard<'_, T, Subclass> { type Target = T; #[inline] @@ -624,7 +821,7 @@ impl Deref for RcuGuard<'_, T> { } } -impl Drop for RcuGuard<'_, T> { +impl Drop for RcuGuard<'_, T, Subclass> { fn drop(&mut self) { #[cfunc] fn rcu_unlock() { @@ -638,18 +835,23 @@ impl Drop for RcuGuard<'_, T> { } } -unsafe impl Sync for Rcu {} -unsafe impl Send for Rcu {} +unsafe impl Sync for Rcu {} +unsafe impl Send for Rcu {} -impl Lock for Rcu { +impl Lock for Rcu +where + T: Send, + Subclass: LockdepSubclass, +{ + type Subclass = Subclass; type T = T; type Guard<'a> - = RcuGuard<'a, Self::T> + = RcuGuard<'a, Self::T, Self::Subclass> where Self: 'a; #[inline] - fn lock(&self) -> Self::Guard<'_> { + fn lock_nested(&self, _subclass: Subclass) -> Self::Guard<'_> { #[cfunc] fn rcu_lock(ptr: *const c_void) -> *const c_void { "#include "; @@ -671,6 +873,35 @@ impl Lock for Rcu { RcuGuard { data, _phantom: PhantomData, + _notsend: NotSend::new(), + } + } +} + +pub struct 
LockdepGuard {} +impl Drop for LockdepGuard { + fn drop(&mut self) { + #[cfunc] + fn lockdep_on() { + "#include "; + + r#" + lockdep_on(); + "# } + lockdep_on(); + } +} + +pub fn disable_lockdep() -> LockdepGuard { + #[cfunc] + fn lockdep_off() { + "#include "; + + r#" + lockdep_off(); + "# } + lockdep_off(); + LockdepGuard {} } diff --git a/lisa/_assets/kmodules/lisa/rust/lisakmod/src/runtime/sysfs.rs b/lisa/_assets/kmodules/lisa/rust/lisakmod/src/runtime/sysfs.rs index 9a7b5a068986af81635b90204bf62d2430e4ae6e..02ded1e4424a802e47a369aac30bec5c0d301b90 100644 --- a/lisa/_assets/kmodules/lisa/rust/lisakmod/src/runtime/sysfs.rs +++ b/lisa/_assets/kmodules/lisa/rust/lisakmod/src/runtime/sysfs.rs @@ -1,21 +1,30 @@ /* SPDX-License-Identifier: GPL-2.0 */ -use alloc::{boxed::Box, string::String, sync::Arc}; +use alloc::{boxed::Box, ffi::CString, string::String, sync::Arc, vec::Vec}; use core::{ + any::Any, cell::UnsafeCell, + cmp::{max, min}, convert::Infallible, - ffi::{c_int, c_uint, c_void}, + ffi::{CStr, c_longlong, c_uint, c_void}, marker::PhantomData, mem::MaybeUninit, pin::Pin, }; -use lisakmod_macros::inlinec::{ceval, cexport, cfunc, opaque_type}; +use lisakmod_macros::inlinec::{NegativeError, c_realchar, ceval, cexport, cfunc, opaque_type}; -use crate::mem::{container_of, mut_container_of}; +use crate::{ + error::{Error, error}, + mem::{FromContained, container_of, impl_from_contained, mut_container_of}, + runtime::{ + fs::{CFile, FsMode}, + sync::{Lock as _, Mutex, new_static_lockdep_class}, + }, +}; opaque_type!( - struct CKObj, + pub struct CKObj, "struct kobject", "linux/kobject.h", attr_accessors {ktype: &'a CKObjType}, @@ -80,7 +89,17 @@ impl KObjType { "# } - let release_f: unsafe extern "C" fn(*mut CKObj) = release; + let release_f: unsafe extern "C" fn(*mut CKObj) = { + #[cfg(not(test))] + { + Some(release) + } + #[cfg(test)] + { + None + } + } + .unwrap(); let init = |this| init_kobj_type(this, release_f as *const c_void); let c_kobj_type = unsafe { 
CKObjType::new_stack(init) }.unwrap(); KObjType { c_kobj_type } @@ -264,7 +283,7 @@ pub trait KObjectState: private::Sealed {} struct SysfsSpec { name: String, - parent: Option>, + parent: Option>, } #[derive(Default)] @@ -275,13 +294,9 @@ impl private::Sealed for Init {} impl KObjectState for Init {} #[derive(Default)] -pub struct Finalized {} -impl private::Sealed for Finalized {} -impl KObjectState for Finalized {} - -#[derive(Debug)] -#[non_exhaustive] -pub enum Error {} +pub struct Published {} +impl private::Sealed for Published {} +impl KObjectState for Published {} pub struct KObject { // Use a pointer, so Rust does not make any assumption on the validity of the pointee. This @@ -297,9 +312,9 @@ pub struct KObject { // SAFETY: Nothing in KObject cares about what thread it is on, so it can be sent around without // issues. unsafe impl Send for KObject {} -// SAFETY: In the Finalized state, all APIs are as thread-safe as KObjectInner is. In the Init +// SAFETY: In the Published state, all APIs are as thread-safe as KObjectInner is. In the Init // state, we are not thread safe as there is some builder state maintained in the KObject -unsafe impl Sync for KObject {} +unsafe impl Sync for KObject {} impl KObject { #[inline] @@ -319,27 +334,38 @@ impl KObject { pub fn add<'a, 'parent, 'name>( &'a mut self, - parent: Option<&'parent KObject>, + parent: Option<&'parent KObject>, name: &'name str, - ) where + ) -> Result<(), Error> + where 'parent: 'a, { - // FIXME: should we return an error or panic ? 
- if let Some(parent) = parent { - assert!( - parent.inner().is_in_sysfs(), - "The parent of KObject \"{name}\" was not added to sysfs" - ); - } + let parent = match parent { + Some(parent) => { + if parent.inner().is_in_sysfs() { + Ok(Some(parent)) + } else { + Err(error!( + "The parent of KObject \"{name}\" was not added to sysfs" + )) + } + } + None => Ok(None), + }?; self.state.sysfs = Some(SysfsSpec { name: name.into(), parent: parent.cloned(), }); + Ok(()) } - pub fn finalize(self) -> Result, Error> { + pub fn publish(self) -> Result, Error> { #[cfunc] - unsafe fn kobj_add(kobj: *mut CKObj, parent: *mut CKObj, name: &[u8]) -> Result<(), c_int> { + unsafe fn kobj_add( + kobj: *mut CKObj, + parent: *mut CKObj, + name: &[u8], + ) -> Result> { r#" #include #include @@ -355,13 +381,13 @@ impl KObject { Some(parent) => parent.c_kobj(), }; unsafe { kobj_add(self.c_kobj(), parent, spec.name.as_bytes()) } - .expect("Call to kobject_add() failed"); + .map_err(|err| error!("Call to kobject_add() failed: {err}"))?; } Ok(KObject::from_inner(self.inner())) } } -impl KObject { +impl KObject { fn from_inner(inner: &KObjectInner) -> Self { inner.incref(); KObject { @@ -369,6 +395,16 @@ impl KObject { state: Default::default(), } } + + /// # Safety + /// + /// The passed *mut CKObj must be a pointer valid for reads and writes + pub unsafe fn from_c_kobj(c_kobj: *mut CKObj) -> Self { + let inner = KObjectInner::from_c_kobj(c_kobj); + let inner = unsafe { inner.as_ref().expect("Unexpected NULL pointer") }; + Self::from_inner(inner) + } + #[inline] pub fn sysfs_module_root() -> Self { let c_kobj = ceval!("linux/kobject.h", "&THIS_MODULE->mkobj.kobj", *mut CKObj); @@ -389,6 +425,10 @@ impl KObject { fn c_kobj(&self) -> *mut CKObj { self.inner().c_kobj.get() } + + pub fn refcount(&self) -> u64 { + self.inner().refcount() + } } impl Drop for KObject { @@ -403,9 +443,9 @@ impl Drop for KObject { } } -// Only implement clone for the Finalized state so that we do not accidentally call 
kobject_add() +// Only implement clone for the Published state so that we do not accidentally call kobject_add() // twice on the same underlying "struct kobject". -impl Clone for KObject { +impl Clone for KObject { fn clone(&self) -> Self { // SAFETY: self.inner is always valid as long as a KObject points to it. let inner = unsafe { self.inner.as_ref().unwrap() }; @@ -413,14 +453,350 @@ impl Clone for KObject { } } -// FIXME: clean that up -// fn foo() { -// use crate::runtime::sysfs::{KObjType, KObject}; - -// let root = KObject::module_root(); -// let kobj_type = Arc::new(KObjType::new()); -// let kobject = KObject::new(kobj_type.clone()); -// let kobject2 = KObject::new(kobj_type.clone()); -// kobject.add(Some(&root), "foo"); -// kobject2.add(Some(&kobject), "bar"); -// } +pub struct Folder { + kobj: Arc>, +} + +impl Folder { + pub fn new(parent: &mut Folder, name: &str) -> Result { + let kobj_type = Arc::new(KObjType::new()); + let mut kobj = KObject::new(kobj_type.clone()); + kobj.add(Some(&parent.kobj), name)?; + let kobj = Arc::new(kobj.publish()?); + Ok(Folder { kobj }) + } + + pub fn sysfs_module_root() -> Folder { + Folder { + kobj: Arc::new(KObject::::sysfs_module_root()), + } + } +} + +opaque_type!( + struct _CBinAttribute, + "struct bin_attribute", + "linux/sysfs.h", +); + +#[repr(transparent)] +struct CBinAttribute(UnsafeCell<_CBinAttribute>); +unsafe impl Send for CBinAttribute {} +unsafe impl Sync for CBinAttribute {} + +// SAFETY: FileInner must be pinned as CBinAttribute will contain a pointer to it in its +// "private" member. +pub struct FileInner { + c_bin_attr: CBinAttribute, + // SAFETY: name needs to be dropped _after_ c_bin_attr, as c_bin_attr contains a reference to + // it. It therefore needs to be specified afterwards in the struct definition order. + name: CString, + parent_kobj: Arc>, + + // Use a trait object so that we can implement FileInner::from_attr(). 
This way, we can get an + // arbitrary pointer and all the necessary type information is dynamically represented in the + // data. + ops: Box, +} + +impl_from_contained!(()FileInner, c_bin_attr: CBinAttribute); + +impl FileInner { + unsafe fn from_attr<'a>(attr: *const UnsafeCell<_CBinAttribute>) -> Pin<&'a FileInner> { + let attr = attr as *const CBinAttribute; + let inner: *const FileInner = unsafe { FromContained::from_contained(attr) }; + let inner: &FileInner = unsafe { inner.as_ref().unwrap() }; + unsafe { Pin::new_unchecked(inner) } + } +} + +pub trait BinOps: Any + Send + Sync { + fn read(&self, offset: usize, out: &mut [u8]) -> Result>; + fn write(&self, offset: usize, in_: &[u8]) -> Result>; +} + +pub struct BinRWContent { + content: Mutex>, +} + +impl Default for BinRWContent { + fn default() -> Self { + Self::new() + } +} + +impl BinRWContent { + pub fn new() -> BinRWContent { + new_static_lockdep_class!(BIN_ATTR_CONTENT_LOCKDEP_CLASS); + BinRWContent { + content: Mutex::new(Vec::new(), BIN_ATTR_CONTENT_LOCKDEP_CLASS.clone()), + } + } + + pub fn with_content(&self, f: F) -> T + where + F: FnOnce(&[u8]) -> T, + { + f(&self.content.lock()) + } +} + +impl BinOps for BinRWContent { + fn read(&self, offset: usize, out: &mut [u8]) -> Result> { + match self.content.lock().get(offset..) { + None => Ok(0), + Some(in_) => { + let written = min(in_.len(), out.len()); + out[..written].copy_from_slice(&in_[..written]); + Ok(written) + } + } + } + + fn write(&self, offset: usize, in_: &[u8]) -> Result> { + let mut content = self.content.lock(); + // Since we don't know have an opening mode, we don't know if the user wants to + // write the data from scratch or simply edit the current content at the given + // offset. As a result, we clear any existing data when writing at offset 0, but + // writing at other offsets simply appends/updates. 
+ // + // Unfortunately, "echo -n "" > foo" will not result in emptying the file, as + // userspace tools typically do not issue write() syscalls with count == 0. + // Instead, they just open it with O_WRONLY|O_CREAT|O_TRUNC, expecting the file to + // be truncated. Unfortunately, sysfs just ignores O_TRUNC so nothing particular + // happens and the file is not emptied. + let content_size = if offset == 0 { + content.clear(); + in_.len() + } else { + max(content.len(), offset.saturating_add(in_.len())) + }; + content.resize(content_size, 0); + + // Release memory if we are done with it, to avoid carrying a huge allocation forever just + // because of a single large write() happened in the lifetime of the file. + if content.capacity() > content.len().saturating_mul(4) { + content.shrink_to_fit() + } + + match content.get_mut(offset..) { + None => Err(NegativeError::EFBIG()), + Some(out) => { + let written = min(in_.len(), out.len()); + out[..written].copy_from_slice(&in_[..written]); + Ok(written) + } + } + } +} + +pub struct BinROContent { + inner: BinRWContent, + read: Box Vec + Send + Sync>, + eof_read: Box, +} + +impl BinROContent { + pub fn new(read: ReadF, eof_read: EofReadF) -> BinROContent + where + ReadF: 'static + Fn() -> Vec + Send + Sync, + EofReadF: 'static + Fn() + Send + Sync, + { + BinROContent { + inner: BinRWContent::new(), + read: Box::new(read), + eof_read: Box::new(eof_read), + } + } +} + +impl BinOps for BinROContent { + fn read(&self, offset: usize, out: &mut [u8]) -> Result> { + let inner = &self.inner; + if offset == 0 { + *inner.content.lock() = (self.read)(); + } + let count = inner.read(offset, out)?; + if count == 0 { + (self.eof_read)(); + } + Ok(count) + } + + fn write(&self, _offset: usize, _in: &[u8]) -> Result> { + Err(NegativeError::EINVAL()) + } +} + +pub struct BinFile { + // SAFETY: We need to pin FileInner as CBinAttribute contains a pointer to it. 
+ inner: Pin>, + _phantom: PhantomData, +} +unsafe impl Send for BinFile {} + +impl BinFile +where + Ops: 'static + BinOps, +{ + #[inline] + pub fn name(&self) -> &str { + // It was originally a String, so it can be converted to &str for sure. + self.inner.name.to_str().unwrap() + } + + pub fn new( + parent: &mut Folder, + name: &str, + mode: FsMode, + max_size: usize, + ops: Ops, + ) -> Result, Error> { + #[cexport] + fn read( + _file: &mut CFile, + _c_kobj: &mut CKObj, + attr: *const UnsafeCell<_CBinAttribute>, + // We need to use an FFI type that will turn into "char *" rather than "unsigned char*" + // or "signed char *", otherwise CFI will get upset as that function will be used for + // indirect calls by the sysfs infrastructure. + out: *mut c_realchar, + offset: c_longlong, + count: usize, + ) -> Result> { + let inner = unsafe { FileInner::from_attr(attr) }; + let out = unsafe { core::slice::from_raw_parts_mut(out as *mut u8, count) }; + if offset < 0 { + let offset: isize = offset.try_into().unwrap(); + Err(NegativeError::new(offset)) + } else { + let offset: usize = offset.try_into().unwrap(); + inner.ops.read(offset, out) + } + } + + #[cexport] + fn write( + _file: &mut CFile, + _c_kobj: &mut CKObj, + attr: *const UnsafeCell<_CBinAttribute>, + in_: *mut c_realchar, + offset: c_longlong, + count: usize, + ) -> Result> { + let inner = unsafe { FileInner::from_attr(attr) }; + let in_ = unsafe { core::slice::from_raw_parts_mut(in_ as *mut u8, count) }; + if offset < 0 { + let offset: isize = offset.try_into().unwrap(); + Err(NegativeError::new(offset)) + } else { + let offset: usize = offset.try_into().unwrap(); + inner.ops.write(offset, in_) + } + } + + #[cfunc] + unsafe fn init_bin_attribute( + attr: *mut _CBinAttribute, + name: &CStr, + mode: FsMode, + read: *mut c_void, + write: *mut c_void, + ) { + r#" + #include + "#; + + r#" + sysfs_bin_attr_init(attr); + attr->attr.name = name; + attr->attr.mode = mode; + attr->read = read; + attr->write = write; + "# 
+ } + + let name = CString::new(name) + .map_err(|err| error!("Could not convert file name to CString: {err}"))?; + + let inner = Box::into_pin(Box::new(FileInner { + // Allocate the CBinAttribute in a pinned box, and only then initialize it properly. + c_bin_attr: CBinAttribute(UnsafeCell::new( + unsafe { _CBinAttribute::new_stack(|_| Ok::<_, Infallible>(())) }.unwrap(), + )), + name, + parent_kobj: Arc::clone(&parent.kobj), + ops: Box::new(ops), + })); + unsafe { + init_bin_attribute( + inner.c_bin_attr.0.get(), + inner.name.as_c_str(), + mode, + read as *mut c_void, + write as *mut c_void, + ); + } + + #[cfunc] + unsafe fn create( + kobj: *mut CKObj, + attr: *mut _CBinAttribute, + inner: *mut c_void, + size: usize, + ) -> Result> { + r#" + #include + "#; + + r#" + attr->size = size; + attr->private = inner; + return sysfs_create_bin_file(kobj, attr); + "# + } + let ptr: *const FileInner = &*inner; + unsafe { + create( + inner.parent_kobj.c_kobj(), + inner.c_bin_attr.0.get(), + ptr as *mut c_void, + max_size, + ) + .map_err(|err| error!("sysfs_create_bin_file() failed: {err}"))?; + } + + Ok(BinFile { + inner, + _phantom: PhantomData, + }) + } + + pub fn ops(&self) -> &Ops { + let ops = &*self.inner.ops; + let ops = ops as &dyn Any; + ops.downcast_ref().unwrap() + } +} + +impl Drop for BinFile { + fn drop(&mut self) { + #[cfunc] + unsafe fn remove(kobj: *mut CKObj, attr: *mut _CBinAttribute) { + r#" + #include + "#; + + r#" + return sysfs_remove_bin_file(kobj, attr); + "# + } + + unsafe { + remove( + self.inner.parent_kobj.c_kobj(), + self.inner.c_bin_attr.0.get(), + ) + } + } +} diff --git a/lisa/_assets/kmodules/lisa/rust/lisakmod/src/runtime/traceevent.rs b/lisa/_assets/kmodules/lisa/rust/lisakmod/src/runtime/traceevent.rs index e86aa64718945ed0499d7ecfbcb1c0c58da4fac3..f4da314fdd5582bf992cfa6fd2b143f9e57dc52f 100644 --- a/lisa/_assets/kmodules/lisa/rust/lisakmod/src/runtime/traceevent.rs +++ b/lisa/_assets/kmodules/lisa/rust/lisakmod/src/runtime/traceevent.rs 
@@ -16,6 +16,75 @@ where const NAME: &'static str; } +#[derive(Clone, Copy, Debug)] +pub struct TracepointString { + pub s: &'static CStr, +} +impl TracepointString { + pub fn __private_new(s: &'static CStr) -> TracepointString { + TracepointString { s } + } +} + +#[allow(unused_macros)] +macro_rules! new_tracepoint_string { + ($s:expr) => {{ + const S: &'static str = $s; + const SLICE: &'static [u8] = S.as_bytes(); + + const BUF_LEN: usize = SLICE.len(); + // +1 for the NULL terminator + static BUF: [u8; (BUF_LEN + 1)] = { + let mut arr = [0u8; BUF_LEN + 1]; + let mut idx = 0; + while idx < BUF_LEN { + arr[idx] = SLICE[idx]; + idx += 1; + } + assert!(arr[S.len()] == 0); + arr + }; + + // TODO: the tracepoint_string() C macro seems to be appropriate, but unfortunately does + // not work in a module (despite the doc mentioning it should), so instead we abuse the + // __trace_printk_fmt section directly. + + // Note that as of Linux v6.15 the ftrace infrastructure will copy the string the first + // time the module is loaded and then modify the BUF_ADDR pointer itself to point at where + // the string has been copied to. When the module is unloaded, the copy stays alive and + // will be re-used the next time the module is loaded, by modifying BUF_ADDR again. It is + // therefore critical to always load the string address from BUF_ADDR itself, and not let + // the compiler optimize-away that load. + // + // Use a "static mut" so that the __trace_printk_fmt section is configured for read/write + // by the linker, otherwise the kernel will try to modify it and crash as a result. 
+ #[unsafe(link_section = "__trace_printk_fmt")] + static mut BUF_ADDR: $crate::mem::UnsafeSync<*const u8> = + $crate::mem::UnsafeSync(BUF.as_ptr()); + + let s2 = $crate::runtime::traceevent::TracepointString::__private_new(unsafe { + ::core::ffi::CStr::from_ptr(::core::ptr::read_volatile(&raw mut BUF_ADDR).0 as *const _) + }); + s2 + }}; +} + +#[allow(unused_imports)] +pub(crate) use new_tracepoint_string; + +impl FfiType for TracepointString { + const C_TYPE: &'static str = <&'static CStr as FfiType>::C_TYPE; + const C_HEADER: Option<&'static str> = <&'static CStr as FfiType>::C_HEADER; + type FfiType = <&'static CStr as FfiType>::FfiType; +} + +impl IntoFfi for TracepointString { + #[inline] + fn into_ffi(self) -> Self::FfiType { + self.s.into_ffi() + } +} + macro_rules! impl_field { ($ty:ty, $c_name:literal) => { impl FieldTy for $ty { @@ -36,6 +105,7 @@ impl_field!(u64, "u64"); impl_field!(i64, "s64"); impl_field!(&CStr, "c-string"); impl_field!(&str, "rust-string"); +impl_field!(TracepointString, "c-static-string"); macro_rules! 
new_event { ($name:ident, fields: {$($field_name:ident: $field_ty:ty),* $(,)?}) => {{ diff --git a/lisa/_assets/kmodules/lisa/rust/lisakmod/src/runtime/tracepoint.rs b/lisa/_assets/kmodules/lisa/rust/lisakmod/src/runtime/tracepoint.rs index c1f7bf9c53553e4c6cad6174bf49627388c064cf..1860f7c212d36fb476c73b56b6ac8059dcb8772f 100644 --- a/lisa/_assets/kmodules/lisa/rust/lisakmod/src/runtime/tracepoint.rs +++ b/lisa/_assets/kmodules/lisa/rust/lisakmod/src/runtime/tracepoint.rs @@ -1,8 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ -use alloc::{boxed::Box, ffi::CString, vec::Vec}; +use alloc::{boxed::Box, ffi::CString}; use core::{ - any::Any, cell::UnsafeCell, ffi::{CStr, c_int, c_void}, fmt, @@ -12,13 +11,7 @@ use core::{ use lisakmod_macros::inlinec::{cfunc, opaque_type}; -use crate::{ - error::{Error, error}, - runtime::{ - printk::pr_debug, - sync::{Lock as _, LockdepClass, Mutex}, - }, -}; +use crate::error::{Error, error}; opaque_type!( struct CTracepoint, @@ -146,7 +139,7 @@ impl<'tp, Args> Tracepoint<'tp, Args> { // SAFETY: If the Probe is alive, then its closure is alive too. Probe will be kept alive // until the last RegisteredProbe is dropped, at which point we known that // tracepoint_probe_unregister() has been called. 
- unsafe { register(self.c_tp.get(), probe.probe, probe.closure) } + unsafe { register(self.c_tp.get(), probe.probe, probe.data_ptr()) } .map_err(|code| error!("Tracepoint probe registration failed: {code}"))?; Ok(RegisteredProbe { probe, tp: self }) } @@ -186,45 +179,6 @@ impl<'probe, Args> Drop for RegisteredProbe<'probe, Args> { "# } - let probe = self.probe; - // SAFETY: Since we only create RegisteredProbe values when the registration succeeded, we - // ensure that this will be the only matching call to tracepoint_probe_unregister() - unsafe { unregister(self.tp.c_tp.get(), probe.probe, probe.closure as *mut _) } - .expect("Failed to unregister tracepoint probe"); - - pr_debug!( - "Called tracepoint_probe_unregister() for a probe attached to {:?}", - self.tp - ); - } -} - -pub struct ProbeDropper { - droppers: Mutex>>>>, -} - -impl fmt::Debug for ProbeDropper { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { - f.debug_struct("ProbeDropper").finish() - } -} - -impl Default for ProbeDropper { - fn default() -> Self { - Self::new() - } -} - -impl ProbeDropper { - pub fn new() -> ProbeDropper { - ProbeDropper { - droppers: Mutex::new(Vec::new(), LockdepClass::new()), - } - } -} - -impl Drop for ProbeDropper { - fn drop(&mut self) { #[cfunc] fn tracepoint_synchronize_unregister() { r#" @@ -236,29 +190,26 @@ impl Drop for ProbeDropper { "# } - tracepoint_synchronize_unregister(); + let probe = self.probe; + // SAFETY: Since we only create RegisteredProbe values when the registration succeeded, we + // ensure that this will be the only matching call to tracepoint_probe_unregister() + unsafe { unregister(self.tp.c_tp.get(), probe.probe, probe.data_ptr()) } + .expect("Failed to unregister tracepoint probe"); - pr_debug!("Called tracepoint_synchronize_unregister(), de-allocating closures now"); - - // SAFETY: If we are dropped, that means we are not borrowed anymore, and since: - // * RegisteredProbe borrows Probe us while the probe can fire, and 
- // * Probe borrows ProbeDropper while it is alive. - // then it means the only consumer of the closure left are the tracepoint themselves, which - // we ensured do not use it anymore since: - // * = dyn Fn + Send + Sync + 'probe; +pub type Closure<'probe, Args> = Pin>>; + pub struct Probe<'probe, Args> { - closure: *const c_void, probe: *const c_void, - _phantom: PhantomData<(&'probe (), Args)>, + // We need a 2nd layer of Box here so we have a thin pointer to share. The actual *const dyn + // Fn() pointer for the closure is a fat pointer since trait objects are unsized, and as such + // cannot fit in a *const c_void + closure: Box>, } impl<'probe, Args> Probe<'probe, Args> { @@ -266,51 +217,44 @@ impl<'probe, Args> Probe<'probe, Args> { /// /// The `probe` must be compatible with the [`Closure`] type in use. #[inline] - pub unsafe fn __private_new( - closure: Pin>, + pub unsafe fn __private_new( + closure: Closure<'probe, Args>, probe: *const c_void, - // SAFETY: we borrow ProbeDropper for &'probe, which ensures the ProbeDropper will not be - // dropped before Probe<'probe, _> - dropper: &'probe ProbeDropper, - ) -> Probe<'probe, Args> - where - Closure: 'static + Send + Sync, - { - let ptr: &Closure = &closure; - let ptr: *const Closure = ptr; - - dropper.droppers.lock().push(Some(closure)); - // SAFETY: the probe and closure pointer we store here are guaranteed to be valid for the - // lifetime of the Probe object, as we borrow the ProbeDropper for that duration. 
- Probe { - closure: ptr as *const c_void, - probe, - _phantom: PhantomData, - } + ) -> Probe<'probe, Args> { + let closure = Box::new(closure); + Probe { closure, probe } + } + + fn data_ptr(&self) -> *const c_void { + // SAFETY: This must be kept in sync with the probe function defined in new_probe!() + let ref_: &Closure<'probe, Args> = self.closure.as_ref(); + ref_ as *const Closure<'probe, Args> as *const c_void } } -// SAFETY: this is ok as the closure we store is also Send +// SAFETY: this is ok as the DynClosure we store is also Send unsafe impl<'probe, Args> Send for Probe<'probe, Args> {} -// SAFETY: this is ok as the closure we store is also Sync +// SAFETY: this is ok as the DynClosure we store is also Sync unsafe impl<'probe, Args> Sync for Probe<'probe, Args> {} +#[allow(unused_macros)] macro_rules! new_probe { - ($dropper:expr, ( $($arg_name:ident: $arg_ty:ty),* ) $body:block) => { + (( $($arg_name:ident: $arg_ty:ty),* ) $body:block) => { { - // SAFETY: We need to ensure Send and Sync for the closure, as Probe relies on that to - // soundly implement Send and Sync - type Closure = impl Fn($($arg_ty),*) + ::core::marker::Send + ::core::marker::Sync + 'static; - let closure: ::core::pin::Pin<::alloc::boxed::Box> = ::alloc::boxed::Box::pin( + type Args = ($($arg_ty,)*); + let closure: $crate::runtime::tracepoint::Closure = ::alloc::boxed::Box::pin( move |$($arg_name: $arg_ty),*| { $body } ); + // Use *mut c_void as we get clang CFI violations otherwise #[::lisakmod_macros::inlinec::cexport] - fn probe(closure: *const c_void, $($arg_name: $arg_ty),*) { - let closure = closure as *const Closure; + fn probe(closure: *mut ::core::ffi::c_void, $($arg_name: $arg_ty),*) { + // SAFETY: this is the same type as what Probe::data_ptr() provides. 
+ let closure = closure as *const $crate::runtime::tracepoint::Closure<'static, Args>; + // SAFETY: Since we call tracepoint_probe_unregister() in ::drop(), and RegisteredProbe keeps the Probe and its closure alive, then // the probe should never run after the closure is dropped. @@ -320,10 +264,9 @@ macro_rules! new_probe { #[allow(unused_unsafe)] unsafe { - $crate::runtime::tracepoint::Probe::<( $($arg_ty),* )>::__private_new( + $crate::runtime::tracepoint::Probe::<( $($arg_ty,)* )>::__private_new( closure, - probe as *const c_void, - $dropper, + probe as *const ::core::ffi::c_void, ) } } diff --git a/lisa/_assets/kmodules/lisa/rust/lisakmod/src/runtime/version.rs b/lisa/_assets/kmodules/lisa/rust/lisakmod/src/runtime/version.rs new file mode 100644 index 0000000000000000000000000000000000000000..3be3608ca3996a83861fac85d890d0ee82d5a687 --- /dev/null +++ b/lisa/_assets/kmodules/lisa/rust/lisakmod/src/runtime/version.rs @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +use lisakmod_macros::inlinec::cconstant; + +pub const fn kernel_version() -> (u32, u32, u32) { + const CODE: u32 = match cconstant!("#include ", "LINUX_VERSION_CODE") { + Some(x) => x, + None => 0, + }; + const MAJOR: u32 = (CODE >> 16) & 0xff; + const SUBLEVEL: u32 = (CODE >> 8) & 0xff; + const PATCH: u32 = CODE & 0xff; + (MAJOR, SUBLEVEL, PATCH) +} diff --git a/lisa/_assets/kmodules/lisa/rust/lisakmod/src/runtime/wq.rs b/lisa/_assets/kmodules/lisa/rust/lisakmod/src/runtime/wq.rs index 92f7b881732aa3ad64582e364e1792d4368a5440..8a09f61d5ea2c2a6ee8c8b0aa47e187f93346f79 100644 --- a/lisa/_assets/kmodules/lisa/rust/lisakmod/src/runtime/wq.rs +++ b/lisa/_assets/kmodules/lisa/rust/lisakmod/src/runtime/wq.rs @@ -1,68 +1,213 @@ /* SPDX-License-Identifier: GPL-2.0 */ -use alloc::boxed::Box; +use alloc::{boxed::Box, collections::BTreeMap, string::String}; use core::{ cell::UnsafeCell, convert::Infallible, - ffi::{CStr, c_int, c_void}, + ffi::c_void, + fmt, pin::Pin, ptr::NonNull, + 
sync::atomic::{AtomicUsize, Ordering}, }; -use lisakmod_macros::inlinec::{cfunc, incomplete_opaque_type, opaque_type}; -use pin_project::pin_project; +use lisakmod_macros::inlinec::{cexport, cfunc, incomplete_opaque_type, opaque_type}; +use pin_project::{pin_project, pinned_drop}; use crate::{ - mem::{FromContained, impl_from_contained}, - runtime::sync::{LockdepClass, Mutex, PinnedLock}, + error::{Error, error}, + mem::{FromContained, destructure, impl_from_contained}, + runtime::sync::{Lock as _, LockdepClass, LockdepSubclass, Mutex, PinnedLock}, }; incomplete_opaque_type!( - struct CWq, + pub struct CWq, "struct workqueue_struct", "linux/workqueue.h" ); -#[derive(Debug)] +type Key = usize; + +#[derive(Clone, Copy, Default)] +pub enum LockdepState { + #[default] + Normal, + Init, +} + +impl LockdepSubclass for LockdepState { + fn to_u32(&self) -> u32 { + (*self) as u32 + } +} + +struct OwnedWorkItem { + ptr: Option<*const ()>, + drop_from_worker: Box, + drop_normal: Box, +} +unsafe impl Send for OwnedWorkItem {} + +impl OwnedWorkItem { + fn drop_from_worker(mut self) { + (self.drop_from_worker)( + self.ptr + .take() + .expect("OwnedWorkItem has already been dropped"), + ) + } + + fn drop_normal(mut self) { + (self.drop_normal)( + self.ptr + .take() + .expect("OwnedWorkItem has already been dropped"), + ) + } +} + +impl Drop for OwnedWorkItem { + fn drop(&mut self) { + assert!( + self.ptr.is_none(), + "OwnedWorkItem must be dropped with a method" + ) + } +} + pub struct Wq { c_wq: NonNull>, + work_nr: AtomicUsize, + name: String, + owned_work: Mutex>, } unsafe impl Send for Wq {} unsafe impl Sync for Wq {} -impl Default for Wq { - fn default() -> Self { - Self::new() +impl fmt::Debug for Wq { + #[inline] + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Wq").finish_non_exhaustive() } } impl Wq { #[inline] - pub fn new() -> Wq { + pub fn new(name: &str) -> Result { #[cfunc] - fn allo_workqueue(name: &CStr) -> Option>> { + fn 
allo_workqueue(name: &str) -> Option>> { r#" #include "#; r#" // Expose the workqueue in sysfs if a user needs to tune the CPU placement. - return alloc_workqueue(name, WQ_SYSFS | WQ_FREEZABLE, 0); + return alloc_workqueue("%.*s", WQ_SYSFS | WQ_FREEZABLE, 0, (int)name.len, name.data); "#; } - let c_wq = allo_workqueue(c"lisa").expect("Unable to allocate struct workqueue_struct"); - Wq { c_wq } + match allo_workqueue(name) { + None => Err(error!("Unable to allocate struct workqueue_struct")), + Some(c_wq) => Ok(Wq { + c_wq, + work_nr: AtomicUsize::new(0), + name: name.into(), + owned_work: Mutex::new( + BTreeMap::new(), + // Different workqueues may have different lifecycles and usage, so they can + // each get a new LockdepClass. + LockdepClass::new("WQ_OWNED_WORK_LOCKDEP_CLASS"), + ), + }), + } } #[inline] fn c_wq(&self) -> &UnsafeCell { unsafe { self.c_wq.as_ref() } } + + pub fn __attach<'a, 'wq, 'f, Init>( + self: Pin<&'a Self>, + work_item: WorkItem<'wq, 'f>, + init: Init, + ) -> Key + where + Self: 'wq, + Init: FnOnce(&mut dyn AbstractWorkItem), + { + // Use LockdepState::Init, so that lockdep can see the difference between locks taken + // during the init phase and when the work actually starts running. 
+ let lockdep_state = LockdepState::Init; + let key: *const DelayedWork = + work_item.__with_dwork(lockdep_state, |dwork| dwork.as_ref().get_ref() as *const _); + let key = key as usize; + + let work_item = Box::into_raw(Box::new(work_item)); + // SAFETY: We clear the owned_work in Drop, thereby ensuring a WorkItem<'wq> never ends + // up surviving its associated Wq + { + let mut owned_work_guard = self.owned_work.lock(); + owned_work_guard.insert( + key, + OwnedWorkItem { + ptr: Some(work_item as *const ()), + drop_from_worker: Box::new(|ptr| { + let ptr = ptr as *mut WorkItem<'wq, 'f>; + let work = unsafe { Box::from_raw(ptr) }; + // OwnedWorkItem::drop_normal() would block until the worker is not running + // anymore, leading to a deadlock if called from the worker function. + work.drop_unsync(); + }), + drop_normal: Box::new(|ptr| { + let ptr = ptr as *mut WorkItem<'wq, 'f>; + let _ = unsafe { Box::from_raw(ptr) }; + }), + }, + ); + // SAFETY: + // * We hold the owned_work lock here, so nothing else can remove the work item we just + // inserted. + // * There is no other reference to the work_item (we stored a pointer in the BTreeMap, + // not a reference) + // * We hold the work_item lock as well, so it cannot run and drop itself while we are + // running the init function. 
+ let work_item = unsafe { work_item.as_mut().unwrap() }; + work_item.__with_dwork(lockdep_state, |mut dwork| { + let abstr: &mut dyn AbstractWorkItem = &mut dwork; + init(abstr); + }); + drop(owned_work_guard); + } + key + } + + pub fn __detach(&self, key: Key) { + if let Some(owned) = self.owned_work.lock().remove(&key) { + owned.drop_from_worker() + } + } + + pub fn clear_owned_work(&self) { + let map = core::mem::take(&mut *self.owned_work.lock()); + for owned in map.into_values() { + owned.drop_normal() + } + } } impl Drop for Wq { fn drop(&mut self) { + self.clear_owned_work(); + + let work_nr = self.work_nr.load(Ordering::Relaxed); + if work_nr != 0 { + panic!( + "{work_nr} work items have not been dropped befre the destruction of the {name} workqueue", + name = self.name + ); + } + #[cfunc] fn destroy_workqueue(wq: NonNull>) { r#" @@ -87,7 +232,7 @@ opaque_type!( pub struct DelayedWork<'wq> { #[pin] c_dwork: CDelayedWork, - wq: &'wq Wq, + pub wq: &'wq Wq, // Flag set to true when the work should not be re-enqueued anymore. // This flag is unnecessary if disable_delayed_work_sync() function is available but for older // kernel < 6.10 where we only have cancel_delayed_work_sync(), we need to handle that manually @@ -123,61 +268,66 @@ impl<'wq> DelayedWork<'wq> { let _ = enqueue(this.wq.c_wq(), this.c_dwork, delay_us); } } + + #[inline] + fn is_pending(&self) -> bool { + #[cfunc] + fn is_pending(dwork: &CDelayedWork) -> bool { + r#" + #include + "#; + + r#" + return delayed_work_pending(dwork); + "#; + } + is_pending(&self.c_dwork) + } } -#[pin_project] -pub struct WorkItemInner<'wq, F> { +// SAFETY: We only require F to be Send instead of Send + Sync as the workqueue API guarantees +// that the worker will never execute more than once at a time provided the following +// conditions are met: +// > Workqueue guarantees that a work item cannot be re-entrant if the following conditions hold +// > after a work item gets queued: +// > 1. 
The work function hasn’t been changed. +// > 2. No one queues the work item to another workqueue. +// > 3. The work item hasn’t been reinitiated. +// > In other words, if the above conditions hold, the work item is guaranteed to be executed by +// > at most one worker system-wide at any given time. +// +// Condition 1. is trivially satisfied as we never update the DelayedWork. +// Condition 2. is trivially satisfied as we never enqueue the DelayedWork to more than one +// workqueue. +// Condition 3. is trivially satisfied as we never re-initialize a work item. +// +// So we can assume that the workqueue API will take care of synchronization and all we need is +// F to be Send. +type WorkF<'f> = Box Action + Send + 'f>; + +#[pin_project(PinnedDrop)] +pub struct WorkItemInner<'wq, 'f> { // SAFETY: WorkItemInner _must_ be pinned as the address of __dwork will be passed around C // APIs. #[pin] pub __dwork: DelayedWork<'wq>, - pub __f: F, + pub __f: WorkF<'f>, } -impl_from_contained!(('wq, F) WorkItemInner<'wq, F>, __dwork: DelayedWork<'wq>); - -impl<'wq, F> WorkItemInner<'wq, F> -where - // SAFETY: We only require F to be Send instead of Send + Sync as the workqueue API guarantees - // that the worker will never execute more than once at a time provided the following - // conditions are met: - // > Workqueue guarantees that a work item cannot be re-entrant if the following conditions hold - // > after a work item gets queued: - // > 1. The work function hasn’t been changed. - // > 2. No one queues the work item to another workqueue. - // > 3. The work item hasn’t been reinitiated. - // > In other words, if the above conditions hold, the work item is guaranteed to be executed by - // > at most one worker system-wide at any given time. - // - // Condition 1. is trivially satisfied as we never update the DelayedWork. - // Condition 2. is trivially satisfied as we never enqueue the DelayedWork to more than one - // workqueue. - // Condition 3. 
is trivially satisfied as we never re-initialize a work item. - // - // So we can assume that the workqueue API will take care of synchronization and all we need is - // F to be Send. - F: Send, -{ +impl_from_contained!(('wq, 'f) WorkItemInner<'wq, 'f>, __dwork: DelayedWork<'wq>); + +impl<'wq, 'f> WorkItemInner<'wq, 'f> { #[inline] - fn new(wq: &'wq Wq, wrapper: W) -> Pin>> - where - W: ClosureWrapper, - { - #[cfunc] - unsafe fn init_dwork( + fn new( + wq: &'wq Wq, + f: WorkF<'f>, + lockdep_class: LockdepClass, + init_dwork: unsafe fn( wq: &UnsafeCell, dwork: Pin<&mut CDelayedWork>, worker: *const c_void, - ) -> Result<(), c_int> { - r#" - #include - "#; - - r#" - INIT_DELAYED_WORK(dwork, worker); - return 0; - "#; - } + ), + ) -> Pin>> { // SAFETY: CDelayedWork does not have any specific validity invariant since it's // essentially an opaque type. We don't want to pass it to the C API before it is moved to // its final memory location in a Box. @@ -188,57 +338,91 @@ where disable: false, }; let new = Box::pin(PinnedLock::new(Mutex::new( - WorkItemInner { - __dwork, - __f: wrapper.closure(), - }, - LockdepClass::new(), + WorkItemInner { __dwork, __f: f }, + lockdep_class, ))); + wq.work_nr.fetch_add(1, Ordering::Relaxed); + + #[cexport] + pub unsafe fn worker(c_work_struct: *mut __CWorkStruct) { + // The prototype of the exported function must be exactly as the C API expects, + // otherwise we get a CFI violation. 
However, we know we are actually passed a + // Pin<&__CWorkStruct> + let c_work_struct = unsafe { + ::core::pin::Pin::new_unchecked( + c_work_struct.as_ref().expect("Unexpected NULL pointer"), + ) + }; + let (action, wq, key) = { + let inner = unsafe { c_work_struct.__to_work_item() }; + let mut inner = inner.lock_nested(LockdepState::Normal); + let mut inner = inner.as_mut().project(); + let dwork: &DelayedWork = &inner.__dwork; + let key = dwork as *const _ as usize; + let abstr: &mut dyn AbstractWorkItem = &mut inner.__dwork; + ((inner.__f)(abstr), inner.__dwork.wq, key) + }; + match action { + Action::DropWorkItem => { + wq.__detach(key); + } + Action::Noop => {} + } + } unsafe { init_dwork( wq.c_wq(), new.as_ref() - .lock() + // The WorkItemInner may be created at a point where another lock is taken. + // Later on, the worker may take that lock as well, leading lockdep to complain + // that both locks are taken in either orders, possibly leading to a deadlock. + // In practice, this is fine those cases correspond to a different state of the + // lifecycle, and no deadlock can happen. 
+ .lock_nested(LockdepState::Init) .as_mut() .project() .__dwork .project() .c_dwork, - W::worker() as *const c_void, + worker as *const c_void, ) } - .expect("Could not initialize workqueue's delayed work"); new } #[inline] - unsafe fn from_dwork(c_dwork: Pin<&CDelayedWork>) -> Pin<&PinnedWorkItemInner<'wq, F>> { + unsafe fn from_dwork(c_dwork: Pin<&CDelayedWork>) -> Pin<&PinnedWorkItemInner<'wq, 'f>> { unsafe { let dwork = DelayedWork::<'wq>::from_contained(c_dwork.get_ref()) .as_ref() .unwrap(); - let inner = WorkItemInner::<'wq, F>::from_contained(dwork) - .as_ref() - .unwrap(); - let inner = Mutex::>::from_contained(inner) + let inner = WorkItemInner::<'wq, 'f>::from_contained(dwork) .as_ref() .unwrap(); - let inner = PinnedLock::>>::from_contained(inner) + let inner = Mutex::, LockdepState>::from_contained(inner) .as_ref() .unwrap(); + let inner = + PinnedLock::, LockdepState>>::from_contained(inner) + .as_ref() + .unwrap(); Pin::new_unchecked(inner) } } } -type PinnedWorkItemInner<'wq, F> = PinnedLock>>; +#[pinned_drop] +impl<'wq, 'f> PinnedDrop for WorkItemInner<'wq, 'f> { + fn drop(self: Pin<&mut Self>) { + self.__dwork.wq.work_nr.fetch_sub(1, Ordering::Relaxed); + } +} + +type PinnedWorkItemInner<'wq, 'f> = PinnedLock, LockdepState>>; -pub struct WorkItem<'wq, F> -where - F: Send, -{ +pub struct WorkItem<'wq, 'f> { // SAFETY: The WorkItemInner need to _always_ be accessed after locking the lock, i.e. even if // we have an &mut, we cannot use that knowledge to bypass taking the lock. This is because the // actual worker materializes a shared reference and takes the lock, so we need to play ball @@ -250,13 +434,10 @@ where // dwork itself. If multiple threads try to enqueue the same dwork at the same time, or if a // thread tries to enqueue it at the same time as it enqueues itself, there would be a race // condition. 
- inner: Pin>>, + inner: Pin>>, } -impl<'wq, F> Drop for WorkItem<'wq, F> -where - F: Send, -{ +impl<'wq, 'f> Drop for WorkItem<'wq, 'f> { #[inline] fn drop(&mut self) { // SAFETY: We need to ensure the worker will not fire anymore and has finished running @@ -316,36 +497,76 @@ where } } -impl<'wq, F> WorkItem<'wq, F> -where - F: Send, -{ +impl<'wq, 'f> WorkItem<'wq, 'f> { #[inline] - pub fn __private_new(wq: &'wq Wq, wrapper: W) -> WorkItem<'wq, F> + pub fn __private_new( + wq: &'wq Wq, + f: F, + lockdep_class: LockdepClass, + init_dwork: unsafe fn( + wq: &UnsafeCell, + dwork: Pin<&mut CDelayedWork>, + worker: *const c_void, + ), + ) -> WorkItem<'wq, 'f> where - W: ClosureWrapper, + F: 'f + FnMut(&mut dyn AbstractWorkItem) -> Action + Send, { WorkItem { - inner: WorkItemInner::new(wq, wrapper), + inner: WorkItemInner::new(wq, Box::new(f), lockdep_class, init_dwork), } } #[inline] - fn with_dwork(&self, f: _F) -> T + fn with_dwork(&self, f: F) -> T where - _F: FnOnce(Pin<&mut DelayedWork<'wq>>) -> T, + F: FnOnce(Pin<&mut DelayedWork<'wq>>) -> T, { - f(self.inner.as_ref().lock().as_mut().project().__dwork) + self.__with_dwork(LockdepState::Normal, f) + } + + #[inline] + fn __with_dwork(&self, lockdep_state: LockdepState, f: F) -> T + where + F: FnOnce(Pin<&mut DelayedWork<'wq>>) -> T, + { + f(self + .inner + .as_ref() + .lock_nested(lockdep_state) + .as_mut() + .project() + .__dwork) + } + + #[inline] + fn __enqueue(&self, lockdep_state: LockdepState, delay_us: u64) { + self.__with_dwork(lockdep_state, |dwork| dwork.enqueue(delay_us)) } #[inline] pub fn enqueue(&self, delay_us: u64) { - self.with_dwork(|dwork| dwork.enqueue(delay_us)) + self.__enqueue(LockdepState::Normal, delay_us) + } + + #[inline] + pub fn is_pending(&self) -> bool { + self.with_dwork(|dwork| dwork.is_pending()) + } + + fn drop_unsync(self) { + // We need to make sure we drop all the fields. To make sure we didn't forget any, we + // pattern match on the struct. 
+ let WorkItem { inner: _ } = &self; + // Simply drop the fields, without running any logic to synchronize with the workqueue. + let (inner,) = destructure!(self, inner); + drop(inner); } } pub trait AbstractWorkItem { fn enqueue(&mut self, delay_us: u64); + fn is_pending(&self) -> bool; } impl<'wq> AbstractWorkItem for Pin<&mut DelayedWork<'wq>> { @@ -353,6 +574,11 @@ impl<'wq> AbstractWorkItem for Pin<&mut DelayedWork<'wq>> { fn enqueue(&mut self, delay_us: u64) { self.as_mut().enqueue(delay_us) } + + #[inline] + fn is_pending(&self) -> bool { + self.as_ref().is_pending() + } } incomplete_opaque_type!( @@ -365,10 +591,7 @@ impl __CWorkStruct { /// # Safety /// /// This function assumes that the __CWorkStruct is nested inside a PinnedWorkItemInner. - pub unsafe fn __to_work_item<'wq, F>(self: Pin<&Self>) -> Pin<&PinnedWorkItemInner<'wq, F>> - where - F: Send, - { + pub unsafe fn __to_work_item<'wq, 'f>(self: Pin<&Self>) -> Pin<&PinnedWorkItemInner<'wq, 'f>> { #[cfunc] fn to_dwork(work: Pin<&__CWorkStruct>) -> Pin<&CDelayedWork> { r#" @@ -380,98 +603,97 @@ impl __CWorkStruct { "# } let c_dwork = to_dwork(self); - unsafe { WorkItemInner::<'wq, F>::from_dwork(c_dwork) } + unsafe { WorkItemInner::<'wq, 'f>::from_dwork(c_dwork) } } } -/// # Safety -/// -/// This trait must not be implemented by the user directly. Use the new_work_item!() macro -/// instead. -pub unsafe trait Worker { - fn worker() -> unsafe extern "C" fn(*mut __CWorkStruct); +pub enum Action { + Noop, + DropWorkItem, } -pub trait ClosureWrapper: Worker { - type Closure: FnMut(&mut dyn AbstractWorkItem) + Send; - fn closure(self) -> Self::Closure; +macro_rules! __new_work_item { + ($wq:expr, $f:expr) => {{ + // We create this function here and pass it down rather than having a single copy of the + // function used for all work items because INIT_DELAYED_WORK() also statically creates a + // lockdep class for the item. 
If we use the same INIT_DELAYED_WORK() call site for all + // workers, they will collectively be treated as a single function from lockdep + // perspective, creating dependencies between locks that do not exist in practice. + #[::lisakmod_macros::inlinec::cfunc] + unsafe fn init_dwork( + wq: &::core::cell::UnsafeCell<$crate::runtime::wq::CWq>, + dwork: ::core::pin::Pin<&mut $crate::runtime::wq::CDelayedWork>, + worker: *const ::core::ffi::c_void, + ) { + r#" + #include + "#; + + r#" + INIT_DELAYED_WORK(dwork, worker); + "#; + } + $crate::runtime::sync::new_static_lockdep_class!(WORK_ITEM_INNER_LOCKDEP_CLASS); + $crate::runtime::wq::WorkItem::__private_new( + $wq, + $f, + WORK_ITEM_INNER_LOCKDEP_CLASS.clone(), + init_dwork, + ) + }}; } +#[allow(unused_imports)] +pub(crate) use __new_work_item; + macro_rules! new_work_item { ($wq:expr, $f:expr) => {{ - // SAFETY: We need to ensure Send for the closure, as WorkItem relies on that - // to soundly implement Send - pub type Closure = impl ::core::ops::FnMut(&mut dyn $crate::runtime::wq::AbstractWorkItem) - + ::core::marker::Send - + 'static; - let closure: Closure = $f; - - // A layer is necessary as we cannot implement Worker directly for Closure due to this - // issue: - // https://github.com/rust-lang/rust/issues/139583 - struct Wrapper { - closure: Closure, + fn coerce_hrtb( + f: F, + ) -> F { + f } + let mut f_ = coerce_hrtb($f); + let f = move |work: &mut dyn $crate::runtime::wq::AbstractWorkItem| { + f_(work); + $crate::runtime::wq::Action::Noop + }; + $crate::runtime::wq::__new_work_item!($wq, f) + }}; +} - struct MyClosure { - closure: Closure, - } - impl ::core::ops::FnOnce<(&mut dyn $crate::runtime::wq::AbstractWorkItem,)> for MyClosure { - type Output = (); - extern "rust-call" fn call_once( - mut self, - args: (&mut dyn $crate::runtime::wq::AbstractWorkItem,), - ) -> Self::Output { - (self.closure)(args.0) - } - } - impl ::core::ops::FnMut<(&mut dyn $crate::runtime::wq::AbstractWorkItem,)> for MyClosure { - 
extern "rust-call" fn call_mut( - &mut self, - args: (&mut dyn $crate::runtime::wq::AbstractWorkItem,), - ) -> Self::Output { - (self.closure)(args.0) - } - } +#[allow(unused_imports)] +pub(crate) use new_work_item; - impl $crate::runtime::wq::ClosureWrapper for Wrapper { - type Closure = MyClosure; - #[inline] - // TODO: On Rust 1.86, using the Closure type alias directly triggers an error stating - // the type is unconstrained by appears in closure() return type. As a workaround, we - // encapsulate the closure into MyClosure, which hides the type alias and everything - // works on older rustc versions. - fn closure(self) -> MyClosure { - MyClosure { - closure: self.closure, - } - } +macro_rules! new_attached_work_item { + ($wq:expr, $f:expr, $init:expr) => {{ + fn check(f: F) -> F + where + F: ::core::ops::FnMut(&mut dyn $crate::runtime::wq::AbstractWorkItem), + { + f } - unsafe impl $crate::runtime::wq::Worker for Wrapper { - fn worker() -> unsafe extern "C" fn(*mut $crate::runtime::wq::__CWorkStruct) { - #[::lisakmod_macros::inlinec::cexport] - pub unsafe fn worker(c_work_struct: *mut $crate::runtime::wq::__CWorkStruct) { - // The prototype of the exported function must be exactly as the C API expects, - // otherwise we get a CFI violation. 
However, we know we are actually passed a - // Pin<&__CWorkStruct> - let c_work_struct = unsafe { - ::core::pin::Pin::new_unchecked( - c_work_struct.as_ref().expect("Unexpected NULL pointer"), - ) - }; - let inner = unsafe { c_work_struct.__to_work_item::() }; - let mut inner = inner.lock(); - let mut inner = inner.as_mut().project(); - let abstr: &mut dyn $crate::runtime::wq::AbstractWorkItem = &mut inner.__dwork; - (inner.__f)(abstr); - } - worker + let mut f_ = check($f); + let f = move |work: &mut dyn $crate::runtime::wq::AbstractWorkItem| { + f_(work); + if work.is_pending() { + $crate::runtime::wq::Action::Noop + } else { + // If the worker did not re-enqueue itself, we can drop it as nothing else will + // enqueue it from now-on. + $crate::runtime::wq::Action::DropWorkItem } - } + }; - $crate::runtime::wq::WorkItem::__private_new($wq, Wrapper { closure }) + let wq: ::core::pin::Pin<&$crate::runtime::wq::Wq> = $wq; + // SAFETY: Ensure the 'f lifetime parameter of WorkItem is 'static, so that we cannot + // accidentally pass a closure that would become invalid before the workqueue tries to drop + // it. 
+ let work: $crate::runtime::wq::WorkItem<'_, 'static> = + $crate::runtime::wq::__new_work_item!(wq.get_ref(), f); + wq.__attach(work, $init); }}; } #[allow(unused_imports)] -pub(crate) use new_work_item; +pub(crate) use new_attached_work_item; diff --git a/lisa/_assets/kmodules/lisa/rust/lisakmod/src/typemap.rs b/lisa/_assets/kmodules/lisa/rust/lisakmod/src/typemap.rs index 3b122899074d1b81860f069e376891f0ef531b8c..2b286c09218b83bf953ea077a59c932ae00c92b5 100644 --- a/lisa/_assets/kmodules/lisa/rust/lisakmod/src/typemap.rs +++ b/lisa/_assets/kmodules/lisa/rust/lisakmod/src/typemap.rs @@ -139,6 +139,21 @@ where }) } + #[inline] + pub fn get_mut(&mut self) -> Option<&mut >::Value> + where + Key: ?Sized + KeyOf, + >::Value: 'static, + { + let key = TypeId::of::(); + self.get_any_mut(&key).map(|v| { + &mut ((v as &mut dyn Any) + .downcast_mut::>() + .expect("An Any value of the wrong concrete type was inserted for that key.") + .value) + }) + } + #[inline] fn insert_any(&mut self, type_id: TypeId, value: Box) { self.inner.insert(type_id, value); @@ -148,6 +163,11 @@ where fn get_any(&self, type_id: &TypeId) -> Option<&dyn Value> { self.inner.get(type_id).map(|x| &**x) } + + #[inline] + fn get_any_mut(&mut self, type_id: &TypeId) -> Option<&mut dyn Value> { + self.inner.get_mut(type_id).map(|x| &mut **x) + } } macro_rules! make_index { @@ -185,6 +205,7 @@ macro_rules! make_index { #[allow(unused_imports)] pub(crate) use make_index; +#[allow(unused_macros)] macro_rules! add_index_key { ($index:ty, $key:ty, $value_ty:ty) => { impl $crate::typemap::KeyOf<$index> for $key { @@ -192,3 +213,6 @@ macro_rules! 
add_index_key { } }; } + +#[allow(unused_imports)] +pub(crate) use add_index_key; diff --git a/lisa/_assets/kmodules/lisa/rust/lisakmod/src/version.rs b/lisa/_assets/kmodules/lisa/rust/lisakmod/src/version.rs new file mode 100644 index 0000000000000000000000000000000000000000..f153c067ae2266cbf13408a4f6472d3a8aa1ae9d --- /dev/null +++ b/lisa/_assets/kmodules/lisa/rust/lisakmod/src/version.rs @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +use lisakmod_macros::inlinec::cfunc; + +#[cfunc] +pub fn module_version() -> &'static str { + r#" + #include + #include "generated/module_version.h" + "#; + + r#" + static const char *version = LISA_MODULE_VERSION; + return (struct const_rust_str){ + .data = version, + .len = strlen(version) + }; + "# +} + +#[cfunc] +pub fn module_name() -> &'static str { + r#" + #include + "#; + + r#" + static const char *s = KBUILD_MODNAME; + return (struct const_rust_str){ + .data = s, + .len = strlen(s) + }; + "# +} + +#[cfunc] +pub fn print_prefix() -> &'static str { + r#" + #include + "#; + + r#" + static const char *s = KBUILD_MODNAME ": "; + return (struct const_rust_str){ + .data = s, + .len = strlen(s) + }; + "# +} diff --git a/lisa/_cli_tools/lisa_load_kmod.py b/lisa/_cli_tools/lisa_load_kmod.py index eade26171bcbfda9a979861813a302391d43a0ab..5df6364ab3a52fa9017313854f92d9e19a9950aa 100755 --- a/lisa/_cli_tools/lisa_load_kmod.py +++ b/lisa/_cli_tools/lisa_load_kmod.py @@ -26,22 +26,58 @@ import textwrap import logging import tempfile import shlex +import itertools +import pathlib from lisa.target import Target -from lisa._kmod import LISADynamicKmod +from lisa._kmod import LISADynamicKmod, KmodSrc from lisa.utils import ignore_exceps + +def lisa_kmod(logger, target, args, reset_config): + if (features := args.feature) is None: + if args.no_enable_all: + logger.info('No feature will be enabled') + config = {} + else: + logger.info('All features will be enabled on a best-effort basis') + config = { + 'all': { + 'best-effort': 
True, + }, + } + else: + config = dict.fromkeys(features) + + config = { + 'features': config + } + kmod = target.get_kmod(LISADynamicKmod) + + @contextlib.contextmanager + def cm(): + with kmod.run(config=config, reset_config=reset_config) as _kmod: + pretty_events = ', '.join(_kmod._defined_events) + logger.info(f'Kernel module provides the following ftrace events: {pretty_events}') + yield _kmod + + return cm() + + def main(): params = { 'feature': dict( action='append', help='Enable a specific module feature. Can be repeated. By default, the module will try to enable all features and will log in dmesg the ones that failed to enable' ), + 'no-enable-all': dict( + action='store_true', + help='Do not attempt to enable all features, only enable the features that are specified using --feature or in the configuration if no --feature option is passed' + ), 'cmd': dict( nargs=argparse.REMAINDER, help='Load the module, run the given command then unload the module. If not command is provided, just load the module and exit.' ) - } args, target = Target.from_custom_cli( @@ -66,53 +102,55 @@ def main(): def _main(args, target): logger = logging.getLogger('lisa-load-kmod') - features = args.feature keep_loaded = not bool(args.cmd) cmd = args.cmd or [] if cmd and cmd[0] == '--': cmd = cmd[1:] - kmod_params = {} - if features is not None: - kmod_params['features'] = list(features) - - kmod = target.get_kmod(LISADynamicKmod) - pretty_events = ', '.join(kmod.defined_events) - logger.info(f'Kernel module provides the following ftrace events: {pretty_events}') + kmod_cm = lisa_kmod( + logger=logger, + target=target, + args=args, + # If we keep the module loaded after exiting, we treat that as + # resetting the state of the module so we reset the config. + # + # Otherwise, we will simply push our bit of config, and then pop it + # upon exiting. 
+ reset_config=keep_loaded, + ) - _kmod_cm = kmod.run(kmod_params=kmod_params) + def run_cmd(): + if cmd: + pretty_cmd = ' '.join(map(shlex.quote, cmd)) + logger.info(f'Running command: {pretty_cmd}') + return subprocess.run(cmd).returncode + else: + return 0 if keep_loaded: @contextlib.contextmanager def cm(): logger.info('Loading kernel module ...') - yield _kmod_cm.__enter__() + kmod = kmod_cm.__enter__() + yield logger.info(f'Loaded kernel module as "{kmod.mod_name}"') else: @contextlib.contextmanager def cm(): - with _kmod_cm: + with kmod_cm: logger.info('Loading kernel module ...') try: yield finally: logger.info('Unloading kernel module') - kmod_cm = cm() - def run_cmd(): - if cmd: - pretty_cmd = ' '.join(map(shlex.quote, cmd)) - logger.info(f'Running command: {pretty_cmd}') - return subprocess.run(cmd).returncode - else: - return 0 - - with kmod_cm: + with cm(): ret = run_cmd() return ret + if __name__ == '__main__': sys.exit(main()) diff --git a/lisa/_kmod.py b/lisa/_kmod.py index 7253f41123c1e59f478811b1fca7922254ed6ab8..2b85cb7a86e7c4ee44adf8a145e7e7d12e400eed 100644 --- a/lisa/_kmod.py +++ b/lisa/_kmod.py @@ -139,6 +139,7 @@ from enum import IntEnum import traceback import uuid import textwrap +import json from elftools.elf.elffile import ELFFile @@ -146,7 +147,7 @@ from devlib.target import AndroidTarget, KernelVersion, TypedKernelConfig, Kerne from devlib.host import LocalConnection from devlib.exception import TargetStableError -from lisa.utils import nullcontext, Loggable, checksum, DirCache, chain_cm, memoized, LISA_HOST_ABI, subprocess_log, SerializeViaConstructor, destroyablecontextmanager, ContextManagerExit, ignore_exceps, get_nested_key, is_link_dead, deduplicate, subprocess_detailed_excep +from lisa.utils import nullcontext, Loggable, checksum, DirCache, chain_cm, memoized, LISA_HOST_ABI, subprocess_log, SerializeViaConstructor, destroyablecontextmanager, ContextManagerExit, ignore_exceps, get_nested_key, is_link_dead, deduplicate, 
subprocess_detailed_excep, HideExekallID from lisa._assets import ASSETS_PATH, HOST_PATH, ABI_BINARIES_FOLDER from lisa._unshare import ensure_root import lisa._git as git @@ -188,8 +189,15 @@ class KmodVersionError(Exception): """ pass +class KmodQueryError(Exception): + """ + Raised when a query to the kernel module failed. + """ + pass -_ALPINE_DEFAULT_VERSION = '3.21.3' + + +_ALPINE_DEFAULT_VERSION = '3.22.0' _ALPINE_ROOTFS_URL = 'https://dl-cdn.alpinelinux.org/alpine/v{minor}/releases/{arch}/alpine-minirootfs-{version}-{arch}.tar.gz' _ALPINE_PACKAGE_INFO_URL = 'https://pkgs.alpinelinux.org/package/v{version}/{repo}/{arch}/{package}' @@ -1194,6 +1202,7 @@ class _KernelBuildEnvConf(SimpleMultiSrcConf): VariadicLevelKeyDesc('modules', 'modules settings', LevelKeyDesc('', 'For each module. The module shipped by LISA is "lisa"', ( KeyDesc('overlays', 'Overlays to apply to the sources of the given module', [typing.Dict[str, OverlayResource]]), + KeyDesc('conf', 'Configuration of the module', [object]), ) )) ), @@ -2596,7 +2605,7 @@ class KmodSrc(Loggable): } self._mod_name = name - self.logger.debug(f'Created {self.__class__.__qualname__} with name {self._mod_name} and sources: {", ".join(self.src.keys())}') + self.logger.debug(f'Created {self.__class__.__qualname__} with name {self.mod_name} and sources: {", ".join(self.src.keys())}') @property def code_files(self): @@ -2666,10 +2675,14 @@ class KmodSrc(Loggable): return self.src['Makefile'] except KeyError: name = self.mod_name + def object_path(filename): + path = Path(filename) + path = path.parent / f'{path.stem}.o' + return str(path) return '\n'.join(( f'obj-m := {name}.o', f'{name}-y := ' + ' '.join( - f'{Path(filename).stem}.o' + object_path(filename) for filename in sorted(self.c_files.keys()) ) )).encode('utf-8') @@ -2736,6 +2749,12 @@ class KmodSrc(Loggable): def find_mod_file(path): filenames = glob.glob(str(path.resolve() / '*.ko')) + if len(filenames) > 1: + filenames = [ + filename + for filename in 
filenames + if Path(filename).stem == self.mod_name + ] if not filenames: raise FileNotFoundError(f'Could not find .ko file in {path}') @@ -3010,6 +3029,15 @@ class DynamicKmod(Loggable): def mod_name(self): return self.src.mod_name + @property + def is_loaded(self): + modules = self.target.read_value('/proc/modules') + loaded = { + line.split()[0] + for line in modules.splitlines() + } + return self.mod_name in loaded + @classmethod def from_target(cls, target, **kwargs): """ @@ -3221,11 +3249,6 @@ class DynamicKmod(Loggable): ) ) - try: - self.uninstall() - except Exception: - pass - with kmod_cm as ko_path, tempfile.NamedTemporaryFile() as dmesg_out: dmesg_coll = ignore_exceps( Exception, @@ -3239,14 +3262,12 @@ class DynamicKmod(Loggable): except Exception as e: log_dmesg(dmesg_coll, logger.error) - - if isinstance(e, subprocess.CalledProcessError) and e.returncode == errno.EPROTO: - raise KmodVersionError('In-tree module version does not match what LISA expects. If the module was pre-installed on the target, please contact the 3rd party that shared this setup to you as they took responsibility for maintaining it. This setup is available but unsupported (see online documenation)') - else: - raise + raise else: log_dmesg(dmesg_coll, logger.debug) + return self + def uninstall(self): """ Unload the module from the target. @@ -3266,11 +3287,6 @@ class DynamicKmod(Loggable): :Variable keyword arguments: Forwarded to :meth:`install`. """ - try: - self.uninstall() - except Exception: - pass - x = self.install(**kwargs) try: yield x @@ -3306,7 +3322,7 @@ class FtraceDynamicKmod(DynamicKmod): @property @memoized - def defined_events(self): + def _defined_events(self): """ Ftrace events defined in that module. 
""" @@ -3363,7 +3379,7 @@ class FtraceDynamicKmod(DynamicKmod): class _LISADynamicKmodSrc(KmodSrc): _RUST_SPEC = dict( - version='1.86.0', + version='1.88.0', components=[ # rust-src for -Zbuild-std 'rust-src', @@ -3455,16 +3471,121 @@ class LISADynamicKmod(FtraceDynamicKmod): **kwargs, ) - def _event_features(self, events): - all_events = self.defined_events - return set( - f'event__{event}' - for pattern in events - for event in fnmatch.filter(all_events, pattern) - ) + def _query(self, queries): + target = self.target + logger = self.logger + busybox = quote(target.busybox) + name = self.mod_name + + # WARNING: if more queries are added here, their result must be removed + # from what is returned to the user. + queries = [ + *queries, + {'close-session': None} + ] + content = json.dumps(queries) + logger.debug(f'Queries: {content}') + content = quote(content) + root = f'/sys/module/{name}/queries' + cmd = f'session="$({busybox} cat {root}/new_session)" && {busybox} printf "%s" {content} > {root}/$session/query && cat {root}/$session/execute' + result = target.execute(cmd, as_root=True) + logger.debug(f'Queries result: {result}') + result = json.loads(result) + + def unpack(result, ok): + try: + err = result['error'] + except KeyError: + return result[ok] + else: + raise KmodQueryError(err) + + results = unpack(result, ok='executed') + results = [ + unpack(res, ok='success') + for res in results + ] + # Remove the "close-session" query + results = results[:-1] + return results + + def _push_start(self, configs=None, features=None): + def depth(): + res = self._query([ + { + 'pop-features-config': {'n': 0}, + }, + ]) + return res[0]['pop-features-config']['remaining'] + + configs = list(configs) or [] + init_depth = depth() + + def get_features_conf(conf): + if conf: + allowed = {'features'} + disallowed = conf.keys() - allowed + if disallowed: + raise KeyError(f'Forbidden keys in module configuration: {disallowed}') + else: + return conf.get('features', {}) + 
else: + return {} + + def make_query(config): + features_config = get_features_conf(config or {}) + _features = features_config.keys() if features is None else features + _features = sorted(set(_features)) + return { + 'push-features-config': { + 'config': features_config, + 'enable-features': _features + } + } + + queries = [ + make_query(config) + for config in configs + ] + queries.append({ + 'start-features': None + }) + try: + self._query(queries) + except Exception as e: + self._pop_stop(n=depth() - init_depth) + raise e + + def _pop_stop(self, n=1): + logger = self.logger + + res = self._query([ + { + 'pop-features-config': {'n': n}, + }, + ]) + # If there are some configs left on the stack, we simply restart the + # features now that we have popped our config. + if (remaining := res[0]['pop-features-config']['remaining']): + logger.debug(f'{remaining} configs in the stack, the module will not be removed') + self._query([ + { + 'start-features': None, + }, + ]) + # If no config left on the stack, we are the last users of the module + # so we just shut it down and rmmod it. 
+ else: + logger.debug(f'Config stack empty, removing the module') + self._query([ + { + 'stop-features': None, + }, + ]) + super().uninstall() - def install(self, kmod_params=None): + def install(self, kmod_params=None, config=None, features=None, reset_config=False): target = self.target logger = self.logger busybox = quote(target.busybox) @@ -3489,16 +3610,80 @@ class LISADynamicKmod(FtraceDynamicKmod): kmod_params = kmod_params or {} - kmod_params['version'] = self.src.checksum + kmod_params['___param_enable_all_features'] = 0 base_path, kmod_filename = guess_kmod_path() logger.debug(f'Looking for pre-installed {kmod_filename} module in {base_path}') super_ = super() + + def configure(config): + config = config or {} + version = self._query([{'get-version': None}]) + checksum = version[0]['get-version']['checksum'] + if checksum != self.src.checksum: + raise KmodVersionError('In-tree module version does not match what LISA expects. If the module was pre-installed on the target, please contact the 3rd party that shared this setup to you as they took responsibility for maintaining it. This setup is available but unsupported (see online documenation)') + else: + if reset_config: + self._query([{ + 'pop-features-config': {'n': 'all'} + }]) + + try: + base_conf = get_nested_key( + self._kernel_build_env.conf, + ['modules', self.mod_name, 'conf'] + ) + except KeyError: + base_conf = {} + + # If no features were asked explicitly, only enable the ones + # listed in the particular config we received. The feature + # configuration from the conf file is only used to configure + # features in the event they are actually needed, it does not + # trigger a "need" on itself. + _features = ( + config.get('features', {}).keys() + if features is None else + features + ) + + self._push_start( + # WARNING: if more configs are added here, _pop_stop() must + # be updated accordingly to pop them from the stack. 
+ configs=[base_conf, config], + features=_features, + ) + + def pristine_load(install): + install(kmod_params=kmod_params) + configure(config) + + def load(install): + if self.is_loaded: + try: + configure(config) + except KmodVersionError as e: + # Only rmmod/insmod the module if we asked to reset the + # config, otherwise we would be wiping existing config that + # the user may want to preserve. + if reset_config: + logger.info(f'The currently loaded {self.mod_name} module is not matching the expected version, re-loading') + super_.uninstall() + pristine_load(install) + else: + raise e + else: + logger.debug(f'The currently loaded {self.mod_name} module is matching the expected version so it was simply reconfigured') + else: + pristine_load(install) + + return _LoadedLISADynamicKmod(self) + def preinstalled_unsuitable(excep=None): if excep is not None: logger.debug(f'Pre-installed {kmod_filename} is unsuitable, recompiling: {excep.__class__.__qualname__}: {excep}') - return super_.install(kmod_params=kmod_params) + return load(super_.install) try: kmod_path = target.execute( @@ -3517,16 +3702,85 @@ class LISADynamicKmod(FtraceDynamicKmod): # We found an installed module that could maybe be suitable, so # we try to load it. try: - return self._install(nullcontext(kmod_path), kmod_params=kmod_params) + return load(lambda *kwargs: self._install(nullcontext(kmod_path), **kwargs)) except (subprocess.CalledProcessError, KmodVersionError) as e: # Turns out to not be suitable, so we build our own return preinstalled_unsuitable(e) - else: - logger.warning(f'Loaded "{self.mod_name}" module from pre-installed location: {kmod_path}. This implies that the module was compiled by a 3rd party, which is available but unsupported. If you experience issues related to module version mismatch in the future, please contact them for updating the module. 
This may break at any time, without notice, and regardless of the general backward compatibility policy of LISA.') - return None # If base_path exists, busybox find will simply an empty stdout # rather than return with a non-zero exit status. else: return preinstalled_unsuitable() + def uninstall(self): + # Pop the conf from the _KernelBuildEnvConf and the one passed via + # method parameters. + self._pop_stop(n=2) + + +class _LoadedLISADynamicKmod: + def __init__(self, kmod): + self._kmod = kmod + + def __getattr__(self, attr): + return getattr(self._kmod, attr) + + def _event_features(self, events, strict=True): + events = set(events) + (res,) = self._query(['get-resources']) + resources = res['get-resources']['features'] + + available_events = { + event + for feature in resources.values() + for event in feature['provided']['ftrace-events'] + } + + missing_events = set() + def expand(pattern): + expanded = fnmatch.filter( + available_events, + pattern, + ) + if not expanded: + missing_events.add(pattern) + return expanded + + events = { + event + for pattern in events + for event in expand(pattern) + } + + if strict and missing_events: + from lisa.trace import MissingTraceEventError + raise MissingTraceEventError( + missing_events=missing_events, + available_events=available_events, + msg='The LISA kernel module cannot provide the following required events: {missing_events}{available}', + ) + else: + features = { + name + for name, feature in resources.items() + if events & set(feature['provided']['ftrace-events']) + } + return sorted(features) + + def _event_features_conf(self, *args, **kwargs): + features = self._event_features(*args, **kwargs) + config = {'features': dict.fromkeys(features)} + return config + + @contextlib.contextmanager + def _reconfigure(self, configs=None, features=None): + configs = configs or [] + self._push_start( + configs=configs, + features=features, + ) + try: + yield + finally: + self._pop_stop(n=len(configs)) + # vim :set tabstop=4 
shiftwidth=4 expandtab textwidth=80 diff --git a/lisa/trace.py b/lisa/trace.py index 7e6cbb5a50a18ccf2b2a029a6b519570b1cccf9b..9163860001ee7380258aeebf777aacdb699899b8 100644 --- a/lisa/trace.py +++ b/lisa/trace.py @@ -7508,18 +7508,24 @@ class FtraceCollector(CollectorBase, Configurable): tracing_path = devlib.FtraceCollector.find_tracing_path(target) target_available_events, avoided = self._target_available_events(target, tracing_path) + # We always exclude the events expected to come + # from the module as we need to load the module and query it to + # know if they are actually available. If the module is already + # loaded, ftrace will report the event as being available but there + # is nothing guaranteeing that the module can actually emit it + # (e.g. that it has all its probe succesfully setup) + target_available_events = { + event + for event in target_available_events + if not event.startswith('lisa__') + } + kmod = target.get_kmod(LISADynamicKmod) # Get the events possibly defined in the module. Note that it's a # superset of the events actually defined as this is based on pretty # crude filtering of the source files, rather than analyzing the events # actually defined in the .ko after it has been compiled. kmod_available_events = set(kmod.possible_events) - - # Events provided by the module are namespaced and therefore should - # never overlap with the target, so we never want to provide it via the - # module if it already exists on the target. 
- kmod_available_events -= target_available_events - available_events = target_available_events | kmod_available_events if events is None: @@ -7585,9 +7591,24 @@ class FtraceCollector(CollectorBase, Configurable): if 'funcgraph_entry' in events or 'funcgraph_exit' in events: tracer = 'function_graph' if tracer is None else tracer - # If some events are not already available on that kernel, look them up - # in custom modules - needed_from_kmod = kmod_available_events & events + needed_from_kmod = ( + # If we ask for anything that the module provides, we need to + # load the module in order to check whether the event is indeed + # provided (i.e. it will be emitted at runtime). If we don't do + # that and if the module is already loaded, ftrace will show an + # event as "existing" but the kernel module may not necessarily + # be able to emit it if e.g. it failed to register a probe + # somewhere. We can only clarify that by talking to the loaded + # module. + { + event + for event in events + if event.startswith('lisa__') + } | + # If some events are not already available on that kernel, look + # them up in custom modules + (kmod_available_events & events) + ) kmod_defined_events = set() kmod_cm = None @@ -7701,27 +7722,26 @@ class FtraceCollector(CollectorBase, Configurable): def _get_kmod(cls, target, target_available_events, needed_events): logger = cls.get_logger() kmod = target.get_kmod(LISADynamicKmod) - defined_events = set(kmod.defined_events) - possible_events = set(kmod.possible_events) - - logger.debug(f'Kernel module possible events: {possible_events}') + defined_events = set(kmod._defined_events) logger.debug(f'Kernel module defined events: {defined_events}') needed = needed_events & defined_events if needed: overlapping = defined_events & target_available_events if overlapping: - raise ValueError(f'Events defined in {mod.src.mod_name} ({", ".join(needed)}) are needed but some events overlap with the ones already provided by the kernel: {", 
".join(overlapping)}') + raise ValueError(f'Events defined in {kmod.src.mod_name} ({", ".join(needed)}) are needed but some events overlap with the ones already provided by the kernel: {", ".join(overlapping)}') else: + @contextlib.contextmanager + def cm(): + with kmod.run() as _kmod: + config = _kmod._event_features_conf(needed) + with _kmod._reconfigure(configs=[config]): + yield + return ( defined_events, needed, - functools.partial( - kmod.run, - kmod_params={ - 'features': sorted(kmod._event_features(needed)) - } - ) + cm, ) else: return (defined_events, set(), None) diff --git a/lisa/wa/plugins/_kmod.py b/lisa/wa/plugins/_kmod.py index 3cac60b23d42a8eca59facf648713efda3ee6a40..187f17ad1d85caf44a86757ad99d1c3d236de913 100644 --- a/lisa/wa/plugins/_kmod.py +++ b/lisa/wa/plugins/_kmod.py @@ -15,7 +15,7 @@ import functools from weakref import WeakKeyDictionary -from contextlib import contextmanager, nullcontext +from contextlib import contextmanager, nullcontext, ExitStack import threading from wa import Instrument, Parameter @@ -121,6 +121,11 @@ class LisaKmodInstrument(Instrument): """), ] + # When running WA, we want the agenda to describe the expected + # state of the system so that runs are reproducible and not + # impacted by external factors. + _RESET_CONFIG = True + def __init__(self, target, kernel_src, build_env, ftrace_events, **kwargs): super().__init__(target, **kwargs) self._lisa_target = LISATarget._from_devlib_target( @@ -130,9 +135,7 @@ class LisaKmodInstrument(Instrument): kmod_build_env=build_env, ) self._ftrace_events = set(ftrace_events) - self._kmod = None - self._cm = None - self._features = set() + self._finalize = None # Add a new attribute to the devlib target so we can find ourselves # from the monkey-patched methods. 
@@ -142,9 +145,7 @@ class LisaKmodInstrument(Instrument): def _monkey_patch(cls, instrument): patch = dict( initialize=cls._initialize_cm, - setup=cls._setup_cm, - start=cls._start_cm, - stop=cls._stop_cm, + finalize=cls._finalize_cm, ) def make_wrapper(orig, f): @@ -218,45 +219,51 @@ class LisaKmodInstrument(Instrument): return set(trace_cmd_events) | set(self._ftrace_events) - def _run(self): - features = sorted(self._features) - self.logger.info(f'Enabling LISA kmod features {", ".join(features)}') - return self._kmod.run( - kmod_params={ - 'features': features, - } - ) - + # We patch initialize() and finalize() hooks so that: + # 1. We have loaded the module before the instruments we patch get + # initialized. This is important for trace-cmd as the events need to be + # available in tracefs for trace-cmd to be successfully initialized. + # 2. We only unload the module after the trace has been collected in + # trace.dat. Premature unloading happening between "trace-cmd stop" and + # "trace-cmd extract" lead to trace.dat not containing the events + # descriptors as they will have vanished from tracefs when the module is + # unloaded. @contextmanager def _initialize_cm(self, context): # Note that this function will be ran for each monkey patched # instrument, unlike the other methods ran in job context. - events = self._all_ftrace_events(context) - kmod = self._lisa_target.get_kmod(LISADynamicKmod) - self._features = set(kmod._event_features(events)) - self._kmod = kmod + if self._finalize is None: + events = self._all_ftrace_events(context) + kmod = self._lisa_target.get_kmod(LISADynamicKmod) - # Load the module while running the instrument's initialize so that the - # events are visible in the kernel at that point. 
- with self._run(): - yield + with ExitStack() as stack: + self.logger.info(f'Loading LISA kmod') - @contextmanager - def _setup_cm(self, context): - self._cm = self._run() - yield + # Load the module before instruments initialize so that the events are + # visible in the kernel at that point. + kmod = stack.enter_context( + kmod.run(reset_config=self._RESET_CONFIG) + ) + config = kmod._event_features_conf(events, strict=False) + stack.enter_context( + kmod._reconfigure(configs=[config]) + ) - @contextmanager - def _start_cm(self, context): - self.logger.info(f'Loading LISA kmod') - self._cm.__enter__() + # If we reached this stage without raising an exception, we create + # a fresh ExitStack and move all the context to it so that the + # current stack will not exit those contexts. + self._finalize = stack.pop_all().close yield @contextmanager - def _stop_cm(self, context): - self.logger.info(f'Unloading LISA kmod') - self._cm.__exit__(None, None, None) + def _finalize_cm(self, context): + # Note that this function will be ran for each monkey patched + # instrument, unlike the other methods ran in job context. yield + if (finalize := self._finalize) is not None: + self._finalize = None + self.logger.info(f'Unloading LISA kmod') + finalize() # Monkey-patch the trace-cmd instrument in order to reliably load the