diff --git a/lisa/_assets/kmodules/lisa/Makefile b/lisa/_assets/kmodules/lisa/Makefile index 0b68c78431d4d4bc147cc2a137c548abc9e15939..6b9fe31d39ae7b921ecc26006c15a0886e5af448 100644 --- a/lisa/_assets/kmodules/lisa/Makefile +++ b/lisa/_assets/kmodules/lisa/Makefile @@ -68,7 +68,7 @@ ifneq ($(KERNELRELEASE),) LISA_KMOD_NAME ?= lisa obj-m := $(LISA_KMOD_NAME).o -$(LISA_KMOD_NAME)-y := main.o tp.o wq.o features.o pixel6.o introspection_data.o +$(LISA_KMOD_NAME)-y := main.o tp.o wq.o features.o pixel6.o introspection_data.o perf_counters.o configs.o fs.o feature_params.o # -fno-stack-protector is needed to possibly undefined __stack_chk_guard symbol ccflags-y := "-I$(MODULE_SRC)" -std=gnu11 -fno-stack-protector -Wno-declaration-after-statement -Wno-error diff --git a/lisa/_assets/kmodules/lisa/configs.c b/lisa/_assets/kmodules/lisa/configs.c new file mode 100644 index 0000000000000000000000000000000000000000..dac0be5e516852d3d65ebf6de667aa402a6c92b1 --- /dev/null +++ b/lisa/_assets/kmodules/lisa/configs.c @@ -0,0 +1,73 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#include + +#include "configs.h" +#include "feature_params.h" + +/* List of configs. */ +struct hlist_head cfg_list; + +void lisa_fs_remove(struct dentry *dentry); +void lisa_activate_config(bool value, struct lisa_cfg *cfg); + +struct lisa_cfg *allocate_lisa_cfg(const char *name) +{ + struct lisa_cfg *cfg; + + cfg = kzalloc(sizeof(*cfg), GFP_KERNEL); + if (!cfg) + return NULL; + + cfg->name = kstrdup(name, GFP_KERNEL); + if (!cfg->name) + goto error; + + return cfg; + +error: + kfree(cfg); + return NULL; +} + +int init_lisa_cfg(struct lisa_cfg *cfg, struct hlist_head *cfg_list, + struct dentry *dentry) +{ + cfg->dentry = dentry; + hlist_add_head(&cfg->node, cfg_list); + return 0; +} + +void free_lisa_cfg(struct lisa_cfg *cfg) +{ + /* De-activate the config. */ + lisa_activate_config(false, cfg); + drain_feature_param_entry_cfg(&cfg->list_param); + + /* Remove its dentries. */ + if (cfg->dentry) + lisa_fs_remove(cfg->dentry); + + hlist_del(&cfg->node); + kfree(cfg->name); + kfree(cfg); +} + +void drain_lisa_cfg(struct hlist_head *head) +{ + struct hlist_node *tmp; + struct lisa_cfg *cfg; + + hlist_for_each_entry_safe(cfg, tmp, head, node) + free_lisa_cfg(cfg); +} + +struct lisa_cfg *find_lisa_cfg(const char *name) +{ + struct lisa_cfg *cfg; + hlist_for_each_entry(cfg, &cfg_list, node) { + if (!strcmp(cfg->name, name)) + return cfg; + } + return NULL; +} \ No newline at end of file diff --git a/lisa/_assets/kmodules/lisa/configs.h b/lisa/_assets/kmodules/lisa/configs.h new file mode 100644 index 0000000000000000000000000000000000000000..75db677a9981db2999e6f517db89d9d44350d441 --- /dev/null +++ b/lisa/_assets/kmodules/lisa/configs.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _CONFIGS_H +#define _CONFIGS_H + +#include "main.h" + +struct lisa_cfg { + struct dentry *dentry; + + /* Member of cfg_list. */ + struct hlist_node node; + + /* List of (struct feature_param_entry)->node_cfg. */ + struct hlist_head list_param; + + /* This config is currently activated. 
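+	 * Toggled by writing a boolean to this config's 'activate' file; while
+	 * it is set, the config's parameter files reject writes with -EBUSY.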
*/ + bool activated; + char *name; +}; + +extern struct hlist_head cfg_list; + +struct lisa_cfg *allocate_lisa_cfg(const char *name); +int init_lisa_cfg(struct lisa_cfg *cfg, struct hlist_head *cfg_list, + struct dentry *dentry); +void free_lisa_cfg(struct lisa_cfg *cfg); +void drain_lisa_cfg(struct hlist_head *head); +struct lisa_cfg *find_lisa_cfg(const char *name); + +#endif // _CONFIGS_H diff --git a/lisa/_assets/kmodules/lisa/feature_params.c b/lisa/_assets/kmodules/lisa/feature_params.c new file mode 100644 index 0000000000000000000000000000000000000000..ac352b7f656bef357655dcbdf6f00854c161fee7 --- /dev/null +++ b/lisa/_assets/kmodules/lisa/feature_params.c @@ -0,0 +1,397 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#include +#include "features.h" + +struct feature_param_entry_value *allocate_feature_param_entry_value(void) +{ + struct feature_param_entry_value *val; + + val = kmalloc(sizeof(*val), GFP_KERNEL); + if (!val) + return NULL; + + INIT_LIST_HEAD(&val->node); + return val; +} + +void init_feature_param_entry_value(struct feature_param_entry_value *val, + struct feature_param_entry *entry) +{ + /* Don't init the refcount for non-global values. */ + list_add_tail(&val->node, &entry->list_values); + val->entry = entry; +} + +void init_feature_param_entry_value_global(struct feature_param_entry_value *val, + struct feature_param_entry *entry, + struct list_head *head) +{ + refcount_set(&val->refcnt, 1); + list_add_tail(&val->node, head); + val->entry = entry; +} + +void free_feature_param_entry_value(struct feature_param_entry_value *val) +{ + list_del(&val->node); + kfree(val); +} + +void drain_feature_param_entry_value(struct list_head *head) +{ + struct feature_param_entry_value *val, *tmp; + + list_for_each_entry_safe(val, tmp, head, node) + free_feature_param_entry_value(val); +} + +struct feature_param_entry *allocate_feature_param_entry(void) +{ + struct feature_param_entry *entry; + + entry = kmalloc(sizeof(*entry), GFP_KERNEL); + return entry; +} + +void init_feature_param_entry(struct feature_param_entry *entry, + struct lisa_cfg *cfg, struct feature_param *param) +{ + entry->param = param; + entry->cfg = cfg; + + INIT_LIST_HEAD(&entry->list_values); + hlist_add_head(&entry->node, ¶m->param_args); + hlist_add_head(&entry->node_cfg, &cfg->list_param); +} + +void free_feature_param_entry(struct feature_param_entry *entry) +{ + drain_feature_param_entry_value(&entry->list_values); + hlist_del(&entry->node); + hlist_del(&entry->node_cfg); + kfree(entry); +} + +void drain_feature_param_entry_cfg(struct hlist_head *head) +{ + struct feature_param_entry *entry; + struct hlist_node *tmp; + + hlist_for_each_entry_safe(entry, tmp, head, node_cfg) + free_feature_param_entry(entry); +} + +int feature_param_add_new(struct feature_param_entry *entry, const char *v) +{ + struct feature_param *param = entry->param; + struct feature_param_entry_value *val; + int ret = 0; + + val = param->ops->set(v, entry); + if (IS_ERR_OR_NULL(val)) + return IS_ERR(val) ? PTR_ERR(val) : -EINVAL; + + if (param->validate) { + ret = param->validate(val); + if (ret) + goto error; + } + + init_feature_param_entry_value(val, entry); + + return ret; + +error: + free_feature_param_entry_value(val); + return ret; +} + +int feature_param_merge_common(struct feature_param_entry *added_entry) +{ + struct feature_param_entry_value *added_val, *merged_val, *new_val; + struct feature_param *param = added_entry->param; + struct list_head *head; + int ret = 0; + + /* Should have been checked already. 
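+	 * update_global_value() only calls this for entries whose value list is
+	 * non-empty, so an empty list here is a caller bug rather than bad user
+	 * input.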
*/ + if (list_empty(&added_entry->list_values)) + return -EINVAL; + + head = ¶m->global_value; + + switch (param->mode) { + case FT_PARAM_MODE_SINGLE: + added_val = list_first_entry(&added_entry->list_values, + struct feature_param_entry_value, node); + + if (list_empty(head)) { + /* No global value set yet. Allocate the single value. */ + + new_val = allocate_feature_param_entry_value(); + if (!new_val) { + ret = -ENOMEM; + break; + } + + init_feature_param_entry_value_global( + new_val, added_val->entry, head); + ret = param->ops->copy(added_val, new_val); + if (ret) + goto free; + + break; + } + + /* Otherwise check added_val has the same value as the global config. */ + + merged_val = list_first_entry( + head, struct feature_param_entry_value, node); + if (!param->ops->is_equal(&added_val->data, merged_val)) { + pr_err("Single value must be set across configs for %s\n", + added_entry->param->name); + feature_param_entry_print(param, added_val); + feature_param_entry_print(param, merged_val); + ret = -EEXIST; + goto error; + } + + break; + + case FT_PARAM_MODE_SET: + list_for_each_entry(added_val, &added_entry->list_values, node) { + bool found = false; + + /* Check the value doesn't already exist. */ + list_for_each_entry(merged_val, head, node) { + if (param->ops->is_equal(&added_val->data, merged_val)) { + /* If the value exists, increase the refcnt. */ + refcount_inc(&merged_val->refcnt); + found = true; + break; + } + } + if (found) + continue; + + /* Else allocate a new value */ + new_val = allocate_feature_param_entry_value(); + if (!new_val) { + ret = -ENOMEM; + break; + } + + init_feature_param_entry_value_global( + new_val, added_val->entry, head); + ret = param->ops->copy(added_val, new_val); + if (ret) + goto free; + } + + break; + + default: + ret = -EINVAL; + break; + } + + return ret; + +free: + free_feature_param_entry_value(new_val); +error: + return ret; +} + +int feature_param_remove_config_common(struct feature_param_entry *removed_entry) +{ + struct feature_param_entry_value *removed_val, *merged_val; + struct feature_param *param = removed_entry->param; + struct list_head *head; + int ret = 0; + + /* Should have been checked already. */ + if (list_empty(&removed_entry->list_values)) + return -EINVAL; + + head = ¶m->global_value; + + list_for_each_entry(removed_val, &removed_entry->list_values, node) { + bool found = false; + + /* Check for an existing value. */ + list_for_each_entry(merged_val, head, node) { + if (!param->ops->is_equal(&removed_val->data, merged_val)) + continue; + + found = true; + + /* This was the last reference. Free. */ + if (refcount_dec_and_test(&merged_val->refcnt)) { + free_feature_param_entry_value(merged_val); + break; + } + } + + if (!found) { + pr_err("Value not found while deactivating config.\n"); + feature_param_entry_print(param, removed_val); + ret = -EINVAL; + break; + } + } + + return ret; +} + +static int +feature_param_set_common(struct feature_param_entry *entry, void *data) +{ + struct feature_param_entry_value *val; + int ret = 0; + + switch (entry->param->mode) { + case FT_PARAM_MODE_SINGLE: + /* Single parameter, replace the pre-existing value. */ + /* + * TODO This might not be a good idea. The value is replaced + * even when the user thinks the value is appended. + * I.e. 'echo 1 >> file' will replace the pre-existing value. 
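+	 * A stricter alternative (not implemented here) would be to honour
+	 * O_APPEND for single-value parameters by returning -EEXIST instead of
+	 * silently replacing the existing value.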
+ */ + val = list_first_entry(&entry->list_values, + struct feature_param_entry_value, node); + free_feature_param_entry_value(val); + break; + case FT_PARAM_MODE_SET: + /* Don't allow duplicated values. */ + list_for_each_entry(val, &entry->list_values, node) + if (entry->param->ops->is_equal(data, val)) { + pr_err("Value already set.\n"); + ret = -EEXIST; + break; + } + break; + default: + ret = -EINVAL; + break; + } + + return 0; +} + +struct feature_param_entry_value * +feature_param_set_uint(const char *buf, struct feature_param_entry *entry) +{ + struct feature_param_entry_value *val; + unsigned int input_val; + int ret; + + if (!buf) + return ERR_PTR(-EINVAL); + + ret = kstrtouint(buf, 0, &input_val); + if (ret) + return ERR_PTR(ret); + + if (list_empty(&entry->list_values)) + goto new_val; + + ret = feature_param_set_common(entry, &input_val); + if (ret) + return ERR_PTR(ret); + +new_val: + val = allocate_feature_param_entry_value(); + if (!val) + return ERR_PTR(-ENOMEM); + + val->value = input_val; + return val; +} + +static size_t +feature_param_stringify_uint(const struct feature_param_entry_value *val, + char *buffer) +{ + return buffer ? sprintf(buffer, "%u", val->value) : + snprintf(NULL, 0, "%u", val->value); +} + +static int +feature_param_is_equal_uint(const void *data, + const struct feature_param_entry_value *val) +{ + return *(unsigned int *)data == val->value; +} + +static int +feature_param_copy_uint(const struct feature_param_entry_value *src_val, + struct feature_param_entry_value *val) +{ + val->value = src_val->value; + return 0; +} + +static struct feature_param_entry_value * +feature_param_set_string(const char *buf, struct feature_param_entry *entry) +{ + struct feature_param_entry_value *val; + int ret; + + if (!buf) + return ERR_PTR(-EINVAL); + + if (list_empty(&entry->list_values)) + goto new_val; + + ret = feature_param_set_common(entry, &buf); + if (ret) + return ERR_PTR(ret); + +new_val: + val = allocate_feature_param_entry_value(); + if (!val) + return ERR_PTR(-ENOMEM); + + val->data = kstrdup(buf, GFP_KERNEL); + return val; +} + +static size_t +feature_param_stringify_string(const struct feature_param_entry_value *val, + char *buf) +{ + size_t size = strlen(val->data); + if (buf) + memcpy(buf, val->data, size); + return size; +} + +static int +feature_param_is_equal_string(const void *data, + const struct feature_param_entry_value *val) +{ + return !strcmp(*(char **)data, val->data); +} + +static int +feature_param_copy_string(const struct feature_param_entry_value *src_val, + struct feature_param_entry_value *val) +{ + val->data = kstrdup(src_val->data, GFP_KERNEL); + return 0; +} + +const struct feature_param_ops feature_param_ops_uint = { + .set = feature_param_set_uint, + .stringify = feature_param_stringify_uint, + .is_equal = feature_param_is_equal_uint, + .copy = feature_param_copy_uint, +}; + +const struct feature_param_ops feature_param_ops_string = { + .set = feature_param_set_string, + .stringify = feature_param_stringify_string, + .is_equal = feature_param_is_equal_string, + .copy = feature_param_copy_string, +}; diff --git a/lisa/_assets/kmodules/lisa/feature_params.h b/lisa/_assets/kmodules/lisa/feature_params.h new file mode 100644 index 0000000000000000000000000000000000000000..33cdf6812db5fc894947268d2813c7c0a31af5f9 --- /dev/null +++ b/lisa/_assets/kmodules/lisa/feature_params.h @@ -0,0 +1,182 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _FEATURE__PARAM_H +#define _FEATURE__PARAM_H + +#include + +#include "configs.h" +#include 
"features.h" +#include "main.h" +#include "tp.h" + +/* + * Struct containing one single data value. + * E.g., if the [0, 1, 2] pmu_raw_counters are set, + * each value is stored in a (struct feature_param_entry_value). + */ +struct feature_param_entry_value { + /* + * Member of a either: + * - (struct feature_param_entry)->list_values + * - (struct feature_param)->global_value + */ + struct list_head node; + + /* Parent entry. */ + struct feature_param_entry *entry; + + /* + * Refcount of the struct. + * Only meaningful for the (struct feature_param)->global_value, when values + * of multiple configs are merged. + */ + refcount_t refcnt; + + union { + unsigned int value; + void *data; + }; +}; + +/* + * Struct containing a list of values. + * E.g., if the [0, 1, 2] pmu_raw_counters are set for a 'config1', + * and [2, 3, 4] are set for a 'config2', each set of values will be + * referenced by a (struct feature_param_entry). + */ +struct feature_param_entry { + /* Member of (struct feature_param)->param_args */ + struct hlist_node node; + + /* Member of (struct lisa_cfg)->list_param */ + struct hlist_node node_cfg; + + /* List of (struct feature_param_entry_value)->node. */ + struct list_head list_values; + + /* Parent param. */ + struct feature_param *param; + + /* Parent cfg. */ + struct lisa_cfg *cfg; +}; + +enum feature_param_mode { + /* + * Among all configs, at most one value is allowed. + * I.e. for all the configs where a value is set, + * this value must be the same. + */ + FT_PARAM_MODE_SINGLE = 0, + /* + * Merge values of all configs by creating a set. + * E.g. pmu_raw_counters can have different counters enabled in + * different configs. The resulting value is a set of all the + * values of the different configs. + */ + FT_PARAM_MODE_SET = 1, +}; + +enum feature_param_type { + /* Standard parameter. */ + FT_PARAM_TYPE_STD = 0, + /* Specific to the 'lisa_features_param' parameter handling. */ + FT_PARAM_TYPE_AVAILABLE_FT, +}; + +struct feature_param { + const char *name; + enum feature_param_mode mode; + enum feature_param_type type; + struct dentry *dentry; + umode_t perms; + const struct feature_param_ops *ops; + int (*validate)(struct feature_param_entry_value *); + + /* List of (struct feature_param_entry)->node. */ + struct hlist_head param_args; + + /* List of (struct feature_param_entry_value)->node. */ + struct list_head global_value; + + /* Parent feature. 
*/ + struct feature *feature; +}; + +struct feature_param_ops { + struct feature_param_entry_value *(*set) (const char *, struct feature_param_entry *); + size_t (*stringify) (const struct feature_param_entry_value *, char *); + int (*is_equal) (const void *, const struct feature_param_entry_value *); + int (*copy) (const struct feature_param_entry_value *, struct feature_param_entry_value *); +}; + +extern const struct feature_param_ops feature_param_ops_uint; +extern const struct feature_param_ops feature_param_ops_string; + +#define GET_PARAM_HANDLER(type) \ + __builtin_choose_expr( \ + __builtin_types_compatible_p(type, char *), \ + &feature_param_ops_string, \ + __builtin_choose_expr( \ + __builtin_types_compatible_p(type, unsigned int), \ + &feature_param_ops_uint, NULL)) + +#define __PARAM(__name, __mode, __type, __perms, __param_type, __feature) \ + (&(struct feature_param) { \ + .name = __name, \ + .mode = __mode, \ + .type = __type, \ + .perms = __perms, \ + .ops = GET_PARAM_HANDLER(__param_type), \ + .param_args = HLIST_HEAD_INIT, \ + .feature = &__FEATURE_NAME(__feature), \ + }) + +#define PARAM_SINGLE(name, perms, param_type, feature) \ + __PARAM(name, FT_PARAM_MODE_SINGLE, FT_PARAM_TYPE_STD, perms, param_type, __EVENT_FEATURE(feature)) +#define PARAM_SET(name, perms, param_type, feature) \ + __PARAM(name, FT_PARAM_MODE_SET, FT_PARAM_TYPE_STD, perms, param_type, __EVENT_FEATURE(feature)) + +#define FEATURE_PARAMS(...) \ + .params = (struct feature_param* []){__VA_ARGS__, NULL} \ + +#define EXPAND(...) __VA_ARGS__ +#define DEFINE_FEATURE_PARAMS(...) EXPAND(__VA_ARGS__) + +#define for_each_feature_param(param, pparam, feature) \ + if (feature->params) \ + for (pparam = feature->params, param = *pparam; param != NULL; pparam++, param = *pparam) + +#define feature_param_entry_print(param, val) { \ + bool success = false; \ + if (param->ops->stringify) { \ + size_t size = param->ops->stringify(val, NULL); \ + char *buf = kmalloc(size +1, GFP_KERNEL); \ + if (buf) { \ + buf[size] = '\0'; \ + size = param->ops->stringify(val, buf); \ + pr_err("Value: %s\n", buf); \ + kfree(buf); \ + success = true; \ + } \ + } \ + if (!success) \ + pr_err("Value: failed to print\n"); \ +} + +struct feature_param_entry_value *allocate_feature_param_entry_value(void); +void init_feature_param_entry_value(struct feature_param_entry_value *val, struct feature_param_entry *entry); +void free_feature_param_entry_value(struct feature_param_entry_value *val); +void drain_feature_param_entry_value(struct list_head *head); + +struct feature_param_entry *allocate_feature_param_entry(void); +void init_feature_param_entry(struct feature_param_entry *entry, struct lisa_cfg *cfg, struct feature_param *param); +void free_feature_param_entry(struct feature_param_entry *entry); +void drain_feature_param_entry_cfg(struct hlist_head *head); + +int feature_param_add_new(struct feature_param_entry *entry, const char *v); +int feature_param_merge_common(struct feature_param_entry *added_entry); +int feature_param_remove_config_common(struct feature_param_entry *removed_entry); + +#endif diff --git a/lisa/_assets/kmodules/lisa/features.c b/lisa/_assets/kmodules/lisa/features.c index 01e5c2c8c93564357588e89bf9beb756cfff2922..308b99159961d104c2355c8610277f2974762b79 100644 --- a/lisa/_assets/kmodules/lisa/features.c +++ b/lisa/_assets/kmodules/lisa/features.c @@ -90,7 +90,6 @@ static int __process_features(char **selected, size_t selected_len, feature_proc return ret; } - static int __list_feature(struct feature* feature) { 
if (!feature->__internal) pr_info(" %s", feature->name); @@ -109,7 +108,16 @@ int init_features(char **selected, size_t selected_len) { pr_info("Available features:"); __process_features(NULL, 0, __list_feature); - return __process_features(selected, selected_len, __enable_feature_explicitly); + + // TODO: features are now only initialized if the event is requested. + // __process_features(selected, selected_len, __enable_feature_explicitly); + + return 0; +} + +int init_single_feature(char *selected) +{ + return __process_features(&selected, 1, __enable_feature_explicitly); } static int __disable_explicitly_enabled_feature(struct feature* feature) { @@ -125,6 +133,10 @@ static int __disable_explicitly_enabled_feature(struct feature* feature) { return ret; } +int deinit_single_features(char *selected) { + return __process_features(&selected, 1, __disable_explicitly_enabled_feature); +} + int deinit_features(void) { return __process_features(NULL, 0, __disable_explicitly_enabled_feature); } @@ -137,3 +149,23 @@ int __placeholder_init(struct feature *feature) { int __placeholder_deinit(struct feature *feature) { return 0; } + +struct feature *find_feature(char *name) +{ + struct feature *feature; + + for_each_feature(feature) + if (!strcmp(name, feature->name)) + return feature; + return NULL; +} + +struct feature_param *find_feature_param(char *name, struct feature *feature) +{ + struct feature_param *param = NULL, **pparam; + + for_each_feature_param(param, pparam, feature) + if (!strcmp(name, param->name)) + break; + return param; +} diff --git a/lisa/_assets/kmodules/lisa/features.h b/lisa/_assets/kmodules/lisa/features.h index 31e322f41f113a9051edc4ebbe57bdf5c19a355f..b8b58c7b24f1ea6bab8a80d658dd764857e93883 100644 --- a/lisa/_assets/kmodules/lisa/features.h +++ b/lisa/_assets/kmodules/lisa/features.h @@ -1,13 +1,16 @@ /* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _FEATURE_H +#define _FEATURE_H + #include #include #include #include - #include -#ifndef _FEATURE_H -#define _FEATURE_H +#include "feature_params.h" +#include "main.h" /** * struct feature - LISA kernel module feature @@ -18,6 +21,7 @@ * @lock: Lock taken when enabling and disabling the feature. * @enable: Function pointer to the enable function. Return non-zero value in case of error. * @disable: Function pointer to the disable function. Return non-zero value in case of error. + * @params: Array of pointer to this feature's parameters. * * A struct feature represent an independent feature of the kernel module that * can be enabled and disabled dynamically. Features are ref-counted so that @@ -41,6 +45,9 @@ struct feature { * advantage of reference counting to ensure safe setup/teardown. */ bool __internal; + + /* Array of pointer to this feature's parameters. */ + struct feature_param **params; }; /* Start and stop address of the ELF section containing the struct feature @@ -49,6 +56,9 @@ struct feature { extern struct feature __lisa_features_start[]; extern struct feature __lisa_features_stop[]; +#define for_each_feature(feature) \ + for (feature=__lisa_features_start; feature < __lisa_features_stop; feature++) + /** * MAX_FEATURES - Maximum number of features allowed in this module. */ @@ -60,7 +70,7 @@ int __placeholder_deinit(struct feature *feature); #define __FEATURE_NAME(name) __lisa_feature_##name /* Weak definition, can be useful to deal with compiled-out features */ -#define __DEFINE_FEATURE_WEAK(feature_name) \ +#define __DEFINE_FEATURE_WEAK(feature_name, ...) 
\ __attribute__((weak)) DEFINE_MUTEX(__lisa_mutex_feature_##feature_name); \ __attribute__((weak)) struct feature __FEATURE_NAME(feature_name) = { \ .name = #feature_name, \ @@ -74,7 +84,7 @@ int __placeholder_deinit(struct feature *feature); .__enable_ret = 0, \ }; -#define __DEFINE_FEATURE_STRONG(feature_name, enable_f, disable_f, internal) \ +#define __DEFINE_FEATURE_STRONG(feature_name, enable_f, disable_f, internal, ...) \ DEFINE_MUTEX(__lisa_mutex_feature_##feature_name); \ struct feature __FEATURE_NAME(feature_name) __attribute__((unused,section(".__lisa_features"))) = { \ .name = #feature_name, \ @@ -86,6 +96,7 @@ int __placeholder_deinit(struct feature *feature); .lock = &__lisa_mutex_feature_##feature_name, \ .__internal = internal, \ .__enable_ret = 0, \ + DEFINE_FEATURE_PARAMS(__VA_ARGS__) \ }; /** @@ -99,7 +110,9 @@ int __placeholder_deinit(struct feature *feature); * DISABLE_FEATURE() on all the features that were enabled by ENABLE_FEATURE() * in enable_f() in order to keep accurate reference-counting. */ -#define DEFINE_FEATURE(feature_name, enable_f, disable_f) __DEFINE_FEATURE_STRONG(feature_name, enable_f, disable_f, false) +#define DEFINE_FEATURE(feature_name, enable_f, disable_f, ...) \ + __DEFINE_FEATURE_STRONG(feature_name, enable_f, disable_f, false, ##__VA_ARGS__) + /** * DEFINE_INTERNAL_FEATURE() - Same as DEFINE_FEATURE() but for internal features. @@ -109,7 +122,8 @@ int __placeholder_deinit(struct feature *feature); * multiple other features, e.g. to initialize and teardown the use of a kernel * API (workqueues, tracepoints etc). */ -#define DEFINE_INTERNAL_FEATURE(feature_name, enable_f, disable_f) __DEFINE_FEATURE_STRONG(feature_name, enable_f, disable_f, true) +#define DEFINE_INTERNAL_FEATURE(feature_name, enable_f, disable_f, ...) \ + __DEFINE_FEATURE_STRONG(feature_name, enable_f, disable_f, true, ##__VA_ARGS__) /** * DECLARE_FEATURE() - Declare a feature to test for its presence dynamically. @@ -125,7 +139,7 @@ int __placeholder_deinit(struct feature *feature); * Note that because of weak symbols limitations, a given compilation unit * cannot contain both DECLARE_FEATURE() and DEFINE_FEATURE(). */ -#define DECLARE_FEATURE(feature_name) __DEFINE_FEATURE_WEAK(feature_name) +#define DECLARE_FEATURE(feature_name, ...) __DEFINE_FEATURE_WEAK(feature_name, ##__VA_ARGS__) /** * FEATURE() - Pointer the the struct feature @@ -184,6 +198,14 @@ int __placeholder_deinit(struct feature *feature); */ int init_features(char **selected, size_t selected_len); +/** + * init_single_feature() - Initialize one feature + * @selected: Name of the feature to initialize. + * + * Cf. init_features() + */ +int init_single_feature(char *selected); + /** * deinit_features() - De-initialize features * @@ -191,4 +213,32 @@ int init_features(char **selected, size_t selected_len); * Return: non-zero in case of errors. */ int deinit_features(void); + +/** + * deinit_single_features() - De-initialize one feature + * + * Cf. deinit_features() + */ +int deinit_single_features(char *selected); + +/** + * find_feature() - Find the (struct feature) matching the input name. + * @name: Name of the feature to find. + * + * Return: (struct feature*) matching the input name if success. + * NULL otherwise. + */ +struct feature *find_feature(char *name); + +/** + * find_feature_param() - Find the (struct feature_param) of a feature + * matching the input name. + * @name: Name of the feature to find. + * @feature: Feature to search the (struct feature_param) from. 
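+ *           The lookup walks the feature's 'params' array (registered with
+ *           FEATURE_PARAMS()) and matches @name against each parameter name.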
+ * + * Return: (struct feature_param*) matching the input name if success. + * NULL otherwise. + */ +struct feature_param *find_feature_param(char *name, struct feature *feature); + #endif diff --git a/lisa/_assets/kmodules/lisa/fs.c b/lisa/_assets/kmodules/lisa/fs.c new file mode 100644 index 0000000000000000000000000000000000000000..c6bbff2cf85934d9489ed0bdc9ddef6627c0a857 --- /dev/null +++ b/lisa/_assets/kmodules/lisa/fs.c @@ -0,0 +1,755 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#include +#include +#include +#include +#include +#include + +#include "features.h" +#include "configs.h" + +static int lisa_fs_create_files(struct dentry *dentry, bool is_top_level, struct lisa_cfg *cfg); +static struct dentry * +lisa_fs_create_single(struct dentry *parent, const char *name, + const struct inode_operations *i_ops, + const struct file_operations *f_ops, umode_t mode, + void *data); + +#define LISA_FS_SUPER_MAGIC 0xcdb11bc9 + +/* Protect the interface. */ +static struct mutex interface_lock; + +int feature_param_lisa_validate(struct feature_param_entry_value *val) +{ + struct feature *feature; + + for_each_feature(feature) { + if (!strcmp(feature->name, val->data)) + return 0; + } + return -EINVAL; +} + +/* Handle feature names using the (struct feature_param) logic. */ +struct feature_param lisa_features_param = { + .name = "lisa_features_param", + .mode = FT_PARAM_MODE_SET, + .type = FT_PARAM_TYPE_AVAILABLE_FT, + .perms = S_IFREG | S_IRUGO | S_IWUGO, + .ops = &feature_param_ops_string, + .validate = feature_param_lisa_validate, + .param_args = HLIST_HEAD_INIT, + .global_value = LIST_HEAD_INIT(lisa_features_param.global_value), +}; + +static struct inode *lisa_fs_create_inode(struct super_block *sb, int mode) +{ + struct inode *inode = new_inode(sb); + + if (inode) { + inode->i_ino = get_next_ino(); + inode->i_mode = mode; + inode->i_atime = inode->i_mtime = current_time(inode); + } + + return inode; +} + +/* + * available_features handlers + */ + +static int lisa_features_available_show(struct seq_file *s, void *data) +{ + struct feature *feature; + + for_each_feature(feature) + if (!feature->__internal) + seq_printf(s, "%s\n", feature->name); + + return 0; +} + +static int lisa_features_available_open(struct inode *inode, struct file *file) +{ + return single_open(file, lisa_features_available_show, NULL); +} + +static struct file_operations lisa_available_features_fops = { + .owner = THIS_MODULE, + .open = lisa_features_available_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +/* + * activate handlers + */ + +static int lisa_activate_show(struct seq_file *s, void *data) +{ + struct lisa_cfg *cfg = s->private; + + seq_printf(s, "%d\n", cfg->activated); + return 0; +} + +static int update_global_value(struct lisa_cfg *cfg, int new_value) +{ + struct feature_param_entry *entry, *rollback_entry; + int ret = 0; + + /* For each parameter of the config. */ + hlist_for_each_entry(entry, &cfg->list_param, node_cfg) { + if (!list_empty(&entry->list_values)) { + /* For each value of this entry. */ + if (new_value) + ret = feature_param_merge_common(entry); + else + ret = feature_param_remove_config_common(entry); + if (ret) { + rollback_entry = entry; + goto rollback; + } + } + } + + return ret; + +rollback: + hlist_for_each_entry(entry, &cfg->list_param, node_cfg) { + if (entry == rollback_entry) + break; + + if (!list_empty(&entry->list_values)) { + /* For each value of this entry. 
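+			 * Undo whatever the first pass already did: values that
+			 * were merged are removed again, and values that were
+			 * removed are merged back.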
*/ + if (new_value) + ret = feature_param_remove_config_common(entry); + else + ret = feature_param_merge_common(entry); + if (ret) { + pr_err("Could not rollback config values\n"); + return ret; + } + } + } + + return ret; +} + +static bool is_feature_set(char *name) +{ + struct feature_param_entry_value *val; + + /* Check whether the feature is in the global set_features list. */ + list_for_each_entry(val, &lisa_features_param.global_value, node) + if (lisa_features_param.ops->is_equal(&name, val)) + return true; + return false; +} + +int lisa_activate_config(bool value, struct lisa_cfg *cfg) +{ + struct feature *feature; + int ret; + + if (cfg->activated == value) + return 0; + + /* All the global values have now been updated. Time to enable them. */ + + ret = update_global_value(cfg, value); + if (ret) + return ret; + + cfg->activated = value; + + for_each_feature(feature) { + if (!is_feature_set(feature->name)) { + /* + * Feature was enabled, and de-activating this config + * disabled the feature. + */ + if (feature->__explicitly_enabled && !cfg->activated) + deinit_single_features(feature->name); + continue; + } + + if (cfg->activated) { + /* + * Feature was enabled. By default, de-init before re-init the feature + * to catch potential modifications. + */ + if (feature->__explicitly_enabled) + deinit_single_features(feature->name); + init_single_feature(feature->name); + continue; + } + } + + return 0; +} + +static ssize_t lisa_activate_write(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct seq_file *s = file->private_data; + bool value; + int ret; + + if (kstrtobool_from_user(buf, count, &value)) + return -EINVAL; + + mutex_lock(&interface_lock); + ret = lisa_activate_config(value, (struct lisa_cfg *)s->private); + mutex_unlock(&interface_lock); + + return ret < 0 ? 
ret : count; +} + +static int lisa_activate_open(struct inode *inode, struct file *file) +{ + struct lisa_cfg *cfg = inode->i_private; + + return single_open(file, lisa_activate_show, cfg); +} + +static struct file_operations lisa_activate_fops = { + .owner = THIS_MODULE, + .open = lisa_activate_open, + .read = seq_read, + .write = lisa_activate_write, + .release = single_release, +}; + +/* + * set_features handlers + * available_features handlers + */ + +static void *lisa_param_feature_seq_start(struct seq_file *s, loff_t *pos) +{ + struct feature_param_entry *entry; + void *ret; + + mutex_lock(&interface_lock); + + entry = *(struct feature_param_entry **)s->private; + ret = seq_list_start(&entry->list_values, *pos); + + return ret; +} + +static int lisa_param_feature_seq_show(struct seq_file *s, void *v) +{ + struct feature_param_entry_value *val; + struct feature_param_entry *entry; + struct feature_param *param; + + entry = *(struct feature_param_entry **)s->private; + param = entry->param; + + val = hlist_entry(v, struct feature_param_entry_value, node); + + if (param->ops->stringify) { + size_t size = param->ops->stringify(val, NULL); + char *buf = kmalloc(size + 1, GFP_KERNEL); + + if (!buf) + return -ENOMEM; + + buf[size] = '\0'; + size = param->ops->stringify(val, buf); + seq_printf(s, "%s\n", buf); + kfree(buf); + } + + return 0; +} + +static void *lisa_param_feature_seq_next(struct seq_file *s, void *v, loff_t *pos) +{ + struct feature_param_entry *entry; + entry = *(struct feature_param_entry **)s->private; + + return seq_list_next(v, &entry->list_values, pos); +} + +static void lisa_param_feature_seq_stop(struct seq_file *s, void *v) +{ + mutex_unlock(&interface_lock); +} + +static const struct seq_operations lisa_param_feature_seq_ops = { + .start = lisa_param_feature_seq_start, + .next = lisa_param_feature_seq_next, + .stop = lisa_param_feature_seq_stop, + .show = lisa_param_feature_seq_show, +}; + +static int lisa_param_feature_open(struct inode *inode, struct file *file) +{ + if (file->f_mode & FMODE_READ) { + struct feature_param_entry **entry; + + entry = __seq_open_private(file, &lisa_param_feature_seq_ops, + sizeof(entry)); + if (entry) + *entry = inode->i_private; + + return entry ? 0 : -ENOMEM; + } + return 0; +} + +#define MAX_BUF_SIZE 1024 +static ssize_t lisa_param_feature_write(struct file *file, + const char __user *buf, size_t count, + loff_t *ppos) +{ + struct feature_param_entry *entry = file->f_inode->i_private; + char *kbuf, *s, *sep; + ssize_t done = 0; + int ret; + + /* + * Don't modify the 'set_features' or any parameter value if the + * config is activated. The process is: + * - De-activate + * - Modify + * - Re-activate + */ + if (entry->cfg->activated) { + pr_err("Config must be deactivated before any update.\n"); + return -EBUSY; + } + + mutex_lock(&interface_lock); + + if (!(file->f_flags & O_APPEND)) + drain_feature_param_entry_value(&entry->list_values); + + kbuf = kmalloc(MAX_BUF_SIZE, GFP_KERNEL); + if (!kbuf) { + done = -ENOMEM; + goto leave; + } + + while (done < count) { + ssize_t size = count - done; + + if (size >= MAX_BUF_SIZE) + size = MAX_BUF_SIZE - 1; + + if (copy_from_user(kbuf, buf + done, size)) { + kfree(kbuf); + goto done; + } + kbuf[size] = '\0'; + s = sep = kbuf; + do { + sep = strchr(s, ','); + if (sep) { + *sep = '\0'; + ++sep; + ++done; + } + done += size = strlen(s); + /* skip leading whitespaces */ + while (isspace(*s) && *(s++)) + --size; + if (!*s) + goto next; + if (done < count && !sep) { + /* carry over ... 
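+				 * The last token of this chunk may continue in
+				 * the next chunk of the user buffer: rewind
+				 * 'done' so it is re-read and parsed whole.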
*/ + done -= strlen(s); + goto next; + } + /* skip trailing whitespaces */ + while (size && isspace(s[--size])); + if (strlen(s) > ++size) + s[size] = '\0'; + + ret = feature_param_add_new(entry, s); + if (ret) { + done = ret; + goto done; + } + + if (ppos) + *ppos += 1; + next: + s = sep; + } while (s); + } +done: + kfree(kbuf); +leave: + mutex_unlock(&interface_lock); + return done; +} + +int lisa_param_feature_release(struct inode *inode, struct file *file) +{ + return file->f_mode & FMODE_READ ? seq_release_private(inode, file) : 0; +} + +static struct file_operations lisa_param_feature_fops = { + .owner = THIS_MODULE, + .open = lisa_param_feature_open, + .read = seq_read, + .write = lisa_param_feature_write, + .release = lisa_param_feature_release, +}; + +///////////////////////////////////// +// configs +///////////////////////////////////// + +/* TODO: + * linux: + * int (*mkdir) (struct mnt_idmap *, struct inode *,struct dentry *, + * umode_t); + * android: + * int (*mkdir) (struct user_namespace *, struct inode *,struct dentry *, + * umode_t); + */ + +static int lisa_fs_syscall_mkdir(struct mnt_idmap *idmap, + struct inode *inode, struct dentry *dentry, + umode_t mode) +{ + struct dentry *my_dentry; + struct lisa_cfg *cfg; + int ret; + + cfg = allocate_lisa_cfg(dentry->d_name.name); + if (!cfg) + return -ENOMEM; + + mutex_lock(&interface_lock); + + my_dentry = lisa_fs_create_single(dentry->d_parent, dentry->d_name.name, + &simple_dir_inode_operations, + &simple_dir_operations, + S_IFDIR | mode, cfg); + if (!my_dentry) + goto error; + + ret = init_lisa_cfg(cfg, &cfg_list, my_dentry); + if (ret) + return -ENOMEM; + + lisa_fs_create_files(my_dentry, false, cfg); + mutex_unlock(&interface_lock); + return 0; + +error: + free_lisa_cfg(cfg); + mutex_unlock(&interface_lock); + return ret; +} + +void lisa_fs_remove(struct dentry *dentry) +{ + simple_recursive_removal(dentry, NULL); +} + +static int lisa_fs_syscall_rmdir(struct inode *inode, struct dentry *dentry) +{ + struct lisa_cfg *cfg; + cfg = inode->i_private; + + inode_unlock(inode); + inode_unlock(d_inode(dentry)); + + mutex_lock(&interface_lock); + + cfg = find_lisa_cfg(dentry->d_name.name); + if (!cfg) + pr_err("Failed to find config: %s\n", dentry->d_name.name); + + free_lisa_cfg(cfg); + + mutex_unlock(&interface_lock); + + inode_lock_nested(inode, I_MUTEX_PARENT); + inode_lock(d_inode(dentry)); + + return 0; +} + +const struct inode_operations lisa_fs_dir_inode_operations = { + .lookup = simple_lookup, + .mkdir = lisa_fs_syscall_mkdir, + .rmdir = lisa_fs_syscall_rmdir, +}; + +///////////////////////////////////// +// Main files +///////////////////////////////////// + +static struct dentry * +lisa_fs_create_single(struct dentry *parent, const char *name, + const struct inode_operations *i_ops, + const struct file_operations *f_ops, umode_t mode, + void *data) +{ + struct dentry *dentry; + struct inode *inode; + + dentry = d_alloc_name(parent, name); + if (!dentry) + return NULL; + inode = lisa_fs_create_inode(parent->d_sb, mode); + if (!inode) { + dput(dentry); + return NULL; + } + + if (mode & S_IFREG) { + inode->i_fop = f_ops; + } else { + inode->i_op = i_ops; + inode->i_fop = f_ops; + } + inode->i_private = data; + d_add(dentry, inode); + if (mode & S_IFDIR) { + inc_nlink(d_inode(parent)); + inc_nlink(inode); + } + + return dentry; +} + +/* Called with interface_lock */ +static int +lisa_fs_create_files(struct dentry *parent, bool is_top_level, struct lisa_cfg *cfg) +{ + struct feature_param_entry *entry; + struct feature 
*feature; + + entry = allocate_feature_param_entry(); + if (!entry) + return -ENOMEM; + + init_feature_param_entry(entry, cfg, &lisa_features_param); + + /* set_features: enable a feature - RW. */ + if (!lisa_fs_create_single(parent, "set_features", + NULL, + &lisa_param_feature_fops, + S_IFREG | S_IRUGO | S_IWUGO, entry)) { + free_feature_param_entry(entry); + return -ENOMEM; + } + + /* available_features: list available features - RO. */ + if (!lisa_fs_create_single(parent, "available_features", + NULL, + &lisa_available_features_fops, + S_IFREG | S_IRUGO, &lisa_features_param)) + return -ENOMEM; + + /* activate: activate the selected (and configured) features - RW. */ + if (!lisa_fs_create_single(parent, "activate", + NULL, + &lisa_activate_fops, + S_IFREG | S_IRUGO | S_IWUGO, cfg)) + return -ENOMEM; + + /* configs: Dir containing configurations, only setup at the top level. */ + if (is_top_level) { + if (!lisa_fs_create_single(parent, "configs", + &lisa_fs_dir_inode_operations, + &simple_dir_operations, + S_IFDIR | S_IRUGO, NULL)) + return -ENOMEM; + } + + /* Create a dir for features having parameters. */ + for_each_feature(feature) { + struct feature_param *param, **pparam; + struct dentry *dentry; + + if (!feature->params) + continue; + + dentry = lisa_fs_create_single(parent, feature->name, + &simple_dir_inode_operations, + &simple_dir_operations, + S_IFDIR | S_IRUGO, cfg); + if (!dentry) { + pr_err("Failed to initialize feature's (%s) root node\n", + feature->name); + return -ENOMEM; + } + + for_each_feature_param(param, pparam, feature) { + entry = allocate_feature_param_entry(); + if (!entry) + return -ENOMEM; + + init_feature_param_entry(entry, cfg, param); + + if (!lisa_fs_create_single(dentry, param->name, NULL, + &lisa_param_feature_fops, + S_IFREG | S_IRUGO, entry)) { + free_feature_param_entry(entry); + return -ENOMEM; + } + } + } + + return 0; +} + +///////////////////////////////////// +// Super block +///////////////////////////////////// + +static struct super_operations lisa_super_ops = { + .statfs = simple_statfs, +}; + +static int lisa_fs_fill_super(struct super_block *sb, struct fs_context *fc) +{ + struct lisa_cfg *cfg; + struct inode *root; + int ret = -ENOMEM; + + sb->s_maxbytes = MAX_LFS_FILESIZE; + sb->s_blocksize = PAGE_SIZE; + sb->s_blocksize_bits = PAGE_SHIFT; + sb->s_magic = LISA_FS_SUPER_MAGIC; + sb->s_op = &lisa_super_ops; + + root = lisa_fs_create_inode(sb, S_IFDIR | S_IRUGO); + if (!root) + return -ENOMEM; + + root->i_op = &simple_dir_inode_operations; + root->i_fop = &simple_dir_operations; + + sb->s_root = d_make_root(root); + if (!sb->s_root) + goto error1; + + cfg = allocate_lisa_cfg("root"); + if (!cfg) + goto error2; + + mutex_lock(&interface_lock); + + ret = lisa_fs_create_files(sb->s_root, true, cfg); + if (ret) + goto error3; + + ret = init_lisa_cfg(cfg, &cfg_list, NULL); + if (ret) + goto error4; + + mutex_unlock(&interface_lock); + + return 0; + +error4: + free_lisa_cfg(cfg); +error3: + mutex_lock(&interface_lock); +error2: + dput(sb->s_root); +error1: + iput(root); + + return ret; +} + +static int lisa_fs_get_tree(struct fs_context *fc) +{ + return get_tree_single(fc, lisa_fs_fill_super); +} + +static const struct fs_context_operations lisa_fs_context_ops = { + .get_tree = lisa_fs_get_tree, +}; + +static int lisa_init_fs_context(struct fs_context *fc) +{ + fc->ops = &lisa_fs_context_ops; + put_user_ns(fc->user_ns); + fc->user_ns = get_user_ns(&init_user_ns); + fc->global = true; + return 0; +} + +static void lisa_fs_kill_sb(struct 
super_block *sb) +{ + drain_lisa_cfg(&cfg_list); + + /* + * Free the lisa_features_param param, + * which is not bound to any feature. + */ + drain_feature_param_entry_value(&lisa_features_param.global_value); + + /* Free the inodes/dentries. */ + kill_litter_super(sb); +} + +static struct file_system_type lisa_fs_type = { + .owner = THIS_MODULE, + .name = "lisa", + .init_fs_context = lisa_init_fs_context, + .kill_sb = lisa_fs_kill_sb, +}; + +/* + * Note: Cannot initialize global_value list of an unnamed struct in __PARAM + * using LIST_HEAD_INIT. Need to have a function to do this. + */ +void init_feature_param(void) +{ + struct feature_param *param, **pparam; + struct feature *feature; + + for_each_feature(feature) + for_each_feature_param(param, pparam, feature) + INIT_LIST_HEAD(¶m->global_value); +} + +int init_lisa_fs(void) +{ + int ret; + + init_feature_param(); + mutex_init(&interface_lock); + INIT_HLIST_HEAD(&cfg_list); + + ret = sysfs_create_mount_point(fs_kobj, "lisa"); + if (ret) + goto error; + + ret = register_filesystem(&lisa_fs_type); + if (ret) { + sysfs_remove_mount_point(fs_kobj, "lisa"); + goto error; + } + + return ret; + +error: + pr_err("Could not install lisa fs.\n"); + return ret; +} + +void exit_lisa_fs(void) +{ + unregister_filesystem(&lisa_fs_type); + sysfs_remove_mount_point(fs_kobj, "lisa"); +} diff --git a/lisa/_assets/kmodules/lisa/ftrace_events.h b/lisa/_assets/kmodules/lisa/ftrace_events.h index fefa34bc25e59e427d393fb475d4f889a2a169df..c8e8982d25cf02f502e7b06b77c32e6136218701 100644 --- a/lisa/_assets/kmodules/lisa/ftrace_events.h +++ b/lisa/_assets/kmodules/lisa/ftrace_events.h @@ -440,6 +440,25 @@ TRACE_EVENT(lisa__pixel6_emeter, __entry->ts, __entry->device, __entry->chan, __entry->chan_name, __entry->value) ); +TRACE_EVENT(lisa__perf_counter, + TP_PROTO(unsigned int cpu, unsigned int counter_id, u64 value), + TP_ARGS(cpu, counter_id, value), + + TP_STRUCT__entry( + __field( unsigned int, cpu ) + __field( unsigned int, counter_id ) + __field( u64, value ) + ), + + TP_fast_assign( + __entry->cpu = cpu; + __entry->counter_id = counter_id; + __entry->value = value; + ), + + TP_printk("cpu=%u counter_id=%u value=%llu", + __entry->cpu, __entry->counter_id, __entry->value) +); #endif /* _FTRACE_EVENTS_H */ /* This part must be outside protection */ diff --git a/lisa/_assets/kmodules/lisa/main.c b/lisa/_assets/kmodules/lisa/main.c index 05ec9b2cfcb167a5af51d3aba0a428d9746caca1..ff4e3f1914334dde3351c6251e13d2efec1f9bcb 100644 --- a/lisa/_assets/kmodules/lisa/main.c +++ b/lisa/_assets/kmodules/lisa/main.c @@ -14,12 +14,17 @@ static char* version = LISA_MODULE_VERSION; module_param(version, charp, 0); MODULE_PARM_DESC(version, "Module version defined as sha1sum of the module sources"); + +int init_lisa_fs(void); +void exit_lisa_fs(void); + static char *features[MAX_FEATURES]; unsigned int features_len = 0; module_param_array(features, charp, &features_len, 0); MODULE_PARM_DESC(features, "Comma-separated list of features to enable. Available features are printed when loading the module"); static void modexit(void) { + exit_lisa_fs(); if (deinit_features()) pr_err("Some errors happened while unloading LISA kernel module\n"); } @@ -31,6 +36,11 @@ static int __init modinit(void) { if (strcmp(version, LISA_MODULE_VERSION)) { pr_err("Lisa module version check failed. Got %s, expected %s\n", version, LISA_MODULE_VERSION); return -EPROTO; + + ret = init_lisa_fs(); + if (ret) { + pr_err("Failed to setup lisa_fs\n"); + return ret; } pr_info("Kernel features detected. 
This will impact the module features that are available:\n"); @@ -45,6 +55,8 @@ static int __init modinit(void) { if (ret) { pr_err("Some errors happened while loading LISA kernel module\n"); + exit_lisa_fs(); + /* Use one of the standard error code */ ret = -EINVAL; diff --git a/lisa/_assets/kmodules/lisa/perf_counters.c b/lisa/_assets/kmodules/lisa/perf_counters.c new file mode 100644 index 0000000000000000000000000000000000000000..ede74a5e04334bbd007a26408d2c70b02206c05b --- /dev/null +++ b/lisa/_assets/kmodules/lisa/perf_counters.c @@ -0,0 +1,498 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2023 ARM Ltd. +#include +#if defined(CONFIG_HW_PERF_EVENTS) && defined(CONFIG_ARM_PMU) +#include +#endif +#include "main.h" +#include "ftrace_events.h" +#include "tp.h" + +#include "features.h" + +#define MAX_PERF_COUNTERS 6 + +#define GENERIC_COUNTERS_STR "generic_counters" +#define PMU_RAW_COUNTERS_STR "pmu_raw_counters" +#define PMU_TEST_SINGLE_STR "pmu_test_single" + +/* Initial set of supported counters to be enabled through module params */ +struct perfctr_desc { + /* unique name to identify the counter */ + const char *name; + /* counter id (may be generic or raw) */ + u64 id; + enum perf_type_id type; + /* enable by default if no counters requested */ + bool default_on; +}; + +#define PERFCTR_DESC(__name, __id, __type, __en) \ + ((struct perfctr_desc) { \ + .name = __name, .id = __id, .type = __type, .default_on = __en, \ + }) + +#define PERFCTR_DESC_COUNT_HW(__name, __id, __en) \ + PERFCTR_DESC(__name, __id, PERF_TYPE_HARDWARE, __en) + +/* Initial set of supported counters to be enabled based on provided event names */ +static const struct perfctr_desc perfctr_generic_lt [] = { + PERFCTR_DESC_COUNT_HW("cpu_cycles", PERF_COUNT_HW_CPU_CYCLES, 1), + PERFCTR_DESC_COUNT_HW("inst_retired", PERF_COUNT_HW_INSTRUCTIONS, 0), + PERFCTR_DESC_COUNT_HW("l1d_cache", PERF_COUNT_HW_CACHE_REFERENCES, 0), + PERFCTR_DESC_COUNT_HW("l1d_cache_refill", PERF_COUNT_HW_CACHE_MISSES, 0), + PERFCTR_DESC_COUNT_HW("pc_write_retired", PERF_COUNT_HW_BRANCH_INSTRUCTIONS, 0), + PERFCTR_DESC_COUNT_HW("br_mis_pred", PERF_COUNT_HW_BRANCH_MISSES, 0), + PERFCTR_DESC_COUNT_HW("bus_cycles", PERF_COUNT_HW_BUS_CYCLES, 0), + PERFCTR_DESC_COUNT_HW("stall_frontend", PERF_COUNT_HW_STALLED_CYCLES_FRONTEND, 0), + PERFCTR_DESC_COUNT_HW("stall_backend", PERF_COUNT_HW_STALLED_CYCLES_BACKEND, 0), +}; + +struct perfctr_event_entry { + struct hlist_node node; + struct hlist_node group_link; + struct perf_event *event; + struct perfctr_event_group *group; + struct rcu_head rcu_head; +}; + +struct perfctr_event_group { + struct list_head node; + struct hlist_head entries; + u64 raw_id; +}; + +struct perfctr_pcpu_data { + struct hlist_head events; +}; + +struct perfctr_core { + struct list_head events; + struct perfctr_pcpu_data __percpu *pcpu_data; + unsigned int nr_events; + unsigned int max_nr_events; +}; + +static inline void perfctr_show_supported_generic_events(void) +{ + int i; + + pr_info("Possible (subject to actual support) generic perf events: "); + for (i = 0; i < ARRAY_SIZE(perfctr_generic_lt); ++i) + printk(KERN_CONT "%s, ", perfctr_generic_lt[i].name); +} + +static void perfctr_event_release_entry(struct perfctr_event_entry *entry); + +static int perfctr_event_activate_single(struct perfctr_core *perf_data, + struct perf_event_attr *attr) +{ + struct perfctr_event_entry *entry= NULL; + struct perfctr_event_group *group; + struct hlist_node *next; + int cpu; + + group = kzalloc(sizeof(*group), GFP_KERNEL); + if (!group) 
+ return -ENOMEM; + + group->raw_id = PERF_COUNT_HW_MAX; + + for_each_online_cpu(cpu) { + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + goto activate_failed; + + entry->event = + /* No overflow handler, at least not at this point */ + perf_event_create_kernel_counter(attr, cpu, NULL, + NULL, NULL); + if (IS_ERR(entry->event)) { + pr_err("Failed to create counter id=%llu on cpu%d\n", + attr->config, cpu); + goto activate_failed; + } + + perf_event_enable(entry->event); + /* + * the PMU driver might still fail to assign a slot for a given + * counter (@see armpmu_add) which leaves the event ineffective + */ + if (entry->event->state != PERF_EVENT_STATE_ACTIVE) { + pr_err("Failed to enable counter id=%llu on cpu%d\n", + attr->config, cpu); + perf_event_disable(entry->event); + perf_event_release_kernel(entry->event); + goto activate_failed; + } + + hlist_add_head_rcu(&entry->node, + &per_cpu_ptr(perf_data->pcpu_data, cpu)->events); + + hlist_add_head(&entry->group_link, &group->entries); + entry->group = group; + + /* One-time only */ + if (group->raw_id != PERF_COUNT_HW_MAX) + continue; + if (attr->type == PERF_TYPE_RAW || !IS_ENABLED(CONFIG_ARM_PMU)) { + group->raw_id = attr->config; + } else { + struct arm_pmu *arm_pmu; + + arm_pmu = to_arm_pmu(entry->event->pmu); + /* There needs to be a better way to do this !!*/ + group->raw_id = arm_pmu->map_event(entry->event); + } + } + list_add_tail(&group->node, &perf_data->events); + ++perf_data->nr_events; + + pr_info("%s event counter id=%llu activated on cpus=%*pbl", + attr->type == PERF_TYPE_RAW ? "PMU raw" : "Generic perf", + attr->config, cpumask_pr_args(cpu_online_mask)); + + return 0; + +activate_failed: + if (entry) + kfree(entry); + + hlist_for_each_entry(entry, &group->entries, group_link) { + hlist_del_rcu(&entry->node); + } + synchronize_rcu(); + hlist_for_each_entry_safe(entry, next, &group->entries, group_link) { + hlist_del(&entry->group_link); + perfctr_event_release_entry(entry); + } + kfree(group); + return -ENOMEM; + +} + +/* Lookup match type */ +enum perfctr_match_type { + PERFCTR_MATCH_NAME, + PERFCTR_MATCH_STATUS +}; + +struct perfctr_match { + union { + char *name; /* generic perf hw event name */ + bool status; /* enable by default */ + }; + enum perfctr_match_type type; +}; + +static int perfctr_event_activate(struct perfctr_core *perf_data, + const struct perfctr_match *match) +{ + int result = -EINVAL; + int i; + + struct perf_event_attr attr = { + .size = sizeof(struct perf_event_attr), + .pinned = 1, + .disabled = 1, + }; + + for (i = 0; i < ARRAY_SIZE(perfctr_generic_lt); ++i) { + switch (match->type) { + case PERFCTR_MATCH_NAME: + if (strcmp(match->name, perfctr_generic_lt[i].name)) + continue; + break; + case PERFCTR_MATCH_STATUS: + if (match->status != perfctr_generic_lt[i].default_on) + continue; + else + break; + default: + unreachable(); + } + attr.config = perfctr_generic_lt[i].id; + attr.type = perfctr_generic_lt[i].type; + + result = perfctr_event_activate_single(perf_data, &attr); + if (!result || match->type == PERFCTR_MATCH_NAME) + break; + } + return result; +} + +static void perfctr_event_release_entry(struct perfctr_event_entry *entry) +{ + perf_event_disable(entry->event); + perf_event_release_kernel(entry->event); + kfree(entry); +} + +static void perfctr_events_release_group(struct perfctr_core *perf_data, + struct perfctr_event_group *group) +{ + struct perfctr_event_entry *entry; + struct hlist_node *next; + + hlist_for_each_entry(entry, &group->entries, group_link) { + 
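+		/*
+		 * Unlink from the per-cpu RCU lists first; entries are freed
+		 * only after synchronize_rcu(), so a concurrent sched_switch
+		 * probe cannot dereference a released perf_event.
+		 */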
hlist_del_rcu(&entry->node); + } + synchronize_rcu(); + hlist_for_each_entry_safe(entry, next, &group->entries, group_link) { + hlist_del(&entry->group_link); + perfctr_event_release_entry(entry); + } + list_del(&group->node); + kfree(group); + --perf_data->nr_events; +} + +static void perfctr_events_release(struct perfctr_core *perf_data) +{ + struct perfctr_event_group *group, *next; + + list_for_each_entry_safe(group, next, &perf_data->events, node) { + perfctr_events_release_group(perf_data, group); + } +} + +static void perfctr_sched_switch_probe(struct feature *feature, bool preempt, + struct task_struct *prev, + struct task_struct *next, + unsigned int prev_state) +{ + struct perfctr_core *perf_data = feature->data; + + if (trace_lisa__perf_counter_enabled()) { + struct perfctr_event_entry *entry; + struct hlist_head *entry_list; + int cpu = smp_processor_id(); + u64 value = 0; + + entry_list = &per_cpu_ptr(perf_data->pcpu_data, cpu)->events; + + rcu_read_lock(); + hlist_for_each_entry_rcu(entry, entry_list, node) { + /* + * The approach taken is a *semi*-safe one as: + * - the execution context is one as of the caller + * (__schedule) with preemption and interrupts being + * disabled + * - the events being traced are per-cpu ones only + * - kernel counter so no inheritance (no child events) + * - counter is being read on/for a local cpu + */ + struct perf_event *event = entry->event; + + event->pmu->read(event); + value = local64_read(&event->count); + trace_lisa__perf_counter(cpu, entry->group->raw_id, value); + } + rcu_read_unlock(); + } +} + +static int perfctr_register_events(struct perfctr_core *perf_data, struct feature *feature) +{ + struct feature_param *generic_cnt_param, *pmu_raw_param, *pmu_test_single_param; + struct perfctr_match match; + unsigned int count; + int result = 0; + + unsigned long generic_perf_events_count, pmu_raw_counters_count, pmu_test_single_count; + struct feature_param_entry_value *val; + + generic_cnt_param = find_feature_param(GENERIC_COUNTERS_STR, feature); + pmu_raw_param = find_feature_param(PMU_RAW_COUNTERS_STR, feature); + pmu_test_single_param = find_feature_param(PMU_TEST_SINGLE_STR, feature); + + generic_perf_events_count = list_count_elements(&generic_cnt_param->global_value); + pmu_raw_counters_count = list_count_elements(&pmu_raw_param->global_value); + pmu_test_single_count = list_count_elements(&pmu_test_single_param->global_value); + + count = generic_perf_events_count + pmu_raw_counters_count; + if (count > perf_data->max_nr_events) { + pr_err("Requested more than max %d counters\n", + perf_data->max_nr_events); + return -EINVAL; + } + + if (generic_perf_events_count) { + match.type = PERFCTR_MATCH_NAME; + list_for_each_entry(val, &generic_cnt_param->global_value, node) { + match.name = (char*)val->data; + result = perfctr_event_activate(perf_data, &match); + if (result) { + pr_err("Failed to activate event counter: %s\n", + match.name); + perfctr_show_supported_generic_events(); + goto done; + } + } + } + + if (pmu_raw_counters_count) { + struct perf_event_attr attr = { + .size = sizeof(struct perf_event_attr), + .type = PERF_TYPE_RAW, + .pinned = 1, + .disabled = 1, + }; + + list_for_each_entry(val, &pmu_raw_param->global_value, node) { + struct perfctr_event_group *group; + bool duplicate = false; + + attr.config = val->value; + /* Skip duplicates */ + list_for_each_entry(group, &perf_data->events, node) { + if (group->raw_id == attr.config) { + duplicate = true; + break; + } + } + + result = duplicate ? 
0 : perfctr_event_activate_single(perf_data, &attr); + if (result) { + pr_err("Failed to activate event counter: %llu\n", + attr.config); + goto done; + }; + } + } + if (!perf_data->nr_events) { + match.type = PERFCTR_MATCH_STATUS; + match.status = true; + result = perfctr_event_activate(perf_data, &match); + } +done: + /* All or nothing ..... */ + if (result) + perfctr_events_release(perf_data); + return result; +} + +static void perfctr_pmu_discover(struct perfctr_core *perf_data) +{ + struct perf_event *event; + cpumask_var_t active_mask; + int cpu; + + /* + * This is absolutely loathsome but there seems to be no other way + * to poke relevant pmu driver for details so, there it is .... + */ + struct perf_event_attr attr = { + .type = PERF_TYPE_HARDWARE, + .size = sizeof(struct perf_event_attr), + .pinned = 1, + .disabled = 1, + .config = PERF_COUNT_HW_CPU_CYCLES, + }; + + perf_data->max_nr_events = MAX_PERF_COUNTERS; + + if (!IS_ENABLED(CONFIG_ARM_PMU)) + return; + + if (!zalloc_cpumask_var(&active_mask, GFP_KERNEL)) + return; + + for_each_possible_cpu(cpu) { + + if (cpumask_test_cpu(cpu, active_mask)) + continue; + + event = perf_event_create_kernel_counter(&attr, cpu, NULL , + NULL, NULL); + + if (IS_ERR(event)) { + pr_err("Failed to create an event (cpu%d) while discovery\n", + cpu); + break; + } + + if (event->pmu) { + struct arm_pmu *pmu = to_arm_pmu(event->pmu); + + perf_data->max_nr_events = min_t(unsigned int, + perf_data->max_nr_events, + pmu->num_events); + + cpumask_or(active_mask, active_mask, &pmu->supported_cpus); + + } + perf_event_release_kernel(event); + + if (cpumask_equal(active_mask, cpu_possible_mask)) + break; + } + free_cpumask_var(active_mask); + pr_info("Max of %d PMU counters available on cpus=%*pbl\n", + perf_data->max_nr_events, cpumask_pr_args(cpu_possible_mask)); + return; +} + +static int perfctr_disable(struct feature *feature); + +static int perfctr_enable(struct feature *feature) +{ + struct perfctr_core *perf_data; + + if (!IS_ENABLED(CONFIG_HW_PERF_EVENTS)) { + pr_err("Missing support for HW performance event counters\n"); + return 1; + } + + perf_data = kzalloc(sizeof(*perf_data), GFP_KERNEL); + if (!perf_data) + return 1; + + INIT_LIST_HEAD(&perf_data->events); + + feature->data = perf_data; + + perf_data->pcpu_data = alloc_percpu(struct perfctr_pcpu_data); + if (!perf_data->pcpu_data) { + return 1; + } + + perfctr_pmu_discover(perf_data); + + if (perfctr_register_events(perf_data, feature)) + return 1; + + if (!perf_data->nr_events) + pr_warn("No counters have been activated\n"); + + return 0; +} + +static int perfctr_disable(struct feature *feature) +{ + struct perfctr_core *perf_data = feature->data; + + if (!perf_data) + return 0; + + if (perf_data->pcpu_data) { + perfctr_events_release(perf_data); + free_percpu(perf_data->pcpu_data); + } + kfree(perf_data); + feature->data = NULL; + return 0; +} +DEFINE_EXTENDED_TP_EVENT_FEATURE(lisa__perf_counter, + TP_PROBES(TP_PROBE("sched_switch", perfctr_sched_switch_probe)), + perfctr_enable, perfctr_disable, + FEATURE_PARAMS( + PARAM_SET(GENERIC_COUNTERS_STR, + S_IFREG | S_IRUGO | S_IWUGO, + typeof(char *), lisa__perf_counter), + PARAM_SET(PMU_RAW_COUNTERS_STR, + S_IFREG | S_IRUGO | S_IWUGO, + typeof(unsigned int), lisa__perf_counter), + PARAM_SINGLE(PMU_TEST_SINGLE_STR, /* TODO Only for testing. 
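+			 * It exercises the FT_PARAM_MODE_SINGLE path (a single
+			 * value shared across all configs); perfctr_register_events()
+			 * counts it but does not otherwise use it.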
*/ + S_IFREG | S_IRUGO | S_IWUGO, + typeof(unsigned int), lisa__perf_counter))); diff --git a/lisa/_assets/kmodules/lisa/tp.h b/lisa/_assets/kmodules/lisa/tp.h index 15c5210da5a861fc05896878aa6fcc05a348bd53..6336383cdc42c72f6b112feeea89a8363fd82b8c 100644 --- a/lisa/_assets/kmodules/lisa/tp.h +++ b/lisa/_assets/kmodules/lisa/tp.h @@ -106,9 +106,9 @@ struct __tp_probe { * user-defined enable/disable functions. If the tracepoint is not found, the * user functions will not be called. */ -#define DEFINE_EXTENDED_TP_FEATURE(feature_name, probes, enable_f, disable_f) \ +#define DEFINE_EXTENDED_TP_FEATURE(feature_name, probes, enable_f, disable_f, ...) \ DEFINE_TP_ENABLE_DISABLE(feature_name, probes, CONCATENATE(__tp_feature_enable_, feature_name), enable_f, CONCATENATE(__tp_feature_disable_, feature_name), disable_f); \ - DEFINE_FEATURE(feature_name, CONCATENATE(__tp_feature_enable_, feature_name), CONCATENATE(__tp_feature_disable_, feature_name)); + DEFINE_FEATURE(feature_name, CONCATENATE(__tp_feature_enable_, feature_name), CONCATENATE(__tp_feature_disable_, feature_name), ##__VA_ARGS__); /** * DEFINE_TP_FEATURE() - Same as DEFINE_EXTENDED_TP_FEATURE() without custom @@ -117,18 +117,25 @@ struct __tp_probe { #define DEFINE_TP_FEATURE(feature_name, probes) DEFINE_EXTENDED_TP_FEATURE(feature_name, probes, NULL, NULL) #define __EVENT_FEATURE(event_name) CONCATENATE(event__, event_name) - /** * DEFINE_TP_EVENT_FEATURE() - Same as DEFINE_TP_FEATURE() with automatic * "event__" prefixing of the feature name. */ #define DEFINE_TP_EVENT_FEATURE(event_name, probes) DEFINE_TP_FEATURE(__EVENT_FEATURE(event_name), probes) +/** + * __DEFINE_EXTENDED_TP_EVENT_FEATURE - Wrapper for + * DEFINE_EXTENDED_TP_EVENT_FEATURE to allow safe macro-expansion for + * __EVENT_FEATURE + */ +#define __DEFINE_EXTENDED_TP_EVENT_FEATURE(feature_name, ...) \ + DEFINE_EXTENDED_TP_FEATURE(feature_name, ##__VA_ARGS__) /** * DEFINE_EXTENDED_TP_EVENT_FEATURE() - Same as DEFINE_EXTENDED_TP_FEATURE() * with automatic "event__" prefixing of the feature name. */ -#define DEFINE_EXTENDED_TP_EVENT_FEATURE(event_name, probes, enable_f, disable_f) DEFINE_EXTENDED_TP_FEATURE(__EVENT_FEATURE(event_name), probes, enable_f, disable_f) +#define DEFINE_EXTENDED_TP_EVENT_FEATURE(event_name, probes, enable_f, disable_f, ...) 
\ + DEFINE_EXTENDED_TP_FEATURE(__EVENT_FEATURE(event_name), probes, enable_f, disable_f, ##__VA_ARGS__) #define __DEPRECATED_EVENT_ENABLE(event_name) CONCATENATE(__enable_deprecated_feature_, __EVENT_FEATURE(event_name)) /** diff --git a/lisa/_assets/kmodules/lisa/utils.h b/lisa/_assets/kmodules/lisa/utils.h index c7ab03cea18fae6a973d8ce09cfb0f9312998f8d..c9776d6d9bab30a041d4c570319120260fbf2dcb 100644 --- a/lisa/_assets/kmodules/lisa/utils.h +++ b/lisa/_assets/kmodules/lisa/utils.h @@ -12,4 +12,15 @@ #include "linux/kernel.h" +static inline size_t list_count_elements(struct list_head *head) +{ + struct list_head *pos; + size_t count = 0; + + list_for_each (pos, head) + count++; + + return count; +} + #endif /* _UTILS_H */ diff --git a/lisa/_assets/kmodules/lisa/wq.c b/lisa/_assets/kmodules/lisa/wq.c index e9cbb54847eff4c16e24c8606dcd505252f0c5b9..f8510021288c242b02bf58eb4e188e40786674bc 100644 --- a/lisa/_assets/kmodules/lisa/wq.c +++ b/lisa/_assets/kmodules/lisa/wq.c @@ -22,7 +22,17 @@ static void worker(struct work_struct* work) { queue_delayed_work(item->__wq, &item->__dwork, delay); } -struct work_item *start_work(worker_t f, int delay, void *data) { +static __always_inline void __start_work(struct work_item *item) +{ + if (item->__cpu < 0) + /* cpu-unbound work - try to use local */ + queue_delayed_work(item->__wq, &item->__dwork, item->__delay); + else + queue_delayed_work_on(item->__cpu, item->__wq, &item->__dwork, + item->__delay); +} + +struct work_item *start_work_on(worker_t f, int delay, int cpu, void *data) { struct work_item *item; struct workqueue_struct *wq = FEATURE(__worqueue)->data; if (!wq) @@ -33,15 +43,27 @@ struct work_item *start_work(worker_t f, int delay, void *data) { item->f = f; item->data = data; + item->__cpu = cpu; item->__delay = delay; item->__wq = wq; INIT_DELAYED_WORK(&item->__dwork, worker); - queue_delayed_work(wq, &item->__dwork, delay); + __start_work(item); } return item; } +void restart_work(struct work_item *item, int delay) +{ + struct workqueue_struct *wq = FEATURE(__worqueue)->data; + + if (!wq || !item) + return; + + item->__delay = delay; + __start_work(item); +} + int destroy_work(struct work_item *item) { if (item) { cancel_delayed_work_sync(&item->__dwork); diff --git a/lisa/_assets/kmodules/lisa/wq.h b/lisa/_assets/kmodules/lisa/wq.h index 42fc5e0ea0f7aac8b27d99fcc320da7c58d831cf..07269d29ebf062a89b942a24ea36b0a1872a4a7f 100644 --- a/lisa/_assets/kmodules/lisa/wq.h +++ b/lisa/_assets/kmodules/lisa/wq.h @@ -36,6 +36,8 @@ struct work_item { worker_t f; void *data; + /* CPU to queue the work on (-1 for cpu-unbound) */ + int __cpu; /* Workqueue the item got scheduled on */ struct workqueue_struct *__wq; /* Delayed work from kernel workqueue API */ @@ -44,6 +46,20 @@ struct work_item { int __delay; }; +/** + * start_work_on() - Start a worker on a workqueue + * @f: User function of the worker. + * @delay: An amount of time (in jiffies) to wait before queueing the work + * @cpu: cpu id to queue the work on + * @data: void * passed to f() + * + * Context: The __workqueue feature must be enabled using + * ENABLE_FEATURE(__workqueue) before starting any work. + * + * Return struct work_item* to be passed to destroy_work(). + */ +struct work_item *start_work_on(worker_t f, int delay, int cpu, void *data); + /** * start_work() - Start a worker on a workqueue * @f: User function of the worker. @@ -54,7 +70,21 @@ struct work_item { * * Return struct work_item* to be passed to destroy_work(). 
*/ -struct work_item *start_work(worker_t f, int delay, void *data); +static __always_inline +struct work_item *start_work(worker_t f, int delay, void *data) +{ + return start_work_on(f, delay, -1, data); +} + +/** + * restart_work() - Queue existing worker + * @wi - An existing struct work_item instance to queue + * @delay - An amount of time (in jiffies) to wait before queueing the work + * + * Context: The struct work_item should be properly initialised prior to + * re-queueing on a dedicated workqueue. + */ +void restart_work(struct work_item *wi, int delay); /** * destroy_work() - Stop a work item and deallocate it. diff --git a/lisa/_cli_tools/lisa_load_kmod.py b/lisa/_cli_tools/lisa_load_kmod.py index db914aa1905b896a517ae82d737df0a5ce1979ca..183762d01023930aca998500aefe6c4d0d65c7ec 100755 --- a/lisa/_cli_tools/lisa_load_kmod.py +++ b/lisa/_cli_tools/lisa_load_kmod.py @@ -55,15 +55,11 @@ def _main(args, target): if cmd and cmd[0] == '--': cmd = cmd[1:] - kmod_params = {} - if features is not None: - kmod_params['features'] = list(features) - kmod = target.get_kmod(LISADynamicKmod) pretty_events = ', '.join(kmod.defined_events) logging.info(f'Kernel module provides the following ftrace events: {pretty_events}') - _kmod_cm = kmod.run(kmod_params=kmod_params) + _kmod_cm = kmod.run() if keep_loaded: @contextlib.contextmanager diff --git a/lisa/_kmod.py b/lisa/_kmod.py index cb0a6c57ab5cbfcbe764422810eccb9bfda0a960..746975d7490e90336b8ddff59c1a573e4eea5c7d 100644 --- a/lisa/_kmod.py +++ b/lisa/_kmod.py @@ -2534,14 +2534,9 @@ class DynamicKmod(Loggable): return (bin_, kernel_build_env._to_spec()) - def install(self, kmod_params=None): + def install(self): """ Install and load the module on the target. - - :param kmod_params: Parameters to pass to the module via ``insmod``. - Non-string iterable values will be turned into a comma-separated - string following the ``module_param_array()`` kernel API syntax. - :type kmod_params: dict(str, object) or None """ target = self.target @@ -2565,9 +2560,9 @@ class DynamicKmod(Loggable): finally: target.remove(str(target_temp)) - return self._install(kmod_cm(), kmod_params=kmod_params) + return self._install(kmod_cm()) - def _install(self, kmod_cm, kmod_params): + def _install(self, kmod_cm): # Avoid circular import from lisa.trace import DmesgCollector @@ -2596,15 +2591,6 @@ class DynamicKmod(Loggable): logger = self.logger target = self.target - kmod_params = kmod_params or {} - params = ' '.join( - f'{quote(k)}={quote(make_str(v))}' - for k, v in sorted( - kmod_params.items(), - key=itemgetter(0), - ) - ) - try: self.uninstall() except Exception: @@ -2619,7 +2605,7 @@ class DynamicKmod(Loggable): try: with dmesg_coll as dmesg_coll: - target.execute(f'{quote(target.busybox)} insmod {quote(str(ko_path))} {params}', as_root=True) + target.execute(f'{quote(target.busybox)} insmod {quote(str(ko_path))}', as_root=True) except Exception as e: log_dmesg(dmesg_coll, logger.error) @@ -2631,18 +2617,89 @@ class DynamicKmod(Loggable): else: log_dmesg(dmesg_coll, logger.debug) + self.mount_lisa_fs() + + def mount_lisa_fs(self): + """ + Mount lisa_fs on mount_path. + """ + # TODO android: + self.lisa_fs_path = Path("/data/local/lisa") + # TODO: mainline: + # self.lisa_fs_path = Path("/lisa") + self.target.execute(f'mkdir -p {self.lisa_fs_path}') + self.target.execute(f'mount -t lisa none {self.lisa_fs_path}') + def uninstall(self): """ Unload the module from the target. 
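+
+        The LISA filesystem mounted by install() is unmounted before the
+        module is removed.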
""" mod = quote(self.mod_name) execute = self.target.execute + self.umount_lisa_fs() try: execute(f'rmmod {mod}') except TargetStableError: execute(f'rmmod -f {mod}') + def umount_lisa_fs(self): + """ + Mount lisa_fs on mount_path. + """ + self.target.execute(f'umount {self.lisa_fs_path}') + self.target.execute(f'rmdir {self.lisa_fs_path}') + + def setup_config(self, cfg_name=None, features=None): + """ + config is a dict: { "cfg_name": { "feature": ["asd"] } } + """ + # Create the config file + cfg_path = self.lisa_fs_path / "configs" / cfg_name + self.target.execute(f'mkdir {cfg_path}') + + # Write the config + if features: + for f in features: + self.target.execute(f'echo {f} >> {cfg_path / "set_features" }') + + if not features[f]: + continue + + for arg in features[f]: + for val in features[f][arg]: + self.target.execute(f'echo {val} >> {cfg_path / f / arg}') + + # Enable the config + self.target.execute(f'echo 1 > {cfg_path / "activate"}') + + def teardown_config(self, cfg_name=None, features=None): + cfg_path = self.lisa_fs_path / "configs" / cfg_name + + if self.target.execute(f'test -d {cfg_path}'): + return + + self.target.execute(f'rmdir {cfg_path}') + + @destroyablecontextmanager + def with_features(self, **kwargs): + try: + self.teardown_config(**kwargs) + except Exception: + pass + + x = self.setup_config(**kwargs) + try: + yield x + except ContextManagerExit: + self.teardown_config(**kwargs) + + def enable_feature(self, cfg_name, features): + cfg_path = self.lisa_fs_path / cfg_name + self.target.execute(f'mkdir {cfg_path}') + for f in features: + self.target.execute(f'echo {cfg_path}') + @destroyablecontextmanager def run(self, **kwargs): """ @@ -2792,6 +2849,14 @@ class LISADynamicKmod(FtraceDynamicKmod): **kwargs, ) + def _event_features_dict(self, events): + all_events = self.defined_events + return { + event: f'event__{event}' + for pattern in events + for event in fnmatch.filter(all_events, pattern) + } + def _event_features(self, events): all_events = self.defined_events return set( @@ -2800,7 +2865,7 @@ class LISADynamicKmod(FtraceDynamicKmod): for event in fnmatch.filter(all_events, pattern) ) - def install(self, kmod_params=None): + def install(self): target = self.target logger = self.logger @@ -2824,17 +2889,13 @@ class LISADynamicKmod(FtraceDynamicKmod): base_path = f"{modules_path_base}/{modules_version}" return (base_path, f"{self.mod_name}.ko") - - kmod_params = kmod_params or {} - kmod_params['version'] = self.src.checksum - base_path, kmod_filename = guess_kmod_path() logger.debug(f'Looking for pre-installed {kmod_filename} module in {base_path}') super_ = super() def preinstalled_broken(e): logger.debug(f'Pre-installed {kmod_filename} is unsuitable, recompiling: {e}') - return super_.install(kmod_params=kmod_params) + return super_.install() try: kmod_path = target.execute( @@ -2853,7 +2914,7 @@ class LISADynamicKmod(FtraceDynamicKmod): yield kmod_path try: - ret = self._install(kmod_cm(), kmod_params=kmod_params) + ret = self._install(kmod_cm()) except (TargetStableCalledProcessError, KmodVersionError) as e: ret = preinstalled_broken(e) else: diff --git a/lisa/tests/base.py b/lisa/tests/base.py index 9a1870954732eb70a6d596603f1ce92a16c52c08..a9c2543edc34fb9922f4ecff5c5236865238c4e0 100644 --- a/lisa/tests/base.py +++ b/lisa/tests/base.py @@ -34,10 +34,11 @@ from operator import attrgetter import typing from datetime import datetime -from collections import OrderedDict, ChainMap +from collections import OrderedDict, ChainMap, defaultdict, Counter from 
 from collections.abc import Mapping
 from inspect import signature
 
+import pandas as pd
 import IPython.display
 
 from devlib.collector.dmesg import KernelLogEntry
@@ -57,7 +58,9 @@ from lisa.utils import (
     dispatch_kwargs, Loggable, kwargs_forwarded_to, docstring_update,
     is_running_ipython,
 )
-from lisa.datautils import df_filter_task_ids
+
+from lisa.datautils import df_filter_task_ids, df_window
+
 from lisa.trace import FtraceCollector, FtraceConf, DmesgCollector, ComposedCollector
 from lisa.conf import (
     SimpleMultiSrcConf, KeyDesc, TopLevelKeyDesc,
@@ -1860,6 +1863,135 @@ class RTATestBundle(FtraceTestBundle, DmesgTestBundle):
 
         return res
 
+    def df_estimated_freq(self, tasks, window=None):
+        """
+        Provide an estimated frequency for the CPUs the given tasks ran on
+
+        :param tasks: Set of tasks to take into account when providing the
+            estimates
+        :type tasks: list(lisa.trace.TaskID)
+
+        :param window: Optional, restrict the data to the given window only
+        :type window: tuple(float, float)
+
+        :returns: a :class:`pandas.DataFrame` with
+
+            * CPU id as index
+            * A ``runtime`` column with the total runtime reported on given CPU
+            * A ``counter`` column with the CPU_CYCLES event counter
+            * A ``freq`` column with the estimated frequency
+        """
+        try:
+            df_perf = self.trace.df_event('perf_counter')
+        except Exception:
+            return pd.DataFrame()
+
+        # CPU_CYCLES counter:
+        # ARMV8_PMUV3_PERFCTR_CPU_CYCLES 0x0011
+        # ARMV[6/7]_PERFCTR_CPU_CYCLES   0xFF
+        df_perf = df_perf.query('counter_id == 17 or counter_id == 255').copy(deep=False)
+
+        if df_perf.empty:
+            self.logger.warning("CPU_CYCLES event counter missing")
+            return df_perf
+
+        d_perf = defaultdict(Counter)
+
+        for task in tasks:
+
+            def skip_task(task):
+                return task.pid == 0
+
+            if skip_task(task):
+                continue
+
+            try:
+                df_act = self.trace.ana.tasks.df_task_activation(task)
+                df_act = df_act.query('active == 1').copy(deep=False)
+            except Exception:
+                continue
+
+            if window is not None:
+                df_act = df_window(df_act, window, method='inclusive')
+
+            def __map_perf_events(entry, df_perf):
+                # Find the counter events surrounding a given activation
+                df = df_perf.query('cpu == @entry.cpu')
+
+                __loc = df.index.get_indexer(
+                    [entry.name + entry.duration],
+                    method='nearest'
+                )
+                value = df.iloc[__loc[0]].value
+                __loc = df.index.get_indexer(
+                    [entry.name],
+                    method='nearest'
+                )
+                value -= df.iloc[__loc[0]].value
+                return value
+
+            df_act['counter'] = df_act.apply(
+                lambda x: __map_perf_events(x, df_perf), axis=1
+            )
+
+            for cpu, group_df in df_act.groupby('cpu'):
+                d_perf[cpu]['runtime'] += group_df.duration.sum()
+                d_perf[cpu]['counter'] += group_df.counter.sum()
+
+        df_freq = pd.DataFrame(d_perf).T
+        df_freq.index.name = 'cpu'
+        df_freq['freq'] = df_freq['counter'] / df_freq['runtime'] / 1000
+        return df_freq
+
+    @requires_events('perf_counter')
+    @TestBundleBase.add_undecided_filter
+    @TasksAnalysis.df_task_activation.used_events
+    def test_estimated_freq(self, skip_verification=False, freq=None):
+        """
+        Verify the expected frequency for the CPUs used by the rt-app tasks
+
+        :param skip_verification: Do not perform any validation
+        :type skip_verification: bool
+
+        :param freq: Expected frequency to validate the estimated one against
+        :type freq: float
+
+        If ``freq`` is not specified, the estimated frequency is validated
+        against the maximum one for the given CPUs.
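+
+        Estimation sketch (illustrative numbers only, the real values come
+        from the ``perf_counter`` trace events)::
+
+            # 2.0e9 CPU_CYCLES observed over 1.0 s of runtime on a CPU:
+            freq = 2.0e9 / 1.0 / 1000   # 2.0e6 kHz, i.e. 2 GHz
+
+        The verification passes when, for every CPU, the estimate reaches at
+        least 90% of the expected frequency.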
+        """
+        df = self.df_estimated_freq(self.rtapp_task_ids, self.trace.window)
+
+        if skip_verification:
+            res = ResultBundle.from_bool(True)
+            res.add_metric("estimated frequencies", df.T.to_dict())
+            return res
+
+        if df.empty:
+            # Do not compromise the test if something went wrong with setting
+            # up the counters
+            res = ResultBundle.from_bool(True)
+            self.logger.warning("Unable to estimate frequency")
+            return res
+
+        cpus = df.index.values
+
+        def __get_cpu_freq(cpu, freq):
+            return cpu, freq if freq is not None else self.plat_info['freqs'][cpu][-1]
+
+        df_expected = pd.DataFrame([
+            __get_cpu_freq(cpu, freq) for cpu in cpus
+        ], columns=['cpu', 'freq']).set_index('cpu')
+
+        df = df[['freq']]
+        df_ratio = df / df_expected
+
+        df.reset_index(inplace=True)
+        res = ResultBundle.from_bool(df_ratio.query('freq < 0.9').empty)
+        res.add_metric("estimated frequencies", df.T.to_dict())
+        return res
+
     @classmethod
     def unscaled_utilization(cls, plat_info, cpu, utilization_pct):
         """
diff --git a/lisa/trace.py b/lisa/trace.py
index 8d3f6610f6a6d92828c0258619ca322496832052..b1f6ca4f2f2793e687e9165f4c44268ca10f691b 100644
--- a/lisa/trace.py
+++ b/lisa/trace.py
@@ -5889,7 +5889,7 @@ class FtraceCollector(CollectorBase, Configurable):
     TOOLS = ['trace-cmd']
     _COMPOSITION_ORDER = 0
 
-    def __init__(self, target, *, events=None, functions=None, buffer_size=10240, output_path=None, autoreport=False, trace_clock=None, saved_cmdlines_nr=8192, tracer=None, kmod_auto_load=True, events_namespaces=('lisa', None), **kwargs):
+    def __init__(self, target, *, events=None, functions=None, buffer_size=10240, output_path=None, autoreport=False, trace_clock=None, saved_cmdlines_nr=8192, tracer=None, kmod_auto_load=True, events_namespaces=('lisa', None), kmod_features=None, **kwargs):
         kconfig = target.plat_info['kernel']['config']
 
         if not kconfig.get('FTRACE'):
@@ -5949,9 +5949,12 @@ class FtraceCollector(CollectorBase, Configurable):
             }
 
         events_checker = events_checker.map(rewrite)
+        events_checker = events_checker.expand_namespaces(namespaces=events_namespaces)
+        # Expand the wildcards after having expanded the namespaces.
         events_checker = events_checker.map(wildcard)
+        self.logger.debug(f'Will try to collect events: {events_checker}')
 
         # Select the events, after having expanded the namespaces
@@ -5981,8 +5984,14 @@ class FtraceCollector(CollectorBase, Configurable):
         # in custom modules
         needed_from_kmod = kmod_available_events & events
 
+        # Create an empty config if no config was provided.
+        # TODO: 'perf_counter' won't work, need to provide 'lisa__perf_counter'
+        if not kmod_features:
+            kmod_features = {}
+
         kmod_defined_events = set()
         kmod_cm = None
+        kmod_feat_cm = None
         if needed_from_kmod:
             # If anything wrong happens, we will be restricted to the events
             # already available.
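# Sketch of the mapping expected by the new ``kmod_features`` argument: keys
# are kmod event names, values are either None (enable the matching feature
# with its default parameters) or a dict of feature parameters that is
# forwarded to LISADynamicKmod.with_features(). The parameter name
# 'pmu_raw_counters' and its value are hypothetical; the real names come from
# the module's FEATURE_PARAMS definitions.
kmod_features = {
    'lisa__perf_counter': {
        'pmu_raw_counters': ['0x11'],
    },
}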
@@ -5991,10 +6000,11 @@ class FtraceCollector(CollectorBase, Configurable): if kmod_auto_load: self.logger.info(f'Building kernel module to try to provide the following events that are not currently available on the target: {", ".join(sorted(needed_from_kmod))}') try: - kmod_defined_events, provided, kmod_cm = self._get_kmod( + kmod_defined_events, provided, kmod_cm, kmod_feat_cm = self._get_kmod( target, target_available_events=target_available_events, needed_events=needed_from_kmod, + kmod_features=kmod_features ) except Exception as e: try: @@ -6025,6 +6035,7 @@ class FtraceCollector(CollectorBase, Configurable): ) self._kmod_cm = kmod_cm + self._kmod_feat_cm = kmod_feat_cm ############################################ # Final checks after we enabled all we could @@ -6090,7 +6101,7 @@ class FtraceCollector(CollectorBase, Configurable): super().__init__(collector, output_path=output_path) @classmethod - def _get_kmod(cls, target, target_available_events, needed_events): + def _get_kmod(cls, target, target_available_events, needed_events, kmod_features): logger = cls.get_logger() kmod = target.get_kmod(LISADynamicKmod) defined_events = set(kmod.defined_events) @@ -6105,25 +6116,39 @@ class FtraceCollector(CollectorBase, Configurable): if overlapping: raise ValueError(f'Events defined in {mod.src.mod_name} ({", ".join(needed)}) are needed but some events overlap with the ones already provided by the kernel: {", ".join(overlapping)}') else: + + # Update the name of the needed features and give them an empty config. + feat_dict = kmod._event_features_dict(needed) + needed_kmod_features = {feat: None for feat in kmod._event_features(needed)} + # If a config is provided, replace the empty one. + needed_kmod_features.update({feat_dict[feat]: kmod_features[feat] for feat in kmod_features.keys()}) + + kmod_feat_config = functools.partial( + kmod.with_features, + cfg_name='lisa_notebook', + features=needed_kmod_features + ) + return ( defined_events, needed, functools.partial( kmod.run, - kmod_params={ - 'features': sorted(kmod._event_features(needed)) - } - ) + ), + kmod_feat_config ) else: - return (defined_events, set(), None) + return (defined_events, set(), None, None) @contextlib.contextmanager def _make_cm(self, record=True): with contextlib.ExitStack() as stack: kmod_cm = self._kmod_cm + kmod_feat_cm = self._kmod_feat_cm if kmod_cm is not None: stack.enter_context(kmod_cm()) + if kmod_feat_cm is not None: + stack.enter_context(kmod_feat_cm()) if record: proxy = super() diff --git a/lisa/wa/plugins/_kmod.py b/lisa/wa/plugins/_kmod.py index 547a9765d5d37ccd385bd25d26fe185d8e556d31..447c558c1ab4e84812f39d34bcf91db57e6e1f07 100644 --- a/lisa/wa/plugins/_kmod.py +++ b/lisa/wa/plugins/_kmod.py @@ -221,11 +221,7 @@ class LisaKmodInstrument(Instrument): def _run(self): features = sorted(self._features) self.logger.info(f'Enabling LISA kmod features {", ".join(features)}') - return self._kmod.run( - kmod_params={ - 'features': features, - } - ) + return self._kmod.run() @contextmanager def _initialize_cm(self, context): diff --git a/lisa_tests/arm/kernel/scheduler/load_tracking.py b/lisa_tests/arm/kernel/scheduler/load_tracking.py index 716d746370027c4b71c6e868595156a91a16b072..6c0fef23eefc4544cb62723ec32cedfff0b2005f 100644 --- a/lisa_tests/arm/kernel/scheduler/load_tracking.py +++ b/lisa_tests/arm/kernel/scheduler/load_tracking.py @@ -411,6 +411,7 @@ class InvarianceItemBase(RTATestBundle, LoadTrackingHelpers, TestBundle, Exekall @memoized @get_simulated_pelt.used_events + 
@RTATestBundle.test_estimated_freq.undecided_filter(skip_verification=True)
     @RTATestBundle.test_noisy_tasks.undecided_filter(noise_threshold_pct=1)
     def _test_correctness(self, signal_name, mean_error_margin_pct, max_error_margin_pct):
@@ -711,6 +712,7 @@ class TaskInvariance(InvarianceBase):
 
     @memoized
     @_test_behaviour.used_events
+    @RTATestBundle.test_estimated_freq.undecided_filter(skip_verification=True)
     @RTATestBundle.test_noisy_tasks.undecided_filter(noise_threshold_pct=1)
     def test_util_behaviour(self, error_margin_pct=5) -> ResultBundle:
         """
@@ -730,6 +732,7 @@ class TaskInvariance(InvarianceBase):
 
     @memoized
     @_test_behaviour.used_events
+    @RTATestBundle.test_estimated_freq.undecided_filter(skip_verification=True)
     @RTATestBundle.test_noisy_tasks.undecided_filter(noise_threshold_pct=1)
     def test_load_behaviour(self, error_margin_pct=5) -> ResultBundle:
         """
diff --git a/lisa_tests/arm/kernel/scheduler/util_tracking.py b/lisa_tests/arm/kernel/scheduler/util_tracking.py
index ecd5c0df5246179429c5caccc267f7e8454d7730..d7bb9c435bd5c5e5aae34bee97f67d21537732a3 100644
--- a/lisa_tests/arm/kernel/scheduler/util_tracking.py
+++ b/lisa_tests/arm/kernel/scheduler/util_tracking.py
@@ -178,9 +178,10 @@ class UtilConvergence(UtilTrackingBase):
         self._save_debug_plot(fig, name=f'util_est_{test}')
         return fig
 
-    @requires_events('sched_util_est_se')
+    @requires_events('sched_util_est_se', 'perf_counter')
     @LoadTrackingAnalysis.df_tasks_signal.used_events
     @RTAEventsAnalysis.task_phase_windows.used_events
+    @RTATestBundle.test_estimated_freq.undecided_filter()
     @RTATestBundle.test_noisy_tasks.undecided_filter(noise_threshold_pct=1)
     def test_means(self) -> ResultBundle:
         """
@@ -227,6 +228,10 @@ class UtilConvergence(UtilTrackingBase):
             apply_phase_window = functools.partial(df_refit_index, window=(phase.start, phase.end))
 
             ue_phase_df = apply_phase_window(ue_df)
+            if ue_phase_df.shape[0] <= 1:
+                self.logger.warning(f"Not enough events recorded for phase {phase.id} to proceed")
+                continue
+
             mean_enqueued = series_mean(ue_phase_df['enqueued'])
             mean_ewma = series_mean(ue_phase_df['ewma'])
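A minimal usage sketch of the new config interface from the Python side, assuming an already connected lisa.target.Target instance named target; the parameter file name 'pmu_raw_counters' and its value are placeholders, the real names come from the module's FEATURE_PARAMS definitions:

    from lisa._kmod import LISADynamicKmod

    kmod = target.get_kmod(LISADynamicKmod)
    kmod.install()

    # setup_config() creates <lisa_fs>/configs/my_cfg, appends each feature
    # name to its set_features file, writes one value per parameter file and
    # finally activates the config by writing 1 to 'activate'.
    kmod.setup_config(
        cfg_name='my_cfg',
        features={
            'event__lisa__perf_counter': {
                'pmu_raw_counters': ['0x11'],  # placeholder parameter/value
            },
        },
    )

    # ... run the workload and collect the trace ...

    kmod.teardown_config(cfg_name='my_cfg')
    kmod.uninstall()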