From 93c8f34e1cd620e4c819843d6d62191abddd4928 Mon Sep 17 00:00:00 2001 From: Jim Quigley Date: Mon, 16 Dec 2019 18:29:50 +0000 Subject: [PATCH 01/10] juno: Remove unused rate address from CDCEL937 clock driver The CDCEL937 clock driver stores the address to return the clock rate in the clock device context before it creates an event to read the rate. If the get_rate() event is not created because another request is already in progress the target address will have been overwritten. Remove the unused rate address from asynchronous request code path. Change-Id: I56da6ab26bf8f752a7d7500b6fd8fcf2fa45eca3 Signed-off-by: Jim Quigley --- product/juno/module/juno_cdcel937/src/juno_cdcel937.h | 1 - product/juno/module/juno_cdcel937/src/mod_juno_cdcel937.c | 5 ----- 2 files changed, 6 deletions(-) diff --git a/product/juno/module/juno_cdcel937/src/juno_cdcel937.h b/product/juno/module/juno_cdcel937/src/juno_cdcel937.h index 288d3eb59..e9044fc46 100644 --- a/product/juno/module/juno_cdcel937/src/juno_cdcel937.h +++ b/product/juno/module/juno_cdcel937/src/juno_cdcel937.h @@ -241,7 +241,6 @@ struct juno_cdcel937_dev_ctx { struct pll_cfg_reg pll_config; struct cfg_reg_y1 y1_config; uint64_t rate; - uint64_t *rate_addr; int index; }; diff --git a/product/juno/module/juno_cdcel937/src/mod_juno_cdcel937.c b/product/juno/module/juno_cdcel937/src/mod_juno_cdcel937.c index 17f5697d8..fdf37a2a4 100644 --- a/product/juno/module/juno_cdcel937/src/mod_juno_cdcel937.c +++ b/product/juno/module/juno_cdcel937/src/mod_juno_cdcel937.c @@ -714,7 +714,6 @@ static int juno_cdcel937_get_rate(fwk_id_t clock_id, ctx = &ctx_table[fwk_id_get_element_idx(clock_id)]; if (ctx->rate_set == false) { - ctx->rate_addr = rate; status = create_get_rate_request(clock_id, ctx); if ((status != FWK_PENDING) && (status != FWK_SUCCESS)) return FWK_E_DEVICE; @@ -1110,10 +1109,6 @@ static int juno_cdcel937_process_event(const struct fwk_event *event, module_ctx.state = JUNO_CDCEL937_DEVICE_IDLE; response_param.status = status; response_param.value.rate = ctx->rate_hz; - if (ctx->rate_addr) { - *(ctx->rate_addr) = ctx->rate; - ctx->rate_addr = NULL; - } ctx->driver_response_api->request_complete(ctx->config->clock_hal_id, &response_param); -- GitLab From 155d84ebbf6142771af7699fcd807cec4b68cd50 Mon Sep 17 00:00:00 2001 From: Jim Quigley Date: Tue, 17 Dec 2019 10:46:34 +0000 Subject: [PATCH 02/10] juno: Fix clock-id handling in HDLCD driver There is a bug in the HDLCD clock driver when it stores the ID of the clock which is currently waiting for a PENDING request. The clock ID is stored once for the module, so if requests are initiated for multiple devices the clock ID of the last request will overwrite the clock ID of the request which is actually in progress. So we fix this by handling only one request at a time. 
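In outline, the guard works like the sketch below. This is a simplified illustration, not the literal driver code: start_hardware_request() stands in for the board-clock call the real driver makes, while fwk_id_t, FWK_ID_NONE, FWK_PENDING, FWK_E_BUSY and fwk_id_is_equal() are the framework primitives already used by the patch.

    /* FWK_ID_NONE while no request is outstanding */
    static fwk_id_t request_clock_id;

    static int set_rate(fwk_id_t clock_id, uint64_t rate)
    {
        int status;

        /* Reject a new request while another one is still in progress */
        if (!fwk_id_is_equal(request_clock_id, FWK_ID_NONE))
            return FWK_E_BUSY;

        status = start_hardware_request(clock_id, rate);

        /* Remember the clock ID only if the request went asynchronous */
        if (status == FWK_PENDING)
            request_clock_id = clock_id;

        return status;
    }

    static void request_complete(void)
    {
        /* The driver is available again */
        request_clock_id = FWK_ID_NONE;
    }

With this pattern the stored clock ID can never be overwritten by a second caller, because the second caller is turned away with FWK_E_BUSY until the completion callback clears the ID.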
Change-Id: I9abbd572954b328da7123312f288a6cededeb98b Signed-off-by: Jim Quigley --- product/juno/module/juno_hdlcd/src/mod_juno_hdlcd.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/product/juno/module/juno_hdlcd/src/mod_juno_hdlcd.c b/product/juno/module/juno_hdlcd/src/mod_juno_hdlcd.c index 89a94a0d8..42820fea1 100644 --- a/product/juno/module/juno_hdlcd/src/mod_juno_hdlcd.c +++ b/product/juno/module/juno_hdlcd/src/mod_juno_hdlcd.c @@ -142,6 +142,8 @@ void juno_hdlcd_request_complete(fwk_id_t dev_id, ctx->driver_response_api->request_complete(ctx->config->clock_hal_id, response_param); + + module_ctx.request_clock_id = FWK_ID_NONE; } static const struct mod_clock_driver_response_api hdlcd_driver_response_api = { @@ -171,6 +173,11 @@ static int juno_hdlcd_set_rate(fwk_id_t clock_id, uint64_t rate, (rounded_rate > ctx->config->max_rate)) return FWK_E_RANGE; + if (platform == JUNO_IDX_PLATFORM_RTL) { + if (!fwk_id_is_equal(module_ctx.request_clock_id, FWK_ID_NONE)) + return FWK_E_BUSY; + } + /* * Clock rate is always twice the pixel clock rate. This is because Juno has * an implicit "divide by 2" stage. @@ -233,7 +240,8 @@ static int juno_hdlcd_set_rate(fwk_id_t clock_id, uint64_t rate, "[HDLCD] Failed to set board clock\n"); return FWK_E_DEVICE; } - module_ctx.request_clock_id = clock_id; + if (status == FWK_PENDING) + module_ctx.request_clock_id = clock_id; return status; } @@ -438,6 +446,7 @@ static int juno_hdlcd_start(fwk_id_t id) module_ctx.current_pll_rate = ((uint64_t)(PXL_REF_CLK_RATE) * nf) / (nr * od); + module_ctx.request_clock_id = FWK_ID_NONE; return FWK_SUCCESS; } -- GitLab From 5906b6f645c23b0e24f213a3764065cdd8bbb61c Mon Sep 17 00:00:00 2001 From: Jim Quigley Date: Mon, 21 Oct 2019 12:39:23 +0100 Subject: [PATCH 03/10] DVFS: Remove async APIs from DVFS module. The async APIs are not required and should be removed. Change-Id: I09019a79e3451788d8981848ee5ad708baa1f3cc Signed-off-by: Jim Quigley --- module/dvfs/include/mod_dvfs.h | 22 ---------------------- module/dvfs/src/mod_dvfs_domain_api.c | 14 -------------- 2 files changed, 36 deletions(-) diff --git a/module/dvfs/include/mod_dvfs.h b/module/dvfs/include/mod_dvfs.h index 2c4a672ec..f696f0d58 100644 --- a/module/dvfs/include/mod_dvfs.h +++ b/module/dvfs/include/mod_dvfs.h @@ -149,16 +149,6 @@ struct mod_dvfs_domain_api { */ int (*set_frequency)(fwk_id_t domain_id, uint64_t frequency); - /*! - * \brief Set the frequency of a domain. - * - * \note This function is asynchronous. - * - * \param domain_id Element identifier of the domain. - * \param idx Index of the operating point to transition to. - */ - int (*set_frequency_async)(fwk_id_t domain_id, uint64_t frequency); - /*! * \brief Get the frequency of a domain. * @@ -178,18 +168,6 @@ struct mod_dvfs_domain_api { int (*set_frequency_limits)( fwk_id_t domain_id, const struct mod_dvfs_frequency_limits *limits); - - /*! - * \brief Set the frequency of a domain. - * - * \note This function is asynchronous. - * - * \param domain_id Element identifier of the domain. - * \param limits Pointer to the new limits. - */ - int (*set_frequency_limits_async)( - fwk_id_t domain_id, - const struct mod_dvfs_frequency_limits *limits); }; /*! 
diff --git a/module/dvfs/src/mod_dvfs_domain_api.c b/module/dvfs/src/mod_dvfs_domain_api.c index 7e7644235..fbdb214d4 100644 --- a/module/dvfs/src/mod_dvfs_domain_api.c +++ b/module/dvfs/src/mod_dvfs_domain_api.c @@ -194,11 +194,6 @@ static int api_set_frequency(fwk_id_t domain_id, uint64_t frequency) return FWK_SUCCESS; } -static int api_set_frequency_async(fwk_id_t domain_id, uint64_t frequency) -{ - return FWK_E_SUPPORT; -} - int api_get_frequency_limits( fwk_id_t domain_id, struct mod_dvfs_frequency_limits *limits) @@ -244,13 +239,6 @@ static int api_set_frequency_limits( return FWK_SUCCESS; } -static int api_set_frequency_limits_async( - fwk_id_t domain_id, - const struct mod_dvfs_frequency_limits *limits) -{ - return FWK_E_SUPPORT; -} - const struct mod_dvfs_domain_api __mod_dvfs_domain_api = { .get_current_opp = api_get_current_opp, .get_sustained_opp = api_get_sustained_opp, @@ -258,8 +246,6 @@ const struct mod_dvfs_domain_api __mod_dvfs_domain_api = { .get_opp_count = api_get_opp_count, .get_latency = api_get_latency, .set_frequency = api_set_frequency, - .set_frequency_async = api_set_frequency_async, .get_frequency_limits = api_get_frequency_limits, .set_frequency_limits = api_set_frequency_limits, - .set_frequency_limits_async = api_set_frequency_limits_async, }; -- GitLab From ef63ea112c8eb99f58d8d425b8b67927a76bf653 Mon Sep 17 00:00:00 2001 From: Jim Quigley Date: Tue, 22 Oct 2019 11:15:18 +0100 Subject: [PATCH 04/10] DVFS: Refactor DVFS module Refactor the DVFS module to comply with updated standards and rules. Change-Id: I7821acddd7fa670cf326572dfed3493d34e87dbc Signed-off-by: Jim Quigley --- module/dvfs/src/Makefile | 6 +- module/dvfs/src/mod_dvfs.c | 618 ++++++++++++++++++ module/dvfs/src/mod_dvfs_domain_api.c | 251 ------- module/dvfs/src/mod_dvfs_domain_api_private.h | 16 - module/dvfs/src/mod_dvfs_event.c | 47 -- module/dvfs/src/mod_dvfs_event_private.h | 18 - module/dvfs/src/mod_dvfs_module.c | 235 ------- module/dvfs/src/mod_dvfs_module_private.h | 40 -- module/dvfs/src/mod_dvfs_private.h | 16 - module/dvfs/src/mod_dvfs_util.c | 71 -- module/dvfs/src/mod_dvfs_util_private.h | 22 - 11 files changed, 619 insertions(+), 721 deletions(-) create mode 100644 module/dvfs/src/mod_dvfs.c delete mode 100644 module/dvfs/src/mod_dvfs_domain_api.c delete mode 100644 module/dvfs/src/mod_dvfs_domain_api_private.h delete mode 100644 module/dvfs/src/mod_dvfs_event.c delete mode 100644 module/dvfs/src/mod_dvfs_event_private.h delete mode 100644 module/dvfs/src/mod_dvfs_module.c delete mode 100644 module/dvfs/src/mod_dvfs_module_private.h delete mode 100644 module/dvfs/src/mod_dvfs_private.h delete mode 100644 module/dvfs/src/mod_dvfs_util.c delete mode 100644 module/dvfs/src/mod_dvfs_util_private.h diff --git a/module/dvfs/src/Makefile b/module/dvfs/src/Makefile index 433bbcc31..9e63ba5d9 100644 --- a/module/dvfs/src/Makefile +++ b/module/dvfs/src/Makefile @@ -6,10 +6,6 @@ # BS_LIB_NAME := dvfs -BS_LIB_SOURCES := \ - mod_dvfs_domain_api.c \ - mod_dvfs_event.c \ - mod_dvfs_module.c \ - mod_dvfs_util.c \ +BS_LIB_SOURCES := mod_dvfs.c include $(BS_DIR)/lib.mk diff --git a/module/dvfs/src/mod_dvfs.c b/module/dvfs/src/mod_dvfs.c new file mode 100644 index 000000000..397c57d67 --- /dev/null +++ b/module/dvfs/src/mod_dvfs.c @@ -0,0 +1,618 @@ +/* + * Arm SCP/MCP Software + * Copyright (c) 2017-2019, Arm Limited and Contributors. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Domain context */ +struct mod_dvfs_domain_ctx { + /* Domain configuration */ + const struct mod_dvfs_domain_config *config; + + struct { + /* Power supply API */ + const struct mod_psu_device_api *psu; + + /* Clock API */ + const struct mod_clock_api *clock; + } apis; + + /* Number of operating points */ + size_t opp_count; + + /* Operating point prior to domain suspension */ + struct mod_dvfs_opp suspended_opp; + + /* Current operating point limits */ + struct mod_dvfs_frequency_limits frequency_limits; +}; + +static struct mod_dvfs_domain_ctx (*domain_ctx)[]; + +static struct mod_dvfs_domain_ctx *get_domain_ctx(fwk_id_t domain_id) +{ + return &(*domain_ctx)[fwk_id_get_element_idx(domain_id)]; +} + +struct mod_dvfs_domain_ctx *__mod_dvfs_get_valid_domain_ctx(fwk_id_t domain_id) +{ + return get_domain_ctx(domain_id); +} + + +/* + * DVFS Helper Functions + */ +static int +count_opps(const struct mod_dvfs_opp *opps) +{ + const struct mod_dvfs_opp *opp = &opps[0]; + + while ((opp->voltage != 0) && (opp->frequency != 0)) + opp++; + + return opp - &opps[0]; +} + +static const struct mod_dvfs_opp *get_opp_for_values( + const struct mod_dvfs_domain_ctx *ctx, + uint64_t frequency, + uint64_t voltage) +{ + size_t opp_idx; + const struct mod_dvfs_opp *opp; + + /* A value of zero indicates the parameter should be ignored */ + assert((frequency != 0) || (voltage != 0)); + + for (opp_idx = 0; opp_idx < ctx->opp_count; opp_idx++) { + opp = &ctx->config->opps[opp_idx]; + + /* Only check the frequency if requested */ + if ((frequency != 0) && (opp->frequency != frequency)) + continue; + + /* Only check the voltage if requested */ + if ((voltage != 0) && (opp->voltage != voltage)) + continue; + + return opp; + } + + return NULL; +} + +static bool +is_opp_within_limits(const struct mod_dvfs_opp *opp, + const struct mod_dvfs_frequency_limits *limits) +{ + return (opp->frequency >= limits->minimum) && + (opp->frequency <= limits->maximum); +} + +static bool +are_limits_valid(const struct mod_dvfs_domain_ctx *ctx, + const struct mod_dvfs_frequency_limits *limits) +{ + if (limits->minimum > limits->maximum) + return false; + + if (get_opp_for_values(ctx, limits->minimum, 0) == NULL) + return false; + + if (get_opp_for_values(ctx, limits->maximum, 0) == NULL) + return false; + + return true; +} + +static const struct +mod_dvfs_opp *adjust_opp_for_new_limits( + const struct mod_dvfs_domain_ctx *ctx, + const struct mod_dvfs_opp *opp, + const struct mod_dvfs_frequency_limits *limits) +{ + uint64_t needle; + + if (opp->frequency < limits->minimum) + needle = limits->minimum; + else if (opp->frequency > limits->maximum) + needle = limits->maximum; + else { + /* No transition necessary */ + return opp; + } + + return get_opp_for_values(ctx, needle, 0); +} + +/* + * DVFS Utilities + */ +static int +__mod_dvfs_get_current_opp(const struct mod_dvfs_domain_ctx *ctx, + struct mod_dvfs_opp *opp) +{ + int status; + + status = ctx->apis.clock->get_rate( + ctx->config->clock_id, + &opp->frequency); + if (status != FWK_SUCCESS) + return FWK_E_DEVICE; + + status = ctx->apis.psu->get_voltage( + ctx->config->psu_id, + &opp->voltage); + if (status != FWK_SUCCESS) + return FWK_E_DEVICE; + + return FWK_SUCCESS; +} + +static int +__mod_dvfs_set_opp(const struct mod_dvfs_domain_ctx *ctx, + const struct mod_dvfs_opp *new_opp) +{ + int status; + struct 
mod_dvfs_opp current_opp; + + status = __mod_dvfs_get_current_opp(ctx, ¤t_opp); + if (status != FWK_SUCCESS) + return status; + + if (new_opp->voltage > current_opp.voltage) { + /* Raise the voltage before raising the frequency */ + status = ctx->apis.psu->set_voltage( + ctx->config->psu_id, + new_opp->voltage); + if (status != FWK_SUCCESS) + return FWK_E_DEVICE; + } + + if (new_opp->frequency != current_opp.frequency) { + status = ctx->apis.clock->set_rate( + ctx->config->clock_id, + new_opp->frequency, + MOD_CLOCK_ROUND_MODE_NONE); + if (status != FWK_SUCCESS) + return FWK_E_DEVICE; + } + + if (new_opp->voltage < current_opp.voltage) { + /* Lower the voltage after lowering the frequency */ + status = ctx->apis.psu->set_voltage( + ctx->config->psu_id, + new_opp->voltage); + if (status != FWK_SUCCESS) + return FWK_E_DEVICE; + } + + return FWK_SUCCESS; +} +/* + * DVFS Domain API Support + */ +static int +api_get_current_opp(fwk_id_t domain_id, struct mod_dvfs_opp *opp) +{ + int status; + const struct mod_dvfs_domain_ctx *ctx; + + fwk_assert(opp != NULL); + + ctx = __mod_dvfs_get_valid_domain_ctx(domain_id); + if (ctx == NULL) + return FWK_E_PARAM; + + status = __mod_dvfs_get_current_opp(ctx, opp); + if (status != FWK_SUCCESS) + return status; + + return FWK_SUCCESS; +} + +static int +api_get_sustained_opp(fwk_id_t domain_id, struct mod_dvfs_opp *opp) +{ + const struct mod_dvfs_domain_ctx *ctx; + + fwk_assert(opp != NULL); + + ctx = __mod_dvfs_get_valid_domain_ctx(domain_id); + if (ctx == NULL) + return FWK_E_PARAM; + + if (ctx->config->sustained_idx >= ctx->opp_count) + return FWK_E_PARAM; + + *opp = ctx->config->opps[ctx->config->sustained_idx]; + + return FWK_SUCCESS; +} + +static int +api_get_nth_opp(fwk_id_t domain_id, + size_t n, + struct mod_dvfs_opp *opp) +{ + const struct mod_dvfs_domain_ctx *ctx; + + fwk_assert(opp != NULL); + + ctx = __mod_dvfs_get_valid_domain_ctx(domain_id); + if (ctx == NULL) + return FWK_E_PARAM; + + if (n >= ctx->opp_count) + return FWK_E_PARAM; + + *opp = ctx->config->opps[n]; + + return FWK_SUCCESS; +} + +static int +api_get_opp_count(fwk_id_t domain_id, size_t *opp_count) +{ + const struct mod_dvfs_domain_ctx *ctx; + + fwk_assert(opp_count != NULL); + + ctx = __mod_dvfs_get_valid_domain_ctx(domain_id); + if (ctx == NULL) + return FWK_E_PARAM; + + *opp_count = ctx->opp_count; + + return FWK_SUCCESS; +} + +static int +api_get_latency(fwk_id_t domain_id, uint16_t *latency) +{ + const struct mod_dvfs_domain_ctx *ctx; + + fwk_assert(latency != NULL); + + ctx = __mod_dvfs_get_valid_domain_ctx(domain_id); + if (ctx == NULL) + return FWK_E_PARAM; + + *latency = ctx->config->latency; + + return FWK_SUCCESS; +} + +static int +api_set_frequency(fwk_id_t domain_id, uint64_t frequency) +{ + int status; + const struct mod_dvfs_domain_ctx *ctx; + const struct mod_dvfs_opp *new_opp; + + ctx = __mod_dvfs_get_valid_domain_ctx(domain_id); + if (ctx == NULL) + return FWK_E_PARAM; + + /* Only accept frequencies that exist in the operating point table */ + new_opp = get_opp_for_values(ctx, frequency, 0); + if (new_opp == NULL) + return FWK_E_RANGE; + + if (!is_opp_within_limits(new_opp, &ctx->frequency_limits)) + return FWK_E_RANGE; + + status = __mod_dvfs_set_opp(ctx, new_opp); + if (status != FWK_SUCCESS) + return status; + + return FWK_SUCCESS; +} + +static int +api_get_frequency_limits(fwk_id_t domain_id, + struct mod_dvfs_frequency_limits *limits) +{ + const struct mod_dvfs_domain_ctx *ctx; + + ctx = __mod_dvfs_get_valid_domain_ctx(domain_id); + if (ctx == NULL) + return 
FWK_E_PARAM; + + *limits = ctx->frequency_limits; + + return FWK_SUCCESS; +} + +static int +api_set_frequency_limits(fwk_id_t domain_id, + const struct mod_dvfs_frequency_limits *limits) +{ + int status; + struct mod_dvfs_domain_ctx *ctx; + struct mod_dvfs_opp current_opp; + const struct mod_dvfs_opp *new_opp; + + ctx = __mod_dvfs_get_valid_domain_ctx(domain_id); + if (ctx == NULL) + return FWK_E_PARAM; + + if (!are_limits_valid(ctx, limits)) + return FWK_E_PARAM; + + status = __mod_dvfs_get_current_opp(ctx, ¤t_opp); + if (status != FWK_SUCCESS) + return status; + + new_opp = adjust_opp_for_new_limits(ctx, ¤t_opp, limits); + status = __mod_dvfs_set_opp(ctx, new_opp); + if (status != FWK_SUCCESS) + return status; + + ctx->frequency_limits = *limits; + + return FWK_SUCCESS; +} + +const struct mod_dvfs_domain_api __mod_dvfs_domain_api = { + .get_current_opp = api_get_current_opp, + .get_sustained_opp = api_get_sustained_opp, + .get_nth_opp = api_get_nth_opp, + .get_opp_count = api_get_opp_count, + .get_latency = api_get_latency, + .set_frequency = api_set_frequency, + .get_frequency_limits = api_get_frequency_limits, + .set_frequency_limits = api_set_frequency_limits, +}; +/* + * DVFS Event Handling + */ +static int +event_set_frequency(const struct fwk_event *event, + struct fwk_event *response) +{ + return FWK_E_SUPPORT; +} + +static int +event_set_frequency_limits(const struct fwk_event *event, + struct fwk_event *response) +{ + return FWK_E_SUPPORT; +} + +static int +__mod_dvfs_process_event(const struct fwk_event *event, + struct fwk_event *response) +{ + typedef int (*handler_t)( + const struct fwk_event *event, + struct fwk_event *response); + + static const handler_t handlers[] = { + [MOD_DVFS_EVENT_IDX_SET_FREQUENCY] = event_set_frequency, + [MOD_DVFS_EVENT_IDX_SET_FREQUENCY_LIMITS] = event_set_frequency_limits, + }; + + handler_t handler; + + /* Ensure we have a handler implemented for this event */ + handler = handlers[fwk_id_get_event_idx(event->id)]; + if (handler == NULL) + return FWK_E_PARAM; + + /* Delegate event handling to the relevant handler */ + return handler(event, response); +} + +/* + * DVFS Module Framework Support + */ +static int +dvfs_init(fwk_id_t module_id, unsigned int element_count, + const void *data) +{ + domain_ctx = fwk_mm_calloc(element_count, sizeof((*domain_ctx)[0])); + if (domain_ctx == NULL) + return FWK_E_NOMEM; + + return FWK_SUCCESS; +} + +static int +dvfs_element_init(fwk_id_t domain_id, + unsigned int sub_element_count, + const void *data) +{ + struct mod_dvfs_domain_ctx *ctx = get_domain_ctx(domain_id); + + fwk_assert(sub_element_count == 0); + + /* Initialize the configuration */ + ctx->config = data; + fwk_assert(ctx->config->opps != NULL); + + /* Initialize the context */ + ctx->opp_count = count_opps(ctx->config->opps); + fwk_assert(ctx->opp_count > 0); + + /* Frequency limits default to the minimum and maximum available */ + ctx->frequency_limits = (struct mod_dvfs_frequency_limits) { + .minimum = ctx->config->opps[0].frequency, + .maximum = ctx->config->opps[ctx->opp_count - 1].frequency, + }; + + ctx->suspended_opp = ctx->config->opps[ctx->config->sustained_idx]; + + return FWK_SUCCESS; +} + +static int +dvfs_bind_element(fwk_id_t domain_id, unsigned int round) +{ + int status; + const struct mod_dvfs_domain_ctx *ctx = get_domain_ctx(domain_id); + + /* Only handle the first round */ + if (round > 0) + return FWK_SUCCESS; + + /* Bind to the power supply module */ + status = fwk_module_bind( + ctx->config->psu_id, mod_psu_api_id_device, 
&ctx->apis.psu); + if (status != FWK_SUCCESS) + return FWK_E_PANIC; + + /* Bind to the clock module */ + status = fwk_module_bind( + ctx->config->clock_id, + FWK_ID_API(FWK_MODULE_IDX_CLOCK, 0), + &ctx->apis.clock); + if (status != FWK_SUCCESS) + return FWK_E_PANIC; + + return FWK_SUCCESS; +} + +static int +dvfs_bind(fwk_id_t id, unsigned int round) +{ + /* We only need to handle binding our elements */ + if (fwk_id_is_type(id, FWK_ID_TYPE_ELEMENT)) + return dvfs_bind_element(id, round); + + return FWK_SUCCESS; +} + +static int +dvfs_process_bind_request_module(fwk_id_t source_id, + fwk_id_t api_id, + const void **api) +{ + /* Only expose the module API */ + if (!fwk_id_is_equal(api_id, mod_dvfs_api_id_dvfs)) + return FWK_E_PARAM; + + /* We don't do any permissions management */ + *api = &__mod_dvfs_domain_api; + + return FWK_SUCCESS; +} + +static int +dvfs_process_bind_request(fwk_id_t source_id, + fwk_id_t target_id, + fwk_id_t api_id, + const void **api) +{ + /* Only allow binding to the module */ + if (!fwk_id_is_equal(target_id, fwk_module_id_dvfs)) + return FWK_E_PARAM; + + return dvfs_process_bind_request_module(source_id, api_id, api); +} + +static int +dvfs_start(fwk_id_t id) +{ + int status; + const struct mod_dvfs_domain_ctx *ctx; + + if (!fwk_id_is_type(id, FWK_ID_TYPE_ELEMENT)) + return FWK_SUCCESS; + + ctx = get_domain_ctx(id); + + /* Register for clock state notifications */ + status = fwk_notification_subscribe( + mod_clock_notification_id_state_changed, + ctx->config->clock_id, + id); + if (status != FWK_SUCCESS) + return status; + + return fwk_notification_subscribe( + mod_clock_notification_id_state_change_pending, + ctx->config->clock_id, + id); +} + +static int +dvfs_notify_system_state_transition_suspend(fwk_id_t domain_id) +{ + struct mod_dvfs_domain_ctx *ctx = + get_domain_ctx(domain_id); + + return __mod_dvfs_get_current_opp(ctx, &ctx->suspended_opp); +} + +static int +dvfs_notify_system_state_transition_resume(fwk_id_t domain_id) +{ + const struct mod_dvfs_domain_ctx *ctx = + get_domain_ctx(domain_id); + + return __mod_dvfs_set_opp(ctx, &ctx->suspended_opp); +} + +static int +dvfs_process_notification(const struct fwk_event *event, + struct fwk_event *resp_event) +{ + struct clock_notification_params *params; + struct clock_state_change_pending_resp_params *resp_params; + + fwk_assert( + fwk_id_is_equal( + event->id, + mod_clock_notification_id_state_changed) || + fwk_id_is_equal( + event->id, + mod_clock_notification_id_state_change_pending)); + fwk_assert(fwk_id_is_type(event->target_id, FWK_ID_TYPE_ELEMENT)); + + params = (struct clock_notification_params *)event->params; + + if (fwk_id_is_equal(event->id, mod_clock_notification_id_state_changed)) { + if (params->new_state == MOD_CLOCK_STATE_RUNNING) + return dvfs_notify_system_state_transition_resume(event->target_id); + } else if (params->new_state == MOD_CLOCK_STATE_STOPPED) { + /* DVFS has received the pending change notification */ + resp_params = + (struct clock_state_change_pending_resp_params *)resp_event->params; + resp_params->status = FWK_SUCCESS; + + return dvfs_notify_system_state_transition_suspend(event->target_id); + } + + return FWK_SUCCESS; +} +/* Module description */ +const struct fwk_module module_dvfs = { + .name = "DVFS", + .type = FWK_MODULE_TYPE_HAL, + .init = dvfs_init, + .element_init = dvfs_element_init, + .bind = dvfs_bind, + .process_bind_request = dvfs_process_bind_request, + .process_event = __mod_dvfs_process_event, + .start = dvfs_start, + .process_notification = 
dvfs_process_notification, + .api_count = MOD_DVFS_API_IDX_COUNT, + .event_count = MOD_DVFS_EVENT_IDX_COUNT, +}; diff --git a/module/dvfs/src/mod_dvfs_domain_api.c b/module/dvfs/src/mod_dvfs_domain_api.c deleted file mode 100644 index fbdb214d4..000000000 --- a/module/dvfs/src/mod_dvfs_domain_api.c +++ /dev/null @@ -1,251 +0,0 @@ -/* - * Arm SCP/MCP Software - * Copyright (c) 2017-2019, Arm Limited and Contributors. All rights reserved. - * - * SPDX-License-Identifier: BSD-3-Clause - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -static const struct mod_dvfs_opp *get_opp_for_values( - const struct mod_dvfs_domain_ctx *ctx, - uint64_t frequency, - uint64_t voltage) -{ - size_t opp_idx; - const struct mod_dvfs_opp *opp; - - /* A value of zero indicates the parameter should be ignored */ - assert((frequency != 0) || (voltage != 0)); - - for (opp_idx = 0; opp_idx < ctx->opp_count; opp_idx++) { - opp = &ctx->config->opps[opp_idx]; - - /* Only check the frequency if requested */ - if ((frequency != 0) && (opp->frequency != frequency)) - continue; - - /* Only check the voltage if requested */ - if ((voltage != 0) && (opp->voltage != voltage)) - continue; - - return opp; - } - - return NULL; -} - -static bool is_opp_within_limits( - const struct mod_dvfs_opp *opp, - const struct mod_dvfs_frequency_limits *limits) -{ - return (opp->frequency >= limits->minimum) && - (opp->frequency <= limits->maximum); -} - -static bool are_limits_valid( - const struct mod_dvfs_domain_ctx *ctx, - const struct mod_dvfs_frequency_limits *limits) -{ - if (limits->minimum > limits->maximum) - return false; - - if (get_opp_for_values(ctx, limits->minimum, 0) == NULL) - return false; - - if (get_opp_for_values(ctx, limits->maximum, 0) == NULL) - return false; - - return true; -} - -static const struct mod_dvfs_opp *adjust_opp_for_new_limits( - const struct mod_dvfs_domain_ctx *ctx, - const struct mod_dvfs_opp *opp, - const struct mod_dvfs_frequency_limits *limits) -{ - uint64_t needle; - - if (opp->frequency < limits->minimum) - needle = limits->minimum; - else if (opp->frequency > limits->maximum) - needle = limits->maximum; - else { - /* No transition necessary */ - return opp; - } - - return get_opp_for_values(ctx, needle, 0); -} - -static int api_get_current_opp(fwk_id_t domain_id, struct mod_dvfs_opp *opp) -{ - int status; - const struct mod_dvfs_domain_ctx *ctx; - - assert(opp != NULL); - - ctx = __mod_dvfs_get_valid_domain_ctx(domain_id); - if (ctx == NULL) - return FWK_E_PARAM; - - status = __mod_dvfs_get_current_opp(ctx, opp); - if (status != FWK_SUCCESS) - return status; - - return FWK_SUCCESS; -} - -int api_get_sustained_opp(fwk_id_t domain_id, struct mod_dvfs_opp *opp) -{ - const struct mod_dvfs_domain_ctx *ctx; - - assert(opp != NULL); - - ctx = __mod_dvfs_get_valid_domain_ctx(domain_id); - if (ctx == NULL) - return FWK_E_PARAM; - - *opp = ctx->config->opps[ctx->config->sustained_idx]; - - return FWK_SUCCESS; -} - -int api_get_nth_opp(fwk_id_t domain_id, - size_t n, - struct mod_dvfs_opp *opp) -{ - const struct mod_dvfs_domain_ctx *ctx; - - assert(opp != NULL); - - ctx = __mod_dvfs_get_valid_domain_ctx(domain_id); - if (ctx == NULL) - return FWK_E_PARAM; - - if (n >= ctx->opp_count) - return FWK_E_PARAM; - - *opp = ctx->config->opps[n]; - - return FWK_SUCCESS; -} - -static int api_get_opp_count(fwk_id_t domain_id, size_t *opp_count) -{ - const struct mod_dvfs_domain_ctx *ctx; - - assert(opp_count != NULL); - - ctx = 
__mod_dvfs_get_valid_domain_ctx(domain_id); - if (ctx == NULL) - return FWK_E_PARAM; - - *opp_count = ctx->opp_count; - - return FWK_SUCCESS; -} - -int api_get_latency(fwk_id_t domain_id, uint16_t *latency) -{ - const struct mod_dvfs_domain_ctx *ctx; - - assert(latency != NULL); - - ctx = __mod_dvfs_get_valid_domain_ctx(domain_id); - if (ctx == NULL) - return FWK_E_PARAM; - - *latency = ctx->config->latency; - - return FWK_SUCCESS; -} - -static int api_set_frequency(fwk_id_t domain_id, uint64_t frequency) -{ - int status; - const struct mod_dvfs_domain_ctx *ctx; - const struct mod_dvfs_opp *new_opp; - - ctx = __mod_dvfs_get_valid_domain_ctx(domain_id); - if (ctx == NULL) - return FWK_E_PARAM; - - /* Only accept frequencies that exist in the operating point table */ - new_opp = get_opp_for_values(ctx, frequency, 0); - if (new_opp == NULL) - return FWK_E_RANGE; - - if (!is_opp_within_limits(new_opp, &ctx->frequency_limits)) - return FWK_E_RANGE; - - status = __mod_dvfs_set_opp(ctx, new_opp); - if (status != FWK_SUCCESS) - return status; - - return FWK_SUCCESS; -} - -int api_get_frequency_limits( - fwk_id_t domain_id, - struct mod_dvfs_frequency_limits *limits) -{ - const struct mod_dvfs_domain_ctx *ctx; - - ctx = __mod_dvfs_get_valid_domain_ctx(domain_id); - if (ctx == NULL) - return FWK_E_PARAM; - - *limits = ctx->frequency_limits; - - return FWK_SUCCESS; -} - -static int api_set_frequency_limits( - fwk_id_t domain_id, - const struct mod_dvfs_frequency_limits *limits) -{ - int status; - struct mod_dvfs_domain_ctx *ctx; - struct mod_dvfs_opp current_opp; - const struct mod_dvfs_opp *new_opp; - - ctx = __mod_dvfs_get_valid_domain_ctx(domain_id); - if (ctx == NULL) - return FWK_E_PARAM; - - if (!are_limits_valid(ctx, limits)) - return FWK_E_PARAM; - - status = __mod_dvfs_get_current_opp(ctx, ¤t_opp); - if (status != FWK_SUCCESS) - return status; - - new_opp = adjust_opp_for_new_limits(ctx, ¤t_opp, limits); - status = __mod_dvfs_set_opp(ctx, new_opp); - if (status != FWK_SUCCESS) - return status; - - ctx->frequency_limits = *limits; - - return FWK_SUCCESS; -} - -const struct mod_dvfs_domain_api __mod_dvfs_domain_api = { - .get_current_opp = api_get_current_opp, - .get_sustained_opp = api_get_sustained_opp, - .get_nth_opp = api_get_nth_opp, - .get_opp_count = api_get_opp_count, - .get_latency = api_get_latency, - .set_frequency = api_set_frequency, - .get_frequency_limits = api_get_frequency_limits, - .set_frequency_limits = api_set_frequency_limits, -}; diff --git a/module/dvfs/src/mod_dvfs_domain_api_private.h b/module/dvfs/src/mod_dvfs_domain_api_private.h deleted file mode 100644 index 5c1b28187..000000000 --- a/module/dvfs/src/mod_dvfs_domain_api_private.h +++ /dev/null @@ -1,16 +0,0 @@ -/* - * Arm SCP/MCP Software - * Copyright (c) 2017-2019, Arm Limited and Contributors. All rights reserved. - * - * SPDX-License-Identifier: BSD-3-Clause - */ - -#ifndef MOD_DVFS_DOMAIN_API_PRIVATE_H -#define MOD_DVFS_DOMAIN_API_PRIVATE_H - -#include - -/* Module API implementation */ -extern const struct mod_dvfs_domain_api __mod_dvfs_domain_api; - -#endif /* MOD_DVFS_DOMAIN_API_PRIVATE_H */ diff --git a/module/dvfs/src/mod_dvfs_event.c b/module/dvfs/src/mod_dvfs_event.c deleted file mode 100644 index f0224d99e..000000000 --- a/module/dvfs/src/mod_dvfs_event.c +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Arm SCP/MCP Software - * Copyright (c) 2017-2019, Arm Limited and Contributors. All rights reserved. 
- * - * SPDX-License-Identifier: BSD-3-Clause - */ - -#include -#include - -static int event_set_opp( - const struct fwk_event *event, - struct fwk_event *response) -{ - return FWK_E_SUPPORT; -} - -static int event_set_frequency_limits( - const struct fwk_event *event, - struct fwk_event *response) -{ - return FWK_E_SUPPORT; -} - -int __mod_dvfs_process_event( - const struct fwk_event *event, - struct fwk_event *response) -{ - typedef int (*handler_t)( - const struct fwk_event *event, - struct fwk_event *response); - - static const handler_t handlers[] = { - [MOD_DVFS_EVENT_IDX_SET_FREQUENCY] = event_set_opp, - [MOD_DVFS_EVENT_IDX_SET_FREQUENCY_LIMITS] = event_set_frequency_limits, - }; - - handler_t handler; - - /* Ensure we have a handler implemented for this event */ - handler = handlers[fwk_id_get_event_idx(event->id)]; - if (handler == NULL) - return FWK_E_PARAM; - - /* Delegate event handling to the relevant handler */ - return handler(event, response); -} diff --git a/module/dvfs/src/mod_dvfs_event_private.h b/module/dvfs/src/mod_dvfs_event_private.h deleted file mode 100644 index cb6c30c94..000000000 --- a/module/dvfs/src/mod_dvfs_event_private.h +++ /dev/null @@ -1,18 +0,0 @@ -/* - * Arm SCP/MCP Software - * Copyright (c) 2017-2019, Arm Limited and Contributors. All rights reserved. - * - * SPDX-License-Identifier: BSD-3-Clause - */ - -#ifndef MOD_DVFS_EVENT_PRIVATE_H -#define MOD_DVFS_EVENT_PRIVATE_H - -#include - -/* Event handler */ -int __mod_dvfs_process_event( - const struct fwk_event *event, - struct fwk_event *response); - -#endif /* MOD_DVFS_EVENT_PRIVATE_H */ diff --git a/module/dvfs/src/mod_dvfs_module.c b/module/dvfs/src/mod_dvfs_module.c deleted file mode 100644 index e2de6a60b..000000000 --- a/module/dvfs/src/mod_dvfs_module.c +++ /dev/null @@ -1,235 +0,0 @@ -/* - * Arm SCP/MCP Software - * Copyright (c) 2017-2019, Arm Limited and Contributors. All rights reserved. 
- * - * SPDX-License-Identifier: BSD-3-Clause - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -static struct mod_dvfs_domain_ctx (*domain_ctx)[]; - -static int count_opps(const struct mod_dvfs_opp *opps) -{ - const struct mod_dvfs_opp *opp = &opps[0]; - - while ((opp->voltage != 0) && (opp->frequency != 0)) - opp++; - - return opp - &opps[0]; -} - -static struct mod_dvfs_domain_ctx *get_domain_ctx(fwk_id_t domain_id) -{ - unsigned int element_idx = fwk_id_get_element_idx(domain_id); - - return &(*domain_ctx)[element_idx]; -} - -static int dvfs_init( - fwk_id_t module_id, - unsigned int element_count, - const void *data) -{ - domain_ctx = fwk_mm_calloc( - element_count, - sizeof((*domain_ctx)[0])); - if (domain_ctx == NULL) - return FWK_E_NOMEM; - - return FWK_SUCCESS; -} - -static int dvfs_element_init( - fwk_id_t domain_id, - unsigned int sub_element_count, - const void *data) -{ - struct mod_dvfs_domain_ctx *ctx = get_domain_ctx(domain_id); - - assert(sub_element_count == 0); - - /* Initialize the configuration */ - ctx->config = data; - assert(ctx->config->opps != NULL); - - /* Initialize the context */ - ctx->opp_count = count_opps(ctx->config->opps); - assert(ctx->opp_count > 0); - - /* Frequency limits default to the minimum and maximum available */ - ctx->frequency_limits = (struct mod_dvfs_frequency_limits) { - .minimum = ctx->config->opps[0].frequency, - .maximum = ctx->config->opps[ctx->opp_count - 1].frequency, - }; - - ctx->suspended_opp = ctx->config->opps[ctx->config->sustained_idx]; - - return FWK_SUCCESS; -} - -static int dvfs_bind_element(fwk_id_t domain_id, unsigned int round) -{ - int status; - const struct mod_dvfs_domain_ctx *ctx = - get_domain_ctx(domain_id); - - /* Only handle the first round */ - if (round > 0) - return FWK_SUCCESS; - - /* Bind to the power supply module */ - status = fwk_module_bind( - ctx->config->psu_id, mod_psu_api_id_device, &ctx->apis.psu); - if (status != FWK_SUCCESS) - return FWK_E_PANIC; - - /* Bind to the clock module */ - status = fwk_module_bind( - ctx->config->clock_id, - FWK_ID_API(FWK_MODULE_IDX_CLOCK, 0), - &ctx->apis.clock); - if (status != FWK_SUCCESS) - return FWK_E_PANIC; - - return FWK_SUCCESS; -} - -static int dvfs_bind(fwk_id_t id, unsigned int round) -{ - /* We only need to handle binding our elements */ - if (fwk_id_is_type(id, FWK_ID_TYPE_ELEMENT)) - return dvfs_bind_element(id, round); - - return FWK_SUCCESS; -} - -static int dvfs_process_bind_request_module( - fwk_id_t source_id, - fwk_id_t api_id, - const void **api) -{ - /* Only expose the module API */ - if (!fwk_id_is_equal(api_id, mod_dvfs_api_id_dvfs)) - return FWK_E_PARAM; - - /* We don't do any permissions management */ - *api = &__mod_dvfs_domain_api; - - return FWK_SUCCESS; -} - -static int dvfs_process_bind_request( - fwk_id_t source_id, - fwk_id_t target_id, - fwk_id_t api_id, - const void **api) -{ - /* Only allow binding to the module */ - if (!fwk_id_is_equal(target_id, fwk_module_id_dvfs)) - return FWK_E_PARAM; - - return dvfs_process_bind_request_module(source_id, api_id, api); -} - -static int dvfs_start(fwk_id_t id) -{ - int status; - const struct mod_dvfs_domain_ctx *ctx; - - if (!fwk_id_is_type(id, FWK_ID_TYPE_ELEMENT)) - return FWK_SUCCESS; - - ctx = get_domain_ctx(id); - - /* Register for clock state notifications */ - status = fwk_notification_subscribe( - mod_clock_notification_id_state_changed, - ctx->config->clock_id, - id); - if (status != FWK_SUCCESS) - return status; - - return 
fwk_notification_subscribe( - mod_clock_notification_id_state_change_pending, - ctx->config->clock_id, - id); -} - -static int dvfs_notify_system_state_transition_suspend(fwk_id_t domain_id) -{ - struct mod_dvfs_domain_ctx *ctx = - get_domain_ctx(domain_id); - - return __mod_dvfs_get_current_opp(ctx, &ctx->suspended_opp); -} - -static int dvfs_notify_system_state_transition_resume(fwk_id_t domain_id) -{ - const struct mod_dvfs_domain_ctx *ctx = - get_domain_ctx(domain_id); - - return __mod_dvfs_set_opp(ctx, &ctx->suspended_opp); -} - -static int dvfs_process_notification( - const struct fwk_event *event, - struct fwk_event *resp_event) -{ - struct clock_notification_params *params; - struct clock_state_change_pending_resp_params *resp_params; - - assert( - fwk_id_is_equal( - event->id, - mod_clock_notification_id_state_changed) || - fwk_id_is_equal( - event->id, - mod_clock_notification_id_state_change_pending)); - assert(fwk_id_is_type(event->target_id, FWK_ID_TYPE_ELEMENT)); - - params = (struct clock_notification_params *)event->params; - - if (fwk_id_is_equal(event->id, mod_clock_notification_id_state_changed)) { - if (params->new_state == MOD_CLOCK_STATE_RUNNING) - return dvfs_notify_system_state_transition_resume(event->target_id); - } else if (params->new_state == MOD_CLOCK_STATE_STOPPED) { - /* DVFS has received the pending change notification */ - resp_params = - (struct clock_state_change_pending_resp_params *)resp_event->params; - resp_params->status = FWK_SUCCESS; - - return dvfs_notify_system_state_transition_suspend(event->target_id); - } - - return FWK_SUCCESS; -} - -struct mod_dvfs_domain_ctx *__mod_dvfs_get_valid_domain_ctx(fwk_id_t domain_id) -{ - return get_domain_ctx(domain_id); -} - -/* Module description */ -const struct fwk_module module_dvfs = { - .name = "DVFS", - .type = FWK_MODULE_TYPE_HAL, - .init = dvfs_init, - .element_init = dvfs_element_init, - .bind = dvfs_bind, - .process_bind_request = dvfs_process_bind_request, - .process_event = __mod_dvfs_process_event, - .start = dvfs_start, - .process_notification = dvfs_process_notification, - .api_count = MOD_DVFS_API_IDX_COUNT, - .event_count = MOD_DVFS_EVENT_IDX_COUNT, -}; diff --git a/module/dvfs/src/mod_dvfs_module_private.h b/module/dvfs/src/mod_dvfs_module_private.h deleted file mode 100644 index 6dc39ed83..000000000 --- a/module/dvfs/src/mod_dvfs_module_private.h +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Arm SCP/MCP Software - * Copyright (c) 2017-2019, Arm Limited and Contributors. All rights reserved. 
- * - * SPDX-License-Identifier: BSD-3-Clause - */ - -#ifndef MOD_DVFS_MODULE_PRIVATE_H -#define MOD_DVFS_MODULE_PRIVATE_H - -#include -#include -#include - -/* Domain context */ -struct mod_dvfs_domain_ctx { - /* Domain configuration */ - const struct mod_dvfs_domain_config *config; - - struct { - /* Power supply API */ - const struct mod_psu_device_api *psu; - - /* Clock API */ - const struct mod_clock_api *clock; - } apis; - - /* Number of operating points */ - size_t opp_count; - - /* Operating point prior to domain suspension */ - struct mod_dvfs_opp suspended_opp; - - /* Current operating point limits */ - struct mod_dvfs_frequency_limits frequency_limits; -}; - -struct mod_dvfs_domain_ctx *__mod_dvfs_get_valid_domain_ctx(fwk_id_t domain_id); - -#endif /* MOD_DVFS_MODULE_PRIVATE_H */ diff --git a/module/dvfs/src/mod_dvfs_private.h b/module/dvfs/src/mod_dvfs_private.h deleted file mode 100644 index bdd6ceb0b..000000000 --- a/module/dvfs/src/mod_dvfs_private.h +++ /dev/null @@ -1,16 +0,0 @@ -/* - * Arm SCP/MCP Software - * Copyright (c) 2017-2019, Arm Limited and Contributors. All rights reserved. - * - * SPDX-License-Identifier: BSD-3-Clause - */ - -#ifndef MOD_DVFS_PRIVATE_H -#define MOD_DVFS_PRIVATE_H - -#include -#include -#include -#include - -#endif /* MOD_DVFS_PRIVATE_H */ diff --git a/module/dvfs/src/mod_dvfs_util.c b/module/dvfs/src/mod_dvfs_util.c deleted file mode 100644 index a9982dffb..000000000 --- a/module/dvfs/src/mod_dvfs_util.c +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Arm SCP/MCP Software - * Copyright (c) 2017-2019, Arm Limited and Contributors. All rights reserved. - * - * SPDX-License-Identifier: BSD-3-Clause - */ - -#include -#include - -int __mod_dvfs_set_opp( - const struct mod_dvfs_domain_ctx *ctx, - const struct mod_dvfs_opp *new_opp) -{ - int status; - struct mod_dvfs_opp current_opp; - - status = __mod_dvfs_get_current_opp(ctx, ¤t_opp); - if (status != FWK_SUCCESS) - return status; - - if (new_opp->voltage > current_opp.voltage) { - /* Raise the voltage before raising the frequency */ - status = ctx->apis.psu->set_voltage( - ctx->config->psu_id, - new_opp->voltage); - if (status != FWK_SUCCESS) - return FWK_E_DEVICE; - } - - if (new_opp->frequency != current_opp.frequency) { - status = ctx->apis.clock->set_rate( - ctx->config->clock_id, - new_opp->frequency, - MOD_CLOCK_ROUND_MODE_NONE); - if (status != FWK_SUCCESS) - return FWK_E_DEVICE; - } - - if (new_opp->voltage < current_opp.voltage) { - /* Lower the voltage after lowering the frequency */ - status = ctx->apis.psu->set_voltage( - ctx->config->psu_id, - new_opp->voltage); - if (status != FWK_SUCCESS) - return FWK_E_DEVICE; - } - - return FWK_SUCCESS; -} - -int __mod_dvfs_get_current_opp( - const struct mod_dvfs_domain_ctx *ctx, - struct mod_dvfs_opp *opp) -{ - int status; - - status = ctx->apis.clock->get_rate( - ctx->config->clock_id, - &opp->frequency); - if (status != FWK_SUCCESS) - return FWK_E_DEVICE; - - status = ctx->apis.psu->get_voltage( - ctx->config->psu_id, - &opp->voltage); - if (status != FWK_SUCCESS) - return FWK_E_DEVICE; - - return FWK_SUCCESS; -} diff --git a/module/dvfs/src/mod_dvfs_util_private.h b/module/dvfs/src/mod_dvfs_util_private.h deleted file mode 100644 index 7d8d37d0e..000000000 --- a/module/dvfs/src/mod_dvfs_util_private.h +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Arm SCP/MCP Software - * Copyright (c) 2017-2019, Arm Limited and Contributors. All rights reserved. 
- * - * SPDX-License-Identifier: BSD-3-Clause - */ - -#ifndef MOD_DVFS_UTIL_PRIVATE_H -#define MOD_DVFS_UTIL_PRIVATE_H - -#include -#include - -int __mod_dvfs_set_opp( - const struct mod_dvfs_domain_ctx *ctx, - const struct mod_dvfs_opp *new_opp); - -int __mod_dvfs_get_current_opp( - const struct mod_dvfs_domain_ctx *ctx, - struct mod_dvfs_opp *opp); - -#endif /* MOD_DVFS_PRIVATE_H */ -- GitLab From 98a0982ec99fb62a2ba5fb0f5efc091c18a1f75d Mon Sep 17 00:00:00 2001 From: Jim Quigley Date: Mon, 21 Oct 2019 12:23:04 +0100 Subject: [PATCH 05/10] scmi_perf: Add support for pending requests This patch adds support for handling deferred requests to the SCMI performance module. It follows the 'deferred_response_architecture' approach. For SCMI, this means that when the request is deferred, the response to the channel is delayed until the request has been processed. Change-Id: Ifc71f5976d0bf5f11ecff93ff7b9bb40cc208982 Signed-off-by: Jim Quigley --- module/scmi_perf/src/mod_scmi_perf.c | 376 +++++++++++++++++++++------ 1 file changed, 291 insertions(+), 85 deletions(-) diff --git a/module/scmi_perf/src/mod_scmi_perf.c b/module/scmi_perf/src/mod_scmi_perf.c index 9edf57860..2cb91247f 100644 --- a/module/scmi_perf/src/mod_scmi_perf.c +++ b/module/scmi_perf/src/mod_scmi_perf.c @@ -11,8 +11,12 @@ #include #include #include +#include #include #include +#ifdef BUILD_HAS_MULTITHREADING +#include +#endif #include #include #include @@ -20,20 +24,6 @@ #include #include -struct scmi_perf_ctx { - /* SCMI Performance Module Configuration */ - const struct mod_scmi_perf_config *config; - - /* Number of power domains */ - unsigned int domain_count; - - /* SCMI module API */ - const struct mod_scmi_from_protocol_api *scmi_api; - - /* DVFS module API */ - const struct mod_dvfs_domain_api *dvfs_api; -}; - static int scmi_perf_protocol_version_handler( fwk_id_t service_id, const uint32_t *payload); static int scmi_perf_protocol_attributes_handler( @@ -53,8 +43,6 @@ static int scmi_perf_limits_set_handler( static int scmi_perf_limits_get_handler( fwk_id_t service_id, const uint32_t *payload); -static struct scmi_perf_ctx scmi_perf_ctx; - static int (*handler_table[])(fwk_id_t, const uint32_t *) = { [SCMI_PROTOCOL_VERSION] = scmi_perf_protocol_version_handler, @@ -95,6 +83,53 @@ static unsigned int payload_size_table[] = { sizeof(struct scmi_perf_limits_get_a2p), }; +struct perf_operations { + /* + * Service identifier currently requesting operation. + * A 'none' value means that there is no pending request. 
+ */ + fwk_id_t service_id; +}; + + +struct scmi_perf_ctx { + /* SCMI Performance Module Configuration */ + const struct mod_scmi_perf_config *config; + + /* Number of power domains */ + uint32_t domain_count; + + /* SCMI module API */ + const struct mod_scmi_from_protocol_api *scmi_api; + + /* DVFS module API */ + const struct mod_dvfs_domain_api *dvfs_api; + + /* Pointer to a table of operations */ + struct perf_operations *perf_ops_table; +}; + +static struct scmi_perf_ctx scmi_perf_ctx; + +/* Event indices */ +enum scmi_perf_event_idx { + SCMI_PERF_EVENT_IDX_LEVEL_GET_REQUEST, + SCMI_PERF_EVENT_IDX_LIMITS_GET_REQUEST, + SCMI_PERF_EVENT_IDX_COUNT, +}; + +struct scmi_perf_event_parameters { + fwk_id_t domain_id; +}; + +static const fwk_id_t scmi_perf_get_level = + FWK_ID_EVENT_INIT(FWK_MODULE_IDX_SCMI_PERF, + SCMI_PERF_EVENT_IDX_LEVEL_GET_REQUEST); + +static const fwk_id_t scmi_perf_get_limits = + FWK_ID_EVENT_INIT(FWK_MODULE_IDX_SCMI_PERF, + SCMI_PERF_EVENT_IDX_LIMITS_GET_REQUEST); + /* * Protocol command handlers */ @@ -135,15 +170,13 @@ static int scmi_perf_protocol_message_attributes_handler(fwk_id_t service_id, const uint32_t *payload) { const struct scmi_protocol_message_attributes_a2p *parameters; - unsigned int message_id; struct scmi_protocol_message_attributes_p2a return_values; - parameters = - (const struct scmi_protocol_message_attributes_a2p *)payload; - message_id = parameters->message_id; + parameters = (const struct scmi_protocol_message_attributes_a2p *) + payload; - if ((message_id < FWK_ARRAY_SIZE(handler_table)) && - (handler_table[message_id] != NULL)) { + if ((parameters->message_id < FWK_ARRAY_SIZE(handler_table)) && + (handler_table[parameters->message_id] != NULL)) { return_values = (struct scmi_protocol_message_attributes_p2a) { .status = SCMI_SUCCESS, .attributes = 0, /* All commands have an attributes value of 0 */ @@ -165,15 +198,16 @@ static int scmi_perf_domain_attributes_handler(fwk_id_t service_id, unsigned int agent_id; const struct mod_scmi_perf_domain_config *domain; const struct scmi_perf_domain_attributes_a2p *parameters; - struct scmi_perf_domain_attributes_p2a return_values; uint32_t permissions; fwk_id_t domain_id; struct mod_dvfs_opp opp; + struct scmi_perf_domain_attributes_p2a return_values = { + .status = SCMI_GENERIC_ERROR, + }; - return_values.status = SCMI_GENERIC_ERROR; + parameters = (const struct scmi_perf_domain_attributes_a2p *)payload; /* Validate the domain identifier */ - parameters = (const struct scmi_perf_domain_attributes_a2p *)payload; if (parameters->domain_id >= scmi_perf_ctx.domain_count) { status = FWK_SUCCESS; return_values.status = SCMI_NOT_FOUND; @@ -188,7 +222,7 @@ static int scmi_perf_domain_attributes_handler(fwk_id_t service_id, domain = &(*scmi_perf_ctx.config->domains)[parameters->domain_id]; permissions = (*domain->permissions)[agent_id]; - domain_id = FWK_ID_ELEMENT(FWK_MODULE_IDX_DVFS, parameters->domain_id), + domain_id = FWK_ID_ELEMENT(FWK_MODULE_IDX_DVFS, parameters->domain_id); status = scmi_perf_ctx.dvfs_api->get_sustained_opp(domain_id, &opp); if (status != FWK_SUCCESS) goto exit; @@ -206,8 +240,8 @@ static int scmi_perf_domain_attributes_handler(fwk_id_t service_id, }; /* Copy the domain name into the mailbox */ - strncpy((char *)return_values.name, fwk_module_get_name(domain_id), - sizeof(return_values.name) - 1); + strncpy((char *)return_values.name, + fwk_module_get_name(domain_id), sizeof(return_values.name) - 1); exit: scmi_perf_ctx.scmi_api->respond(service_id, &return_values, @@ -223,7 +257,6 @@ 
static int scmi_perf_describe_levels_handler(fwk_id_t service_id, int status; size_t max_payload_size; const struct scmi_perf_describe_levels_a2p *parameters; - struct scmi_perf_describe_levels_p2a return_values; fwk_id_t domain_id; struct scmi_perf_level perf_level; unsigned int num_levels, level_index, level_index_max; @@ -231,8 +264,10 @@ static int scmi_perf_describe_levels_handler(fwk_id_t service_id, size_t opp_count; struct mod_dvfs_opp opp; uint16_t latency; + struct scmi_perf_describe_levels_p2a return_values = { + .status = SCMI_GENERIC_ERROR, + }; - return_values.status = SCMI_GENERIC_ERROR; payload_size = sizeof(return_values); status = scmi_perf_ctx.scmi_api->get_max_payload_size(service_id, @@ -242,13 +277,12 @@ static int scmi_perf_describe_levels_handler(fwk_id_t service_id, status = (SCMI_PERF_LEVELS_MAX(max_payload_size) > 0) ? FWK_SUCCESS : FWK_E_SIZE; - assert(status == FWK_SUCCESS); if (status != FWK_SUCCESS) goto exit; + parameters = (const struct scmi_perf_describe_levels_a2p *)payload; /* Validate the domain identifier */ - parameters = (const struct scmi_perf_describe_levels_a2p *)payload; if (parameters->domain_id >= scmi_perf_ctx.domain_count) { return_values.status = SCMI_NOT_FOUND; @@ -326,12 +360,13 @@ static int scmi_perf_limits_set_handler(fwk_id_t service_id, unsigned int agent_id; const struct mod_scmi_perf_domain_config *domain; const struct scmi_perf_limits_set_a2p *parameters; - struct scmi_perf_limits_set_p2a return_values; uint32_t permissions; - - return_values.status = SCMI_GENERIC_ERROR; + struct scmi_perf_limits_set_p2a return_values = { + .status = SCMI_GENERIC_ERROR, + }; parameters = (const struct scmi_perf_limits_set_a2p *)payload; + if (parameters->domain_id >= scmi_perf_ctx.domain_count) { status = FWK_SUCCESS; return_values.status = SCMI_NOT_FOUND; @@ -358,23 +393,21 @@ static int scmi_perf_limits_set_handler(fwk_id_t service_id, goto exit; } - /* Execute the transition asynchronously */ status = scmi_perf_ctx.dvfs_api->set_frequency_limits( FWK_ID_ELEMENT(FWK_MODULE_IDX_DVFS, parameters->domain_id), &((struct mod_dvfs_frequency_limits) { - .minimum = parameters->range_min, - .maximum = parameters->range_max - })); - if (status != FWK_SUCCESS) { - status = FWK_SUCCESS; - return_values.status = SCMI_OUT_OF_RANGE; + .minimum = parameters->range_min, + .maximum = parameters->range_max + })); - goto exit; - } + /* + * Return immediately to the caller, fire-and-forget. 
+ */ - return_values = (struct scmi_perf_limits_set_p2a) { - .status = SCMI_SUCCESS, - }; + if ((status == FWK_SUCCESS) || (status == FWK_PENDING)) + return_values.status = SCMI_SUCCESS; + else + return_values.status = SCMI_OUT_OF_RANGE; exit: scmi_perf_ctx.scmi_api->respond(service_id, &return_values, @@ -388,11 +421,12 @@ static int scmi_perf_limits_get_handler(fwk_id_t service_id, const uint32_t *payload) { int status; + fwk_id_t domain_id; const struct scmi_perf_limits_get_a2p *parameters; - struct scmi_perf_limits_get_p2a return_values; - struct mod_dvfs_frequency_limits limits; - - return_values.status = SCMI_GENERIC_ERROR; + struct scmi_perf_event_parameters *evt_params; + struct scmi_perf_limits_get_p2a return_values = { + .status = SCMI_GENERIC_ERROR, + }; parameters = (const struct scmi_perf_limits_get_a2p *)payload; if (parameters->domain_id >= scmi_perf_ctx.domain_count) { @@ -402,21 +436,42 @@ static int scmi_perf_limits_get_handler(fwk_id_t service_id, goto exit; } - status = scmi_perf_ctx.dvfs_api->get_frequency_limits( - FWK_ID_ELEMENT(FWK_MODULE_IDX_DVFS, parameters->domain_id), &limits); - if (status != FWK_SUCCESS) + domain_id = FWK_ID_ELEMENT(FWK_MODULE_IDX_DVFS, parameters->domain_id); + + /* Check if there is already a request pending for this domain */ + if (!fwk_id_is_equal( + scmi_perf_ctx.perf_ops_table[parameters->domain_id].service_id, + FWK_ID_NONE)){ + return_values.status = SCMI_BUSY; + status = FWK_SUCCESS; + goto exit; + } - return_values = (struct scmi_perf_limits_get_p2a) { - .status = SCMI_SUCCESS, - .range_min = (uint32_t)limits.minimum, - .range_max = (uint32_t)limits.maximum + /* The get_limits request is processed within the event being generated */ + struct fwk_event event = { + .target_id = fwk_module_id_scmi_perf, + .id = scmi_perf_get_limits, }; + evt_params = (struct scmi_perf_event_parameters *)event.params; + evt_params->domain_id = domain_id; + + status = fwk_thread_put_event(&event); + if (status != FWK_SUCCESS) { + return_values.status = SCMI_GENERIC_ERROR; + + goto exit; + } + + /* Store service identifier to indicate there is a pending request */ + scmi_perf_ctx.perf_ops_table[parameters->domain_id].service_id = service_id; + + return FWK_SUCCESS; + exit: scmi_perf_ctx.scmi_api->respond(service_id, &return_values, - (return_values.status == SCMI_SUCCESS) ? 
- sizeof(return_values) : sizeof(return_values.status)); + sizeof(return_values.status)); return status; } @@ -428,12 +483,13 @@ static int scmi_perf_level_set_handler(fwk_id_t service_id, unsigned int agent_id; const struct mod_scmi_perf_domain_config *domain; const struct scmi_perf_level_set_a2p *parameters; - struct scmi_perf_level_set_p2a return_values; uint32_t permissions; - - return_values.status = SCMI_GENERIC_ERROR; + struct scmi_perf_level_set_p2a return_values = { + .status = SCMI_GENERIC_ERROR, + }; parameters = (const struct scmi_perf_level_set_a2p *)payload; + if (parameters->domain_id >= scmi_perf_ctx.domain_count) { status = FWK_SUCCESS; return_values.status = SCMI_NOT_FOUND; @@ -454,21 +510,17 @@ static int scmi_perf_level_set_handler(fwk_id_t service_id, goto exit; } - /* Execute the transition synchronously */ status = scmi_perf_ctx.dvfs_api->set_frequency( - FWK_ID_ELEMENT(FWK_MODULE_IDX_DVFS, parameters->domain_id), - parameters->performance_level); - - if (status == FWK_E_RANGE) { - status = FWK_SUCCESS; + FWK_ID_ELEMENT(FWK_MODULE_IDX_DVFS, parameters->domain_id), + (uint64_t)parameters->performance_level); + + /* + * Return immediately to the caller, fire-and-forget. + */ + if ((status == FWK_SUCCESS) || (status == FWK_PENDING)) + return_values.status = SCMI_SUCCESS; + else if (status == FWK_E_RANGE) return_values.status = SCMI_OUT_OF_RANGE; - } else if (status != FWK_SUCCESS) { - return_values.status = SCMI_GENERIC_ERROR; - } else { - return_values = (struct scmi_perf_level_set_p2a) { - .status = SCMI_SUCCESS, - }; - } exit: scmi_perf_ctx.scmi_api->respond(service_id, &return_values, @@ -483,8 +535,10 @@ static int scmi_perf_level_get_handler(fwk_id_t service_id, { int status; const struct scmi_perf_level_get_a2p *parameters; - struct scmi_perf_level_get_p2a return_values; - struct mod_dvfs_opp opp; + struct scmi_perf_event_parameters *evt_params; + struct scmi_perf_level_get_p2a return_values = { + .status = SCMI_GENERIC_ERROR, + }; return_values.status = SCMI_GENERIC_ERROR; @@ -496,17 +550,38 @@ static int scmi_perf_level_get_handler(fwk_id_t service_id, goto exit; } - status = scmi_perf_ctx.dvfs_api->get_current_opp( - FWK_ID_ELEMENT(FWK_MODULE_IDX_DVFS, parameters->domain_id), &opp); - if (status != FWK_SUCCESS) + /* Check if there is already a request pending for this domain */ + if (!fwk_id_is_equal( + scmi_perf_ctx.perf_ops_table[parameters->domain_id].service_id, + FWK_ID_NONE)){ + return_values.status = SCMI_BUSY; + status = FWK_SUCCESS; + goto exit; + } - /* Return the frequency as the performance level */ - return_values = (struct scmi_perf_level_get_p2a) { - .status = SCMI_SUCCESS, - .performance_level = (uint32_t)opp.frequency, + /* The get_level request is processed within the event being generated */ + struct fwk_event event = { + .target_id = fwk_module_id_scmi_perf, + .id = scmi_perf_get_level, }; + evt_params = (struct scmi_perf_event_parameters *)event.params; + evt_params->domain_id = FWK_ID_ELEMENT(FWK_MODULE_IDX_DVFS, + parameters->domain_id); + + status = fwk_thread_put_event(&event); + if (status != FWK_SUCCESS) { + return_values.status = SCMI_GENERIC_ERROR; + + goto exit; + } + + /* Store service identifier to indicate there is a pending request */ + scmi_perf_ctx.perf_ops_table[parameters->domain_id].service_id = service_id; + + return FWK_SUCCESS; + exit: scmi_perf_ctx.scmi_api->respond(service_id, &return_values, (return_values.status == SCMI_SUCCESS) ? 
@@ -561,6 +636,33 @@ static struct mod_scmi_to_protocol_api scmi_perf_mod_scmi_to_protocol_api = { .message_handler = scmi_perf_message_handler }; +/* + * Static helpers for responding to SCMI. + */ +static void scmi_perf_respond( + void *return_values, + fwk_id_t domain_id, + int size) +{ + int idx = fwk_id_get_element_idx(domain_id); + fwk_id_t service_id; + + /* + * The service identifier used for the response is retrieved from the + * domain operations table. + */ + service_id = scmi_perf_ctx.perf_ops_table[idx].service_id; + + scmi_perf_ctx.scmi_api->respond(service_id, + return_values, size); + + /* + * Set the service identifier to 'none' to indicate the domain is + * available again. + */ + scmi_perf_ctx.perf_ops_table[idx].service_id = FWK_ID_NONE; +} + /* * Framework handlers */ @@ -568,6 +670,7 @@ static int scmi_perf_init(fwk_id_t module_id, unsigned int element_count, const void *data) { int return_val; + int i; const struct mod_scmi_perf_config *config = (const struct mod_scmi_perf_config *)data; @@ -579,9 +682,18 @@ static int scmi_perf_init(fwk_id_t module_id, unsigned int element_count, if (return_val <= 0) return FWK_E_SUPPORT; + scmi_perf_ctx.perf_ops_table = fwk_mm_calloc(return_val, + sizeof(struct perf_operations)); + if (scmi_perf_ctx.perf_ops_table == NULL) + return FWK_E_NOMEM; + scmi_perf_ctx.config = config; scmi_perf_ctx.domain_count = return_val; + /* Initialize table */ + for (i = 0; i < return_val; i++) + scmi_perf_ctx.perf_ops_table[i].service_id = FWK_ID_NONE; + return FWK_SUCCESS; } @@ -612,12 +724,106 @@ static int scmi_perf_process_bind_request(fwk_id_t source_id, return FWK_SUCCESS; } + +/* + * Handle a request for get_level/limits. + */ +static int process_request_event(const struct fwk_event *event) +{ + int status; + struct mod_dvfs_frequency_limits limits; + struct scmi_perf_event_parameters *params; + struct scmi_perf_level_get_p2a return_values_level; + struct scmi_perf_limits_get_p2a return_values_limits; + struct mod_dvfs_opp opp; + + /* request event to DVFS HAL */ + if (fwk_id_is_equal(event->id, scmi_perf_get_level)) { + params = (struct scmi_perf_event_parameters *)event->params; + + status = scmi_perf_ctx.dvfs_api->get_current_opp(params->domain_id, + &opp); + if (status == FWK_SUCCESS) { + /* DVFS value is ready */ + return_values_level = (struct scmi_perf_level_get_p2a) { + .status = SCMI_SUCCESS, + .performance_level = (uint32_t)opp.frequency, + }; + + scmi_perf_respond(&return_values_level, params->domain_id, + sizeof(return_values_level)); + + return status; + } else if (status == FWK_PENDING) { + /* DVFS value will be provided through a response event */ + return FWK_SUCCESS; + } else { + return_values_level = (struct scmi_perf_level_get_p2a) { + .status = SCMI_HARDWARE_ERROR, + }; + + scmi_perf_respond(&return_values_level, params->domain_id, + sizeof(return_values_level.status)); + + return FWK_E_DEVICE; + } + } + + if (fwk_id_is_equal(event->id, scmi_perf_get_limits)) { + params = (struct scmi_perf_event_parameters *)event->params; + + status = scmi_perf_ctx.dvfs_api->get_frequency_limits(params->domain_id, + &limits); + if (status == FWK_SUCCESS) { + /* DVFS value is ready */ + return_values_limits = (struct scmi_perf_limits_get_p2a) { + .status = SCMI_SUCCESS, + .range_min = limits.minimum, + .range_max = limits.maximum, + }; + + scmi_perf_respond(&return_values_limits, params->domain_id, + sizeof(return_values_limits)); + + return status; + } else if (status == FWK_PENDING) { + /* DVFS value will be provided through a response 
event */ + return FWK_SUCCESS; + } else { + return_values_limits = (struct scmi_perf_limits_get_p2a) { + .status = SCMI_HARDWARE_ERROR, + }; + + scmi_perf_respond(&return_values_limits, params->domain_id, + sizeof(return_values_limits.status)); + + return FWK_E_DEVICE; + } + } + + return FWK_E_PARAM; +} + +static int scmi_perf_process_event(const struct fwk_event *event, + struct fwk_event *resp_event) +{ + /* Request events from SCMI */ + if (fwk_id_get_module_idx(event->source_id) == + fwk_id_get_module_idx(fwk_module_id_scmi)) + return process_request_event(event); + + return FWK_E_PARAM; +} + + /* SCMI Performance Management Protocol Definition */ const struct fwk_module module_scmi_perf = { .name = "SCMI Performance Management Protocol", .api_count = 1, + .event_count = SCMI_PERF_EVENT_IDX_COUNT, .type = FWK_MODULE_TYPE_PROTOCOL, .init = scmi_perf_init, .bind = scmi_perf_bind, .process_bind_request = scmi_perf_process_bind_request, + .process_event = scmi_perf_process_event, }; -- GitLab From b7cfd45d6d75c40102cd6624144bc81f9f1d54bd Mon Sep 17 00:00:00 2001 From: Jim Quigley Date: Fri, 22 Nov 2019 10:17:28 +0000 Subject: [PATCH 06/10] DVFS: Remove notifications from DVFS HAL DVFS HAL module is not required to handle notifications. The clock drivers are responsible for saving the H/W state during suspend/resume operations. There is nothing to do with the DVFS context for suspend/ resume. Change-Id: I2d401d80a91aa174c72938d017897ef1119324f2 Signed-off-by: Jim Quigley --- module/dvfs/src/mod_dvfs.c | 82 -------------------------------------- 1 file changed, 82 deletions(-) diff --git a/module/dvfs/src/mod_dvfs.c b/module/dvfs/src/mod_dvfs.c index 397c57d67..bf74eb124 100644 --- a/module/dvfs/src/mod_dvfs.c +++ b/module/dvfs/src/mod_dvfs.c @@ -34,9 +34,6 @@ struct mod_dvfs_domain_ctx { /* Number of operating points */ size_t opp_count; - /* Operating point prior to domain suspension */ - struct mod_dvfs_opp suspended_opp; - /* Current operating point limits */ struct mod_dvfs_frequency_limits frequency_limits; }; @@ -457,8 +454,6 @@ dvfs_element_init(fwk_id_t domain_id, .maximum = ctx->config->opps[ctx->opp_count - 1].frequency, }; - ctx->suspended_opp = ctx->config->opps[ctx->config->sustained_idx]; - return FWK_SUCCESS; } @@ -527,81 +522,6 @@ dvfs_process_bind_request(fwk_id_t source_id, return dvfs_process_bind_request_module(source_id, api_id, api); } -static int -dvfs_start(fwk_id_t id) -{ - int status; - const struct mod_dvfs_domain_ctx *ctx; - - if (!fwk_id_is_type(id, FWK_ID_TYPE_ELEMENT)) - return FWK_SUCCESS; - - ctx = get_domain_ctx(id); - - /* Register for clock state notifications */ - status = fwk_notification_subscribe( - mod_clock_notification_id_state_changed, - ctx->config->clock_id, - id); - if (status != FWK_SUCCESS) - return status; - - return fwk_notification_subscribe( - mod_clock_notification_id_state_change_pending, - ctx->config->clock_id, - id); -} - -static int -dvfs_notify_system_state_transition_suspend(fwk_id_t domain_id) -{ - struct mod_dvfs_domain_ctx *ctx = - get_domain_ctx(domain_id); - - return __mod_dvfs_get_current_opp(ctx, &ctx->suspended_opp); -} - -static int -dvfs_notify_system_state_transition_resume(fwk_id_t domain_id) -{ - const struct mod_dvfs_domain_ctx *ctx = - get_domain_ctx(domain_id); - - return __mod_dvfs_set_opp(ctx, &ctx->suspended_opp); -} - -static int -dvfs_process_notification(const struct fwk_event *event, - struct fwk_event *resp_event) -{ - struct clock_notification_params *params; - struct clock_state_change_pending_resp_params 
*resp_params; - - fwk_assert( - fwk_id_is_equal( - event->id, - mod_clock_notification_id_state_changed) || - fwk_id_is_equal( - event->id, - mod_clock_notification_id_state_change_pending)); - fwk_assert(fwk_id_is_type(event->target_id, FWK_ID_TYPE_ELEMENT)); - - params = (struct clock_notification_params *)event->params; - - if (fwk_id_is_equal(event->id, mod_clock_notification_id_state_changed)) { - if (params->new_state == MOD_CLOCK_STATE_RUNNING) - return dvfs_notify_system_state_transition_resume(event->target_id); - } else if (params->new_state == MOD_CLOCK_STATE_STOPPED) { - /* DVFS has received the pending change notification */ - resp_params = - (struct clock_state_change_pending_resp_params *)resp_event->params; - resp_params->status = FWK_SUCCESS; - - return dvfs_notify_system_state_transition_suspend(event->target_id); - } - - return FWK_SUCCESS; -} /* Module description */ const struct fwk_module module_dvfs = { .name = "DVFS", @@ -611,8 +531,6 @@ const struct fwk_module module_dvfs = { .bind = dvfs_bind, .process_bind_request = dvfs_process_bind_request, .process_event = __mod_dvfs_process_event, - .start = dvfs_start, - .process_notification = dvfs_process_notification, .api_count = MOD_DVFS_API_IDX_COUNT, .event_count = MOD_DVFS_EVENT_IDX_COUNT, }; -- GitLab From 0b644f2745a45df069fa1474f3a870489f46fb3c Mon Sep 17 00:00:00 2001 From: Jim Quigley Date: Wed, 20 Nov 2019 18:27:01 +0000 Subject: [PATCH 07/10] DVFS: Add support for pending requests This patch adds support for handling deferred requests to the DVFS module. It follows the 'deferred_response_architecture' approach. Change-Id: I2e5ea59c1b2610e347301f8841aca6eafcbcfa95 Signed-off-by: Jim Quigley --- module/dvfs/include/mod_dvfs.h | 41 +- module/dvfs/src/mod_dvfs.c | 709 ++++++++++++++++++++------- module/scmi_perf/src/mod_scmi_perf.c | 31 +- 3 files changed, 568 insertions(+), 213 deletions(-) diff --git a/module/dvfs/include/mod_dvfs.h b/module/dvfs/include/mod_dvfs.h index f696f0d58..252108f16 100644 --- a/module/dvfs/include/mod_dvfs.h +++ b/module/dvfs/include/mod_dvfs.h @@ -180,23 +180,19 @@ struct mod_dvfs_domain_api { */ /*! - * \brief Set operating point event response parameters. + * \brief Get current OPP event response parameters. */ -struct mod_dvfs_event_params_set_frequency_response { - int status; /*!< Status of the request */ -}; +struct mod_dvfs_params_response { + /*! Event response status */ + int status; -/*! - * \brief Set limits event response parameters. - */ -struct mod_dvfs_event_params_set_frequency_limits_response { - int status; /*!< Status of the request */ + /*! Event response frequency */ + uint64_t performance_level; }; /*! * \} */ - /*! * \defgroup GroupDvfsIds Identifiers * \{ @@ -218,25 +214,18 @@ static const fwk_id_t mod_dvfs_api_id_dvfs = * \brief Event indices. */ enum mod_dvfs_event_idx { - /*! Event index for mod_dvfs_event_id_set_frequency() */ - MOD_DVFS_EVENT_IDX_SET_FREQUENCY, - - /*! Event index for mod_dvfs_event_id_set_frequency_limits() */ - MOD_DVFS_EVENT_IDX_SET_FREQUENCY_LIMITS, - - /*! Number of defined events */ - MOD_DVFS_EVENT_IDX_COUNT + MOD_DVFS_EVENT_IDX_SET, /*!< Set level/limits */ + MOD_DVFS_EVENT_IDX_GET_OPP, /*!< Get frequency */ + MOD_DVFS_EVENT_IDX_COUNT, /*!< event count */ }; -/*! Set operating point event identifier */ -static const fwk_id_t mod_dvfs_event_id_set_frequency = - FWK_ID_EVENT_INIT(FWK_MODULE_IDX_DVFS, MOD_DVFS_EVENT_IDX_SET_FREQUENCY); +/*! 
Set operating point/limits event identifier */ +static const fwk_id_t mod_dvfs_event_id_set = + FWK_ID_EVENT_INIT(FWK_MODULE_IDX_DVFS, MOD_DVFS_EVENT_IDX_SET); -/*! Set frequency limits event identifier */ -static const fwk_id_t mod_dvfs_event_id_set_frequency_limits = - FWK_ID_EVENT_INIT( - FWK_MODULE_IDX_DVFS, - MOD_DVFS_EVENT_IDX_SET_FREQUENCY_LIMITS); +/*! Get current OPP event identifier */ +static const fwk_id_t mod_dvfs_event_id_get_opp = + FWK_ID_EVENT_INIT(FWK_MODULE_IDX_DVFS, MOD_DVFS_EVENT_IDX_GET_OPP); /*! * \} diff --git a/module/dvfs/src/mod_dvfs.c b/module/dvfs/src/mod_dvfs.c index bf74eb124..cab49f01f 100644 --- a/module/dvfs/src/mod_dvfs.c +++ b/module/dvfs/src/mod_dvfs.c @@ -12,13 +12,51 @@ #include #include #include +#ifdef BUILD_HAS_MULTITHREADING +#include +#endif #include #include #include #include #include -/* Domain context */ +/*! + * \brief Domain states for GET_OPP/SET_OPP. + */ +enum mod_dvfs_domain_state { + DVFS_DOMAIN_STATE_IDLE = 0, + + /* Waiting for GET_OPP request */ + DVFS_DOMAIN_GET_OPP, + + /* Waiting for SET_OPP request */ + DVFS_DOMAIN_SET_OPP, + + /* set_rate() in progress, set_voltage() next */ + DVFS_DOMAIN_SET_VOLTAGE, + + /* set_voltage() in progress, set_frequency() next */ + DVFS_DOMAIN_SET_FREQUENCY, + + /* waiting for SET_OPP to complete */ + DVFS_DOMAIN_SET_OPP_DONE, +}; + +/*! + * \brief Request for SET_OPP. + */ +struct mod_dvfs_request { + /* New operating point data for the request */ + struct mod_dvfs_opp new_opp; + + /* New operating point limits for the active request */ + struct mod_dvfs_frequency_limits frequency_limits; +}; + +/*! + * \brief Domain context. + */ struct mod_dvfs_domain_ctx { /* Domain configuration */ const struct mod_dvfs_domain_config *config; @@ -34,28 +72,39 @@ struct mod_dvfs_domain_ctx { /* Number of operating points */ size_t opp_count; - /* Current operating point limits */ + /* Current operating point */ + struct mod_dvfs_opp current_opp; + + /* Current frequency limits */ struct mod_dvfs_frequency_limits frequency_limits; + + /* Current request details */ + struct mod_dvfs_request request; + + /* next state */ + enum mod_dvfs_domain_state state; + + /* cookie for deferred request response */ + uint32_t cookie; }; +static uint32_t dvfs_domain_element_count = 0; static struct mod_dvfs_domain_ctx (*domain_ctx)[]; +/* + * DVFS Helper Functions + */ static struct mod_dvfs_domain_ctx *get_domain_ctx(fwk_id_t domain_id) { - return &(*domain_ctx)[fwk_id_get_element_idx(domain_id)]; -} + uint32_t idx = fwk_id_get_element_idx(domain_id); -struct mod_dvfs_domain_ctx *__mod_dvfs_get_valid_domain_ctx(fwk_id_t domain_id) -{ - return get_domain_ctx(domain_id); + if (idx < dvfs_domain_element_count) + return &(*domain_ctx)[idx]; + else + return NULL; } - -/* - * DVFS Helper Functions - */ -static int -count_opps(const struct mod_dvfs_opp *opps) +static int count_opps(const struct mod_dvfs_opp *opps) { const struct mod_dvfs_opp *opp = &opps[0]; @@ -74,7 +123,7 @@ static const struct mod_dvfs_opp *get_opp_for_values( const struct mod_dvfs_opp *opp; /* A value of zero indicates the parameter should be ignored */ - assert((frequency != 0) || (voltage != 0)); + fwk_assert((frequency != 0) || (voltage != 0)); for (opp_idx = 0; opp_idx < ctx->opp_count; opp_idx++) { opp = &ctx->config->opps[opp_idx]; @@ -93,16 +142,14 @@ static const struct mod_dvfs_opp *get_opp_for_values( return NULL; } -static bool -is_opp_within_limits(const struct mod_dvfs_opp *opp, +static bool is_opp_within_limits(const struct mod_dvfs_opp *opp, const 
struct mod_dvfs_frequency_limits *limits) { return (opp->frequency >= limits->minimum) && (opp->frequency <= limits->maximum); } -static bool -are_limits_valid(const struct mod_dvfs_domain_ctx *ctx, +static bool are_limits_valid(const struct mod_dvfs_domain_ctx *ctx, const struct mod_dvfs_frequency_limits *limits) { if (limits->minimum > limits->maximum) @@ -117,8 +164,7 @@ are_limits_valid(const struct mod_dvfs_domain_ctx *ctx, return true; } -static const struct -mod_dvfs_opp *adjust_opp_for_new_limits( +static const struct mod_dvfs_opp *adjust_opp_for_new_limits( const struct mod_dvfs_domain_ctx *ctx, const struct mod_dvfs_opp *opp, const struct mod_dvfs_frequency_limits *limits) @@ -138,99 +184,37 @@ mod_dvfs_opp *adjust_opp_for_new_limits( } /* - * DVFS Utilities + * Helper to create events to process requests asynchronously */ -static int -__mod_dvfs_get_current_opp(const struct mod_dvfs_domain_ctx *ctx, - struct mod_dvfs_opp *opp) +static int put_event_request(fwk_id_t domain_id, + struct mod_dvfs_domain_ctx *ctx, + fwk_id_t event_id, + enum mod_dvfs_domain_state state, + bool response_required) { - int status; + struct fwk_event req; - status = ctx->apis.clock->get_rate( - ctx->config->clock_id, - &opp->frequency); - if (status != FWK_SUCCESS) - return FWK_E_DEVICE; + ctx->state = state; - status = ctx->apis.psu->get_voltage( - ctx->config->psu_id, - &opp->voltage); - if (status != FWK_SUCCESS) - return FWK_E_DEVICE; + req = (struct fwk_event) { + .target_id = domain_id, + .id = event_id, + .response_requested = response_required, + }; - return FWK_SUCCESS; + return fwk_thread_put_event(&req); } -static int -__mod_dvfs_set_opp(const struct mod_dvfs_domain_ctx *ctx, - const struct mod_dvfs_opp *new_opp) -{ - int status; - struct mod_dvfs_opp current_opp; - - status = __mod_dvfs_get_current_opp(ctx, ¤t_opp); - if (status != FWK_SUCCESS) - return status; - - if (new_opp->voltage > current_opp.voltage) { - /* Raise the voltage before raising the frequency */ - status = ctx->apis.psu->set_voltage( - ctx->config->psu_id, - new_opp->voltage); - if (status != FWK_SUCCESS) - return FWK_E_DEVICE; - } - - if (new_opp->frequency != current_opp.frequency) { - status = ctx->apis.clock->set_rate( - ctx->config->clock_id, - new_opp->frequency, - MOD_CLOCK_ROUND_MODE_NONE); - if (status != FWK_SUCCESS) - return FWK_E_DEVICE; - } - - if (new_opp->voltage < current_opp.voltage) { - /* Lower the voltage after lowering the frequency */ - status = ctx->apis.psu->set_voltage( - ctx->config->psu_id, - new_opp->voltage); - if (status != FWK_SUCCESS) - return FWK_E_DEVICE; - } - - return FWK_SUCCESS; -} /* - * DVFS Domain API Support + * DVFS module synchronous API functions */ -static int -api_get_current_opp(fwk_id_t domain_id, struct mod_dvfs_opp *opp) -{ - int status; - const struct mod_dvfs_domain_ctx *ctx; - - fwk_assert(opp != NULL); - - ctx = __mod_dvfs_get_valid_domain_ctx(domain_id); - if (ctx == NULL) - return FWK_E_PARAM; - - status = __mod_dvfs_get_current_opp(ctx, opp); - if (status != FWK_SUCCESS) - return status; - - return FWK_SUCCESS; -} - -static int -api_get_sustained_opp(fwk_id_t domain_id, struct mod_dvfs_opp *opp) +static int dvfs_get_sustained_opp(fwk_id_t domain_id, struct mod_dvfs_opp *opp) { const struct mod_dvfs_domain_ctx *ctx; fwk_assert(opp != NULL); - ctx = __mod_dvfs_get_valid_domain_ctx(domain_id); + ctx = get_domain_ctx(domain_id); if (ctx == NULL) return FWK_E_PARAM; @@ -242,8 +226,7 @@ api_get_sustained_opp(fwk_id_t domain_id, struct mod_dvfs_opp *opp) return 
FWK_SUCCESS; } -static int -api_get_nth_opp(fwk_id_t domain_id, +static int dvfs_get_nth_opp(fwk_id_t domain_id, size_t n, struct mod_dvfs_opp *opp) { @@ -251,7 +234,7 @@ api_get_nth_opp(fwk_id_t domain_id, fwk_assert(opp != NULL); - ctx = __mod_dvfs_get_valid_domain_ctx(domain_id); + ctx = get_domain_ctx(domain_id); if (ctx == NULL) return FWK_E_PARAM; @@ -263,14 +246,13 @@ api_get_nth_opp(fwk_id_t domain_id, return FWK_SUCCESS; } -static int -api_get_opp_count(fwk_id_t domain_id, size_t *opp_count) +static int dvfs_get_opp_count(fwk_id_t domain_id, size_t *opp_count) { const struct mod_dvfs_domain_ctx *ctx; fwk_assert(opp_count != NULL); - ctx = __mod_dvfs_get_valid_domain_ctx(domain_id); + ctx = get_domain_ctx(domain_id); if (ctx == NULL) return FWK_E_PARAM; @@ -279,14 +261,13 @@ api_get_opp_count(fwk_id_t domain_id, size_t *opp_count) return FWK_SUCCESS; } -static int -api_get_latency(fwk_id_t domain_id, uint16_t *latency) +static int dvfs_get_latency(fwk_id_t domain_id, uint16_t *latency) { const struct mod_dvfs_domain_ctx *ctx; fwk_assert(latency != NULL); - ctx = __mod_dvfs_get_valid_domain_ctx(domain_id); + ctx = get_domain_ctx(domain_id); if (ctx == NULL) return FWK_E_PARAM; @@ -295,139 +276,495 @@ api_get_latency(fwk_id_t domain_id, uint16_t *latency) return FWK_SUCCESS; } -static int -api_set_frequency(fwk_id_t domain_id, uint64_t frequency) +static int dvfs_get_frequency_limits(fwk_id_t domain_id, + struct mod_dvfs_frequency_limits *limits) { - int status; - const struct mod_dvfs_domain_ctx *ctx; - const struct mod_dvfs_opp *new_opp; + struct mod_dvfs_domain_ctx *ctx; - ctx = __mod_dvfs_get_valid_domain_ctx(domain_id); + ctx = get_domain_ctx(domain_id); if (ctx == NULL) return FWK_E_PARAM; - /* Only accept frequencies that exist in the operating point table */ - new_opp = get_opp_for_values(ctx, frequency, 0); - if (new_opp == NULL) - return FWK_E_RANGE; + *limits = ctx->frequency_limits; + return FWK_SUCCESS; +} - if (!is_opp_within_limits(new_opp, &ctx->frequency_limits)) - return FWK_E_RANGE; +/* + * dvfs_get_current_opp() may be either synchronous or asynchronous + */ +static int dvfs_get_current_opp(fwk_id_t domain_id, struct mod_dvfs_opp *opp) +{ + int status; + struct mod_dvfs_domain_ctx *ctx; - status = __mod_dvfs_set_opp(ctx, new_opp); - if (status != FWK_SUCCESS) - return status; + fwk_assert(opp != NULL); - return FWK_SUCCESS; + ctx = get_domain_ctx(domain_id); + if (ctx == NULL) + return FWK_E_PARAM; + + if (ctx->current_opp.frequency != 0) { + opp->frequency = ctx->current_opp.frequency; + opp->voltage = ctx->current_opp.voltage; + return FWK_SUCCESS; + } + + if (ctx->state != DVFS_DOMAIN_STATE_IDLE) + return FWK_E_BUSY; + + status = put_event_request(domain_id, ctx, + mod_dvfs_event_id_get_opp, DVFS_DOMAIN_GET_OPP, true); + if (status == FWK_SUCCESS) { + /* + * We return FWK_PENDING here to indicate to the caller that the + * result of the request is pending and will arrive later through + * an event. 
+ */ + return FWK_PENDING; + } + + return status; } -static int -api_get_frequency_limits(fwk_id_t domain_id, - struct mod_dvfs_frequency_limits *limits) +/* + * DVFS module asynchronous API functions + */ +static int dvfs_set_frequency(fwk_id_t domain_id, uint64_t frequency) { - const struct mod_dvfs_domain_ctx *ctx; + struct mod_dvfs_domain_ctx *ctx; + const struct mod_dvfs_opp *new_opp; - ctx = __mod_dvfs_get_valid_domain_ctx(domain_id); + ctx = get_domain_ctx(domain_id); if (ctx == NULL) return FWK_E_PARAM; - *limits = ctx->frequency_limits; + if (ctx->state != DVFS_DOMAIN_STATE_IDLE) + return FWK_E_BUSY; - return FWK_SUCCESS; + /* Only accept frequencies that exist in the operating point table */ + new_opp = get_opp_for_values(ctx, frequency, 0); + if (new_opp == NULL) + return FWK_E_RANGE; + + if ((new_opp->frequency == ctx->current_opp.frequency) && + (new_opp->voltage == ctx->current_opp.voltage)) + return FWK_SUCCESS; + + if (!is_opp_within_limits(new_opp, &ctx->frequency_limits)) + return FWK_E_RANGE; + + ctx->request.new_opp.frequency = new_opp->frequency; + ctx->request.new_opp.voltage = new_opp->voltage; + ctx->request.frequency_limits.minimum = 0; + ctx->request.frequency_limits.maximum = 0; + + /* + * Note that we do not return FWK_PENDING here. If an event + * is successfully queued we return FWK_SUCCESS to indicate + * that the caller should not expect a deferred response. + */ + return put_event_request(domain_id, ctx, mod_dvfs_event_id_set, + DVFS_DOMAIN_SET_OPP, false); } -static int -api_set_frequency_limits(fwk_id_t domain_id, +static int dvfs_set_frequency_limits(fwk_id_t domain_id, const struct mod_dvfs_frequency_limits *limits) { - int status; struct mod_dvfs_domain_ctx *ctx; - struct mod_dvfs_opp current_opp; const struct mod_dvfs_opp *new_opp; - ctx = __mod_dvfs_get_valid_domain_ctx(domain_id); + ctx = get_domain_ctx(domain_id); if (ctx == NULL) return FWK_E_PARAM; + if (ctx->state != DVFS_DOMAIN_STATE_IDLE) + return FWK_E_BUSY; + if (!are_limits_valid(ctx, limits)) return FWK_E_PARAM; - status = __mod_dvfs_get_current_opp(ctx, ¤t_opp); - if (status != FWK_SUCCESS) - return status; + new_opp = adjust_opp_for_new_limits(ctx, &ctx->current_opp, limits); - new_opp = adjust_opp_for_new_limits(ctx, ¤t_opp, limits); - status = __mod_dvfs_set_opp(ctx, new_opp); - if (status != FWK_SUCCESS) - return status; - - ctx->frequency_limits = *limits; + if ((new_opp->frequency == ctx->current_opp.frequency) && + (new_opp->voltage == ctx->current_opp.voltage)) { + ctx->frequency_limits = *limits; + return FWK_SUCCESS; + } - return FWK_SUCCESS; + ctx->request.new_opp.frequency = new_opp->frequency; + ctx->request.new_opp.voltage = new_opp->voltage; + ctx->request.frequency_limits.minimum = limits->minimum; + ctx->request.frequency_limits.maximum = limits->maximum; + + /* + * Note that we do not return FWK_PENDING here. If an event + * is successfully queued we return FWK_SUCCESS to indicate + * that the caller should not expect a deferred response. 
+ */ + return put_event_request(domain_id, ctx, mod_dvfs_event_id_set, + DVFS_DOMAIN_SET_OPP, false); } -const struct mod_dvfs_domain_api __mod_dvfs_domain_api = { - .get_current_opp = api_get_current_opp, - .get_sustained_opp = api_get_sustained_opp, - .get_nth_opp = api_get_nth_opp, - .get_opp_count = api_get_opp_count, - .get_latency = api_get_latency, - .set_frequency = api_set_frequency, - .get_frequency_limits = api_get_frequency_limits, - .set_frequency_limits = api_set_frequency_limits, +static const struct mod_dvfs_domain_api mod_dvfs_domain_api = { + .get_current_opp = dvfs_get_current_opp, + .get_sustained_opp = dvfs_get_sustained_opp, + .get_nth_opp = dvfs_get_nth_opp, + .get_opp_count = dvfs_get_opp_count, + .get_latency = dvfs_get_latency, + .set_frequency = dvfs_set_frequency, + .get_frequency_limits = dvfs_get_frequency_limits, + .set_frequency_limits = dvfs_set_frequency_limits, }; + /* - * DVFS Event Handling + * DVFS utility functions */ -static int -event_set_frequency(const struct fwk_event *event, - struct fwk_event *response) +static void dvfs_set_op_done(struct mod_dvfs_domain_ctx *ctx) { - return FWK_E_SUPPORT; + /* + * SET_OPP() successful, store the new values + */ + ctx->current_opp.voltage = ctx->request.new_opp.voltage; + ctx->current_opp.frequency = ctx->request.new_opp.frequency; + + if ((ctx->request.frequency_limits.minimum != 0) || + (ctx->request.frequency_limits.maximum != 0)) { + ctx->frequency_limits = ctx->request.frequency_limits; + } } -static int -event_set_frequency_limits(const struct fwk_event *event, - struct fwk_event *response) +static int dvfs_complete(struct mod_dvfs_domain_ctx *ctx, + const struct fwk_event *event, + struct fwk_event *resp_event, + int req_status, + bool return_opp) { - return FWK_E_SUPPORT; + int status = req_status; + struct fwk_event read_req_event; + struct mod_dvfs_params_response *resp_params; + + if (ctx->cookie != 0) { + resp_params = (struct mod_dvfs_params_response *) + &read_req_event.params; + status = fwk_thread_get_delayed_response(event->target_id, + ctx->cookie, + &read_req_event); + + if (status == FWK_SUCCESS) { + resp_params->status = req_status; + if (return_opp) + resp_params->performance_level = ctx->current_opp.frequency; + status = fwk_thread_put_event(&read_req_event); + } + ctx->cookie = 0; + } else if (resp_event != NULL) { + resp_params = + (struct mod_dvfs_params_response *)resp_event->params; + resp_params->status = req_status; + if (return_opp) + resp_params->performance_level = ctx->current_opp.frequency; + } + + ctx->state = DVFS_DOMAIN_STATE_IDLE; + + return status; } -static int -__mod_dvfs_process_event(const struct fwk_event *event, - struct fwk_event *response) +/* + * The SET_OPP() request has successfully completed the first step, + * reading the voltage. 
+ */ +static int dvfs_handle_set_opp(struct mod_dvfs_domain_ctx *ctx, + const struct fwk_event *event, + uint64_t voltage) { - typedef int (*handler_t)( - const struct fwk_event *event, - struct fwk_event *response); + int status = FWK_SUCCESS; - static const handler_t handlers[] = { - [MOD_DVFS_EVENT_IDX_SET_FREQUENCY] = event_set_frequency, - [MOD_DVFS_EVENT_IDX_SET_FREQUENCY_LIMITS] = event_set_frequency_limits, - }; + if (ctx->request.new_opp.voltage > voltage) { + /* + * Current < request, increase voltage then set frequency + */ + status = ctx->apis.psu->set_voltage( + ctx->config->psu_id, ctx->request.new_opp.voltage); - handler_t handler; + if (status == FWK_PENDING) { + ctx->state = DVFS_DOMAIN_SET_FREQUENCY; + return status; + } - /* Ensure we have a handler implemented for this event */ - handler = handlers[fwk_id_get_event_idx(event->id)]; - if (handler == NULL) - return FWK_E_PARAM; + if (status != FWK_SUCCESS) + return dvfs_complete(ctx, event, NULL, status, false); + + /* + * Voltage set successsfully, continue to set the frequency + */ + status = ctx->apis.clock->set_rate( + ctx->config->clock_id, + ctx->request.new_opp.frequency, + MOD_CLOCK_ROUND_MODE_NONE); + + if (status == FWK_PENDING) { + ctx->state = DVFS_DOMAIN_SET_OPP_DONE; + return status; + } + } + + if (ctx->request.new_opp.voltage < voltage) { + /* + * Current > request, decrease frequency then set voltage + */ + status = ctx->apis.clock->set_rate( + ctx->config->clock_id, + ctx->request.new_opp.frequency, + MOD_CLOCK_ROUND_MODE_NONE); - /* Delegate event handling to the relevant handler */ - return handler(event, response); + if (status == FWK_PENDING) { + ctx->state = DVFS_DOMAIN_SET_VOLTAGE; + return status; + } + + if (status != FWK_SUCCESS) + return dvfs_complete(ctx, event, NULL, status, false); + + /* + * Clock set_rate() completed successfully, continue to set_voltage() + */ + status = ctx->apis.psu->set_voltage( + ctx->config->psu_id, ctx->request.new_opp.voltage); + + if (status == FWK_PENDING) { + ctx->state = DVFS_DOMAIN_SET_OPP_DONE; + return status; + } + } + + /* + * SET_OPP() completed, return to caller. + */ + if (status == FWK_SUCCESS) + dvfs_set_op_done(ctx); + + return dvfs_complete(ctx, event, NULL, status, false); +} + +/* + * The current voltage has been read. This is the first step of a SET_OPP() + * request and the only step of a GET_OPP() request. It may have been handled + * synchronously or asynchronously. Note that resp_event will only be set + * by a GET_OPP(), it will always be NULL for SET_OPP(). + */ +static int dvfs_handle_psu_get_voltage_resp(struct mod_dvfs_domain_ctx *ctx, + const struct fwk_event *event, + struct fwk_event *resp_event, + int req_status, + uint64_t voltage) +{ + const struct mod_dvfs_opp *opp; + + if (req_status != FWK_SUCCESS) + return dvfs_complete(ctx, event, resp_event, req_status, false); + + if (ctx->state == DVFS_DOMAIN_SET_OPP) + return dvfs_handle_set_opp(ctx, event, voltage); + + /* + * We have the actual voltage, get the frequency from the + * corresponding OPP in the domain context table. + */ + opp = get_opp_for_values(ctx, 0, voltage); + if (opp == NULL) + return dvfs_complete(ctx, event, resp_event, FWK_E_DEVICE, false); + + /* + * We have successfully found the frequency, save it in the domain context. 
+ */ + ctx->current_opp.voltage = voltage; + ctx->current_opp.frequency = opp->frequency; + + /* + * This is a GET_OPP(), we are done, return the frequency to caller + */ + return dvfs_complete(ctx, event, resp_event, FWK_SUCCESS, true); +} + +/* + * Note that dvfs_handle_psu_set_voltage_resp() is only called after an + * asynchronous set_voltage() operation, we have already saved the + * cookie for the event. + */ +static int dvfs_handle_psu_set_voltage_resp(struct mod_dvfs_domain_ctx *ctx, + const struct fwk_event *event) +{ + int status = FWK_SUCCESS; + struct mod_psu_driver_response *psu_response = + (struct mod_psu_driver_response *)event->params; + + if (psu_response->status != FWK_SUCCESS) + return dvfs_complete(ctx, event, NULL, psu_response->status, false); + + if (ctx->state == DVFS_DOMAIN_SET_FREQUENCY) { + status = ctx->apis.clock->set_rate( + ctx->config->clock_id, + ctx->request.new_opp.frequency, + MOD_CLOCK_ROUND_MODE_NONE); + if (status == FWK_PENDING) { + ctx->state = DVFS_DOMAIN_SET_OPP_DONE; + return status; + } + } else if (ctx->state == DVFS_DOMAIN_SET_OPP_DONE) { + status = FWK_SUCCESS; + } else + status = FWK_E_DEVICE; + + /* + * SET_OPP() completed, return to caller. + */ + if (status == FWK_SUCCESS) + dvfs_set_op_done(ctx); + + return dvfs_complete(ctx, event, NULL, status, false); +} + +/* + * Note that dvfs_handle_clk_set_freq_resp() is only called after an + * asynchronous set_rate() operation, we have already saved the + * cookie for the event. + */ +static int dvfs_handle_clk_set_freq_resp(struct mod_dvfs_domain_ctx *ctx, + const struct fwk_event *event) +{ + int status; + struct mod_clock_driver_resp_params *clock_response = + (struct mod_clock_driver_resp_params *)event->params; + + if (clock_response->status != FWK_SUCCESS) + return dvfs_complete(ctx, event, NULL, clock_response->status, false); + + if (ctx->state == DVFS_DOMAIN_SET_VOLTAGE) { + /* + * Clock set_rate() completed successfully, continue to set_voltage() + */ + status = ctx->apis.psu->set_voltage( + ctx->config->psu_id, + ctx->request.new_opp.voltage); + if (status == FWK_PENDING) { + ctx->state = DVFS_DOMAIN_SET_OPP_DONE; + return status; + } + + } else if (ctx->state == DVFS_DOMAIN_SET_OPP_DONE) { + status = FWK_SUCCESS; + } else + status = FWK_E_DEVICE; + + /* + * SET_OPP() completed, return to caller. 
+ */ + if (status == FWK_SUCCESS) + dvfs_set_op_done(ctx); + + return dvfs_complete(ctx, event, NULL, status, false); } /* * DVFS Module Framework Support */ -static int -dvfs_init(fwk_id_t module_id, unsigned int element_count, +static int mod_dvfs_process_event(const struct fwk_event *event, + struct fwk_event *resp_event) +{ + int status = FWK_SUCCESS; + struct mod_dvfs_domain_ctx *ctx; + struct mod_psu_driver_response *psu_response; + uint64_t voltage; + + ctx = get_domain_ctx(event->target_id); + if (ctx == NULL) + return FWK_E_PARAM; + + /* + * local DVFS event from dvfs_get_current_opp() + */ + if (fwk_id_is_equal(event->id, mod_dvfs_event_id_get_opp)) { + status = ctx->apis.psu->get_voltage( + ctx->config->psu_id, + &ctx->request.new_opp.voltage); + if (status == FWK_PENDING) { + ctx->cookie = event->cookie; + resp_event->is_delayed_response = true; + return FWK_SUCCESS; + } + + /* + * Handle get_voltage() synchronously + */ + return dvfs_handle_psu_get_voltage_resp(ctx, event, + resp_event, status, ctx->request.new_opp.voltage); + } + + /* + * local DVFS event from dvfs_set_frequency() + */ + if (fwk_id_is_equal(event->id, mod_dvfs_event_id_set)) { + if (ctx->current_opp.voltage != 0) { + voltage = ctx->current_opp.voltage; + status = FWK_SUCCESS; + } else { + status = ctx->apis.psu->get_voltage( + ctx->config->psu_id, + &voltage); + if (status == FWK_PENDING) + return FWK_SUCCESS; + } + + /* + * Handle get_voltage() synchronously + */ + return dvfs_handle_psu_get_voltage_resp(ctx, event, NULL, + status, voltage); + } + + /* + * response event from PSU get_voltage() + */ + if (fwk_id_is_equal(event->id, mod_psu_event_id_get_voltage)) { + /* + * Handle get_voltage() asynchronously + */ + psu_response = (struct mod_psu_driver_response *)event->params; + return dvfs_handle_psu_get_voltage_resp(ctx, event, NULL, + psu_response->status, psu_response->voltage); + } + + /* + * response event from PSU set_voltage() + */ + if (fwk_id_is_equal(event->id, mod_psu_event_id_set_voltage)) { + /* + * Handle set_voltage() asynchronously + */ + return dvfs_handle_psu_set_voltage_resp(ctx, event); + } + + /* + * response event from Clock set_rate() + */ + if (fwk_id_is_equal(event->id, mod_clock_event_id_request)) { + /* + * Handle set_frequency() asynchronously + */ + return dvfs_handle_clk_set_freq_resp(ctx, event); + } + + return FWK_E_PARAM; +} + +static int dvfs_init(fwk_id_t module_id, unsigned int element_count, const void *data) { domain_ctx = fwk_mm_calloc(element_count, sizeof((*domain_ctx)[0])); if (domain_ctx == NULL) return FWK_E_NOMEM; - + dvfs_domain_element_count = element_count; return FWK_SUCCESS; } @@ -504,7 +841,7 @@ dvfs_process_bind_request_module(fwk_id_t source_id, return FWK_E_PARAM; /* We don't do any permissions management */ - *api = &__mod_dvfs_domain_api; + *api = &mod_dvfs_domain_api; return FWK_SUCCESS; } @@ -530,7 +867,7 @@ const struct fwk_module module_dvfs = { .element_init = dvfs_element_init, .bind = dvfs_bind, .process_bind_request = dvfs_process_bind_request, - .process_event = __mod_dvfs_process_event, + .process_event = mod_dvfs_process_event, .api_count = MOD_DVFS_API_IDX_COUNT, .event_count = MOD_DVFS_EVENT_IDX_COUNT, }; diff --git a/module/scmi_perf/src/mod_scmi_perf.c b/module/scmi_perf/src/mod_scmi_perf.c index 2cb91247f..e990b5b60 100644 --- a/module/scmi_perf/src/mod_scmi_perf.c +++ b/module/scmi_perf/src/mod_scmi_perf.c @@ -804,6 +804,31 @@ static int process_request_event(const struct fwk_event *event) return FWK_E_PARAM; } +/* + * Handle a 
response event from the HAL which indicates that the + * requested operation has completed. + */ +static int process_response_event(const struct fwk_event *event) +{ + struct mod_dvfs_params_response *params_level; + struct scmi_perf_level_get_p2a return_values_level; + + if (fwk_id_is_equal(event->id, mod_dvfs_event_id_get_opp)) { + params_level = (struct mod_dvfs_params_response *) + event->params; + return_values_level = (struct scmi_perf_level_get_p2a) { + .status = params_level->status, + .performance_level = params_level->performance_level, + }; + scmi_perf_respond(&return_values_level, event->source_id, + (return_values_level.status == SCMI_SUCCESS) ? + sizeof(return_values_level) : + sizeof(return_values_level.status)); + } + + return FWK_SUCCESS; +} + static int scmi_perf_process_event(const struct fwk_event *event, struct fwk_event *resp_event) { @@ -812,10 +837,14 @@ static int scmi_perf_process_event(const struct fwk_event *event, fwk_id_get_module_idx(fwk_module_id_scmi)) return process_request_event(event); + /* Response events from DVFS */ + if (fwk_id_get_module_idx(event->source_id) == + fwk_id_get_module_idx(fwk_module_id_dvfs)) + return process_response_event(event); + return FWK_E_PARAM; } - /* SCMI Performance Management Protocol Definition */ const struct fwk_module module_scmi_perf = { .name = "SCMI Performance Management Protocol", -- GitLab From 9521a52c147cec466e6cf72f0c7793d44ffac1b1 Mon Sep 17 00:00:00 2001 From: Jim Quigley Date: Thu, 28 Nov 2019 14:51:36 +0000 Subject: [PATCH 08/10] DVFS: Add support for multiple concurrent SET_OPP requests Enable multiple SET_OPP requests to be serviced as a single 'collapsed' request where the data from the last service request overrides the earlier requests. Some requests cannot be allowed to fail. This patch adds retry functionality. Always set frequency regardless of voltage. If the requested voltage is equal to the actual voltage but the frequency has not yet been set we go ahead and set the frequency. Change-Id: I532bbd90104db399c275ecd55014c66e9b42d9ea Signed-off-by: Jim Quigley --- module/dvfs/include/mod_dvfs.h | 10 + module/dvfs/src/mod_dvfs.c | 463 +++++++++++++++++++------- product/juno/include/juno_alarm_idx.h | 10 +- 3 files changed, 356 insertions(+), 127 deletions(-) diff --git a/module/dvfs/include/mod_dvfs.h b/module/dvfs/include/mod_dvfs.h index 252108f16..964445d05 100644 --- a/module/dvfs/include/mod_dvfs.h +++ b/module/dvfs/include/mod_dvfs.h @@ -67,6 +67,16 @@ struct mod_dvfs_domain_config { */ fwk_id_t clock_id; + /*! + * \brief Alarm identifier. + * + * \warning This identifier must refer to an alarm of the \c timer module. + */ + fwk_id_t alarm_id; + + /*! Delay in milliseconds before retrying a request */ + uint16_t retry_ms; + /*! Worst-case transition latency in microseconds */ uint16_t latency; diff --git a/module/dvfs/src/mod_dvfs.c b/module/dvfs/src/mod_dvfs.c index cab49f01f..6bccd4ebf 100644 --- a/module/dvfs/src/mod_dvfs.c +++ b/module/dvfs/src/mod_dvfs.c @@ -20,6 +20,23 @@ #include #include #include +#include + +/* + * Maximum number of attempts to complete a request + */ +#define DVFS_MAX_RETRIES 4 + +enum mod_dvfs_internal_event_idx { + /* retry request */ + MOD_DVFS_INTERNAL_EVENT_IDX_RETRY = MOD_DVFS_EVENT_IDX_COUNT, + MOD_DVFS_INTERNAL_EVENT_IDX_COUNT, +}; + +/* Retry request event identifier */ +static const fwk_id_t mod_dvfs_event_id_retry = + FWK_ID_EVENT_INIT(FWK_MODULE_IDX_DVFS, + MOD_DVFS_INTERNAL_EVENT_IDX_RETRY); /*! * \brief Domain states for GET_OPP/SET_OPP. 
@@ -41,6 +58,9 @@ enum mod_dvfs_domain_state { /* waiting for SET_OPP to complete */ DVFS_DOMAIN_SET_OPP_DONE, + + /* waiting for alarm callback to start a retry */ + DVFS_DOMAIN_STATE_RETRY, }; /*! @@ -50,14 +70,26 @@ struct mod_dvfs_request { /* New operating point data for the request */ struct mod_dvfs_opp new_opp; - /* New operating point limits for the active request */ - struct mod_dvfs_frequency_limits frequency_limits; + /* Response expected for this request */ + bool response_required; + + /* Retry the request on failure */ + bool retry_request; + + /* This request requires the event source_id to be set */ + bool set_source_id; + + /* Retry count */ + uint8_t num_retries; }; /*! * \brief Domain context. */ struct mod_dvfs_domain_ctx { + /* Context Domain ID */ + fwk_id_t domain_id; + /* Domain configuration */ const struct mod_dvfs_domain_config *config; @@ -67,6 +99,10 @@ struct mod_dvfs_domain_ctx { /* Clock API */ const struct mod_clock_api *clock; + + /* Alarm API for pending requests */ + const struct mod_timer_alarm_api *alarm_api; + } apis; /* Number of operating points */ @@ -81,15 +117,26 @@ struct mod_dvfs_domain_ctx { /* Current request details */ struct mod_dvfs_request request; - /* next state */ + /* State */ enum mod_dvfs_domain_state state; - /* cookie for deferred request response */ + /* Cookie for deferred request response */ uint32_t cookie; + + /* Pending request details */ + struct mod_dvfs_request pending_request; + + /* SET_OPP Request is pending for this domain */ + bool request_pending; }; -static uint32_t dvfs_domain_element_count = 0; -static struct mod_dvfs_domain_ctx (*domain_ctx)[]; +struct mod_dvfs_ctx { + /* Number of DVFS domains */ + uint32_t dvfs_domain_element_count; + + /* DVFS device context table */ + struct mod_dvfs_domain_ctx (*domain_ctx)[]; +} dvfs_ctx; /* * DVFS Helper Functions @@ -98,8 +145,8 @@ static struct mod_dvfs_domain_ctx *get_domain_ctx(fwk_id_t domain_id) { uint32_t idx = fwk_id_get_element_idx(domain_id); - if (idx < dvfs_domain_element_count) - return &(*domain_ctx)[idx]; + if (idx < dvfs_ctx.dvfs_domain_element_count) + return &(*dvfs_ctx.domain_ctx)[idx]; else return NULL; } @@ -122,8 +169,11 @@ static const struct mod_dvfs_opp *get_opp_for_values( size_t opp_idx; const struct mod_dvfs_opp *opp; - /* A value of zero indicates the parameter should be ignored */ - fwk_assert((frequency != 0) || (voltage != 0)); + /* + * If both parameters are zero we have nothing to do. 
+ */ + if ((frequency == 0) && (voltage == 0)) + return NULL; for (opp_idx = 0; opp_idx < ctx->opp_count; opp_idx++) { opp = &ctx->config->opps[opp_idx]; @@ -186,25 +236,122 @@ static const struct mod_dvfs_opp *adjust_opp_for_new_limits( /* * Helper to create events to process requests asynchronously */ -static int put_event_request(fwk_id_t domain_id, - struct mod_dvfs_domain_ctx *ctx, +static int put_event_request(struct mod_dvfs_domain_ctx *ctx, fwk_id_t event_id, - enum mod_dvfs_domain_state state, - bool response_required) + enum mod_dvfs_domain_state state) { struct fwk_event req; - ctx->state = state; - req = (struct fwk_event) { - .target_id = domain_id, + .target_id = ctx->domain_id, .id = event_id, - .response_requested = response_required, + .response_requested = ctx->request.response_required, }; + if (ctx->request.set_source_id) + req.source_id = ctx->domain_id; + + ctx->state = state; + return fwk_thread_put_event(&req); } +static int dvfs_set_frequency_start(struct mod_dvfs_domain_ctx *ctx, + const struct mod_dvfs_opp *new_opp, + bool retry_request) +{ + ctx->request.new_opp = *new_opp; + ctx->request.retry_request = retry_request; + ctx->request.response_required = false; + + return put_event_request(ctx, mod_dvfs_event_id_set, + DVFS_DOMAIN_SET_OPP); +} + +/* + * Handle pending requests + */ +static void dvfs_flush_pending_request(struct mod_dvfs_domain_ctx *ctx) +{ + if (ctx->request_pending) { + ctx->request_pending = false; + dvfs_set_frequency_start(ctx, &ctx->pending_request.new_opp, + ctx->pending_request.retry_request); + } + ctx->pending_request = (struct mod_dvfs_request){ 0 }; +} + +static void alarm_callback(uintptr_t param) +{ + struct fwk_event req; + struct mod_dvfs_domain_ctx *ctx = + (struct mod_dvfs_domain_ctx *)param; + + req = (struct fwk_event) { + .target_id = ctx->domain_id, + .source_id = ctx->domain_id, + .id = mod_dvfs_event_id_retry, + .response_requested = ctx->pending_request.response_required, + }; + + fwk_thread_put_event(&req); +} + +static int dvfs_handle_pending_request(struct mod_dvfs_domain_ctx *ctx) +{ + int status = FWK_SUCCESS; + + if (ctx->state == DVFS_DOMAIN_STATE_RETRY) + return FWK_SUCCESS; + + if (ctx->config->retry_ms > 0) { + status = ctx->apis.alarm_api->start(ctx->config->alarm_id, + ctx->config->retry_ms, MOD_TIMER_ALARM_TYPE_ONCE, + alarm_callback, (uintptr_t)ctx); + if (status == FWK_SUCCESS) + ctx->state = DVFS_DOMAIN_STATE_RETRY; + } else { + /* + * If this domain does not have a timeout configured we start + * processing the request immediately. + */ + dvfs_flush_pending_request(ctx); + } + return status; +} + +static int dvfs_create_pending_level_request(struct mod_dvfs_domain_ctx *ctx, + const struct mod_dvfs_opp *new_opp, bool retry_request) +{ + + if (ctx->request_pending) { + if ((new_opp->frequency == ctx->pending_request.new_opp.frequency) && + (new_opp->voltage == ctx->pending_request.new_opp.voltage)) + return FWK_SUCCESS; + } else { + if ((new_opp->frequency == ctx->current_opp.frequency) && + (new_opp->voltage == ctx->current_opp.voltage)) { + return FWK_SUCCESS; + + ctx->pending_request.num_retries = 0; + ctx->pending_request.set_source_id = false; + ctx->pending_request.response_required = false; + ctx->request_pending = true; + } + } + + ctx->pending_request.new_opp = *new_opp; + + /* + * Set the retry flag only if we have to retry the request. Otherwise don't + * overwrite it. 
+ */ + if (retry_request) + ctx->pending_request.retry_request = retry_request; + + return FWK_SUCCESS; +} + /* * DVFS module synchronous API functions */ @@ -212,8 +359,6 @@ static int dvfs_get_sustained_opp(fwk_id_t domain_id, struct mod_dvfs_opp *opp) { const struct mod_dvfs_domain_ctx *ctx; - fwk_assert(opp != NULL); - ctx = get_domain_ctx(domain_id); if (ctx == NULL) return FWK_E_PARAM; @@ -232,7 +377,8 @@ static int dvfs_get_nth_opp(fwk_id_t domain_id, { const struct mod_dvfs_domain_ctx *ctx; - fwk_assert(opp != NULL); + if (opp == NULL) + return FWK_E_PARAM; ctx = get_domain_ctx(domain_id); if (ctx == NULL) @@ -250,7 +396,8 @@ static int dvfs_get_opp_count(fwk_id_t domain_id, size_t *opp_count) { const struct mod_dvfs_domain_ctx *ctx; - fwk_assert(opp_count != NULL); + if (opp_count == NULL) + return FWK_E_PARAM; ctx = get_domain_ctx(domain_id); if (ctx == NULL) @@ -265,7 +412,8 @@ static int dvfs_get_latency(fwk_id_t domain_id, uint16_t *latency) { const struct mod_dvfs_domain_ctx *ctx; - fwk_assert(latency != NULL); + if (latency == NULL) + return FWK_E_PARAM; ctx = get_domain_ctx(domain_id); if (ctx == NULL) @@ -281,6 +429,9 @@ static int dvfs_get_frequency_limits(fwk_id_t domain_id, { struct mod_dvfs_domain_ctx *ctx; + if (limits == NULL) + return FWK_E_PARAM; + ctx = get_domain_ctx(domain_id); if (ctx == NULL) return FWK_E_PARAM; @@ -297,7 +448,8 @@ static int dvfs_get_current_opp(fwk_id_t domain_id, struct mod_dvfs_opp *opp) int status; struct mod_dvfs_domain_ctx *ctx; - fwk_assert(opp != NULL); + if (opp == NULL) + return FWK_E_PARAM; ctx = get_domain_ctx(domain_id); if (ctx == NULL) @@ -312,8 +464,9 @@ static int dvfs_get_current_opp(fwk_id_t domain_id, struct mod_dvfs_opp *opp) if (ctx->state != DVFS_DOMAIN_STATE_IDLE) return FWK_E_BUSY; - status = put_event_request(domain_id, ctx, - mod_dvfs_event_id_get_opp, DVFS_DOMAIN_GET_OPP, true); + ctx->request.response_required = true; + status = put_event_request(ctx, mod_dvfs_event_id_get_opp, + DVFS_DOMAIN_GET_OPP); if (status == FWK_SUCCESS) { /* * We return FWK_PENDING here to indicate to the caller that the @@ -338,33 +491,22 @@ static int dvfs_set_frequency(fwk_id_t domain_id, uint64_t frequency) if (ctx == NULL) return FWK_E_PARAM; - if (ctx->state != DVFS_DOMAIN_STATE_IDLE) - return FWK_E_BUSY; - /* Only accept frequencies that exist in the operating point table */ new_opp = get_opp_for_values(ctx, frequency, 0); if (new_opp == NULL) return FWK_E_RANGE; - if ((new_opp->frequency == ctx->current_opp.frequency) && - (new_opp->voltage == ctx->current_opp.voltage)) - return FWK_SUCCESS; - if (!is_opp_within_limits(new_opp, &ctx->frequency_limits)) return FWK_E_RANGE; - ctx->request.new_opp.frequency = new_opp->frequency; - ctx->request.new_opp.voltage = new_opp->voltage; - ctx->request.frequency_limits.minimum = 0; - ctx->request.frequency_limits.maximum = 0; + if (ctx->state != DVFS_DOMAIN_STATE_IDLE) + return dvfs_create_pending_level_request(ctx, new_opp, false); - /* - * Note that we do not return FWK_PENDING here. If an event - * is successfully queued we return FWK_SUCCESS to indicate - * that the caller should not expect a deferred response. 
- */ - return put_event_request(domain_id, ctx, mod_dvfs_event_id_set, - DVFS_DOMAIN_SET_OPP, false); + if ((new_opp->frequency == ctx->current_opp.frequency) && + (new_opp->voltage == ctx->current_opp.voltage)) + return FWK_SUCCESS; + + return dvfs_set_frequency_start(ctx, new_opp, false); } static int dvfs_set_frequency_limits(fwk_id_t domain_id, @@ -377,32 +519,24 @@ static int dvfs_set_frequency_limits(fwk_id_t domain_id, if (ctx == NULL) return FWK_E_PARAM; - if (ctx->state != DVFS_DOMAIN_STATE_IDLE) - return FWK_E_BUSY; - if (!are_limits_valid(ctx, limits)) return FWK_E_PARAM; new_opp = adjust_opp_for_new_limits(ctx, &ctx->current_opp, limits); + if (new_opp == NULL) + return FWK_E_PARAM; + + ctx->frequency_limits = *limits; if ((new_opp->frequency == ctx->current_opp.frequency) && (new_opp->voltage == ctx->current_opp.voltage)) { - ctx->frequency_limits = *limits; return FWK_SUCCESS; } - ctx->request.new_opp.frequency = new_opp->frequency; - ctx->request.new_opp.voltage = new_opp->voltage; - ctx->request.frequency_limits.minimum = limits->minimum; - ctx->request.frequency_limits.maximum = limits->maximum; + if (ctx->state != DVFS_DOMAIN_STATE_IDLE) + return dvfs_create_pending_level_request(ctx, new_opp, true); - /* - * Note that we do not return FWK_PENDING here. If an event - * is successfully queued we return FWK_SUCCESS to indicate - * that the caller should not expect a deferred response. - */ - return put_event_request(domain_id, ctx, mod_dvfs_event_id_set, - DVFS_DOMAIN_SET_OPP, false); + return dvfs_set_frequency_start(ctx, new_opp, true); } static const struct mod_dvfs_domain_api mod_dvfs_domain_api = { @@ -419,34 +553,31 @@ static const struct mod_dvfs_domain_api mod_dvfs_domain_api = { /* * DVFS utility functions */ -static void dvfs_set_op_done(struct mod_dvfs_domain_ctx *ctx) -{ - /* - * SET_OPP() successful, store the new values - */ - ctx->current_opp.voltage = ctx->request.new_opp.voltage; - ctx->current_opp.frequency = ctx->request.new_opp.frequency; - - if ((ctx->request.frequency_limits.minimum != 0) || - (ctx->request.frequency_limits.maximum != 0)) { - ctx->frequency_limits = ctx->request.frequency_limits; - } -} -static int dvfs_complete(struct mod_dvfs_domain_ctx *ctx, - const struct fwk_event *event, +/* + * DVFS Request Complete handling. + */ +static void dvfs_complete_respond(struct mod_dvfs_domain_ctx *ctx, struct fwk_event *resp_event, - int req_status, - bool return_opp) + int req_status) { - int status = req_status; + int status; struct fwk_event read_req_event; struct mod_dvfs_params_response *resp_params; + bool return_opp; + + if ((ctx->state == DVFS_DOMAIN_GET_OPP) && (req_status == FWK_SUCCESS)) + return_opp = true; if (ctx->cookie != 0) { + /* + * The request was handled asynchronously, retrieve + * the delayed_response and return it to the caller + * with the data. + */ resp_params = (struct mod_dvfs_params_response *) &read_req_event.params; - status = fwk_thread_get_delayed_response(event->target_id, + status = fwk_thread_get_delayed_response(ctx->domain_id, ctx->cookie, &read_req_event); @@ -454,18 +585,65 @@ static int dvfs_complete(struct mod_dvfs_domain_ctx *ctx, resp_params->status = req_status; if (return_opp) resp_params->performance_level = ctx->current_opp.frequency; - status = fwk_thread_put_event(&read_req_event); + fwk_thread_put_event(&read_req_event); } ctx->cookie = 0; } else if (resp_event != NULL) { + /* + * The request is being handled synchronously, return + * the data to the caller in the resp_event. 
+ */ resp_params = (struct mod_dvfs_params_response *)resp_event->params; resp_params->status = req_status; if (return_opp) resp_params->performance_level = ctx->current_opp.frequency; } +} - ctx->state = DVFS_DOMAIN_STATE_IDLE; +static int dvfs_complete(struct mod_dvfs_domain_ctx *ctx, + struct fwk_event *resp_event, + int req_status) +{ + int status = req_status; + + if (ctx->request.response_required) { + /* + * If the DVFS request requires a response we send it now, no retries + * are attempted. + */ + dvfs_complete_respond(ctx, resp_event, req_status); + + } else if ((req_status != FWK_SUCCESS) && ctx->request.retry_request) { + /* + * No response required, request has failed, a retry is necessary. + */ + if (ctx->request.num_retries++ < DVFS_MAX_RETRIES) { + ctx->pending_request.retry_request = ctx->request.retry_request; + ctx->pending_request.num_retries = ctx->request.num_retries; + if (!ctx->request_pending) { + ctx->pending_request.new_opp = ctx->request.new_opp; + ctx->request_pending = true; + } + } + } + + /* + * Now we need to start processing the pending request if any, + * note that we do not set the state to DOMAIN_STATE_IDLE + * here to prevent another request being processed ahead of this one. + */ + if (ctx->request_pending) { + dvfs_handle_pending_request(ctx); + } else { + /* + * The request has completed and there are no pending requests. + * Clean up the domain ctx here. + */ + ctx->pending_request = (struct mod_dvfs_request){ 0 }; + ctx->request = (struct mod_dvfs_request){ 0 }; + ctx->state = DVFS_DOMAIN_STATE_IDLE; + } return status; } @@ -475,7 +653,6 @@ static int dvfs_complete(struct mod_dvfs_domain_ctx *ctx, * reading the voltage. */ static int dvfs_handle_set_opp(struct mod_dvfs_domain_ctx *ctx, - const struct fwk_event *event, uint64_t voltage) { int status = FWK_SUCCESS; @@ -493,7 +670,7 @@ static int dvfs_handle_set_opp(struct mod_dvfs_domain_ctx *ctx, } if (status != FWK_SUCCESS) - return dvfs_complete(ctx, event, NULL, status, false); + return dvfs_complete(ctx, NULL, status); /* * Voltage set successsfully, continue to set the frequency @@ -507,9 +684,7 @@ static int dvfs_handle_set_opp(struct mod_dvfs_domain_ctx *ctx, ctx->state = DVFS_DOMAIN_SET_OPP_DONE; return status; } - } - - if (ctx->request.new_opp.voltage < voltage) { + } else if (ctx->request.new_opp.voltage < voltage) { /* * Current > request, decrease frequency then set voltage */ @@ -524,7 +699,7 @@ static int dvfs_handle_set_opp(struct mod_dvfs_domain_ctx *ctx, } if (status != FWK_SUCCESS) - return dvfs_complete(ctx, event, NULL, status, false); + return dvfs_complete(ctx, NULL, status); /* * Clock set_rate() completed successfully, continue to set_voltage() @@ -536,15 +711,30 @@ static int dvfs_handle_set_opp(struct mod_dvfs_domain_ctx *ctx, ctx->state = DVFS_DOMAIN_SET_OPP_DONE; return status; } + } else if (ctx->current_opp.frequency == 0) { + /* + * At startup the voltage may be set without the frequency having + * been set. In this case we must set the frequency regardless of + * the voltage. + */ + status = ctx->apis.clock->set_rate( + ctx->config->clock_id, + ctx->request.new_opp.frequency, + MOD_CLOCK_ROUND_MODE_NONE); + + if (status == FWK_PENDING) { + ctx->state = DVFS_DOMAIN_SET_OPP_DONE; + return status; + } } /* * SET_OPP() completed, return to caller. 
*/ if (status == FWK_SUCCESS) - dvfs_set_op_done(ctx); + ctx->current_opp = ctx->request.new_opp; - return dvfs_complete(ctx, event, NULL, status, false); + return dvfs_complete(ctx, NULL, status); } /* @@ -554,7 +744,6 @@ static int dvfs_handle_set_opp(struct mod_dvfs_domain_ctx *ctx, * by a GET_OPP(), it will always be NULL for SET_OPP(). */ static int dvfs_handle_psu_get_voltage_resp(struct mod_dvfs_domain_ctx *ctx, - const struct fwk_event *event, struct fwk_event *resp_event, int req_status, uint64_t voltage) @@ -562,10 +751,10 @@ static int dvfs_handle_psu_get_voltage_resp(struct mod_dvfs_domain_ctx *ctx, const struct mod_dvfs_opp *opp; if (req_status != FWK_SUCCESS) - return dvfs_complete(ctx, event, resp_event, req_status, false); + return dvfs_complete(ctx, resp_event, req_status); if (ctx->state == DVFS_DOMAIN_SET_OPP) - return dvfs_handle_set_opp(ctx, event, voltage); + return dvfs_handle_set_opp(ctx, voltage); /* * We have the actual voltage, get the frequency from the @@ -573,7 +762,7 @@ static int dvfs_handle_psu_get_voltage_resp(struct mod_dvfs_domain_ctx *ctx, */ opp = get_opp_for_values(ctx, 0, voltage); if (opp == NULL) - return dvfs_complete(ctx, event, resp_event, FWK_E_DEVICE, false); + return dvfs_complete(ctx, resp_event, FWK_E_DEVICE); /* * We have successfully found the frequency, save it in the domain context. @@ -584,13 +773,12 @@ static int dvfs_handle_psu_get_voltage_resp(struct mod_dvfs_domain_ctx *ctx, /* * This is a GET_OPP(), we are done, return the frequency to caller */ - return dvfs_complete(ctx, event, resp_event, FWK_SUCCESS, true); + return dvfs_complete(ctx, resp_event, FWK_SUCCESS); } /* * Note that dvfs_handle_psu_set_voltage_resp() is only called after an - * asynchronous set_voltage() operation, we have already saved the - * cookie for the event. + * asynchronous set_voltage() operation. */ static int dvfs_handle_psu_set_voltage_resp(struct mod_dvfs_domain_ctx *ctx, const struct fwk_event *event) @@ -600,7 +788,7 @@ static int dvfs_handle_psu_set_voltage_resp(struct mod_dvfs_domain_ctx *ctx, (struct mod_psu_driver_response *)event->params; if (psu_response->status != FWK_SUCCESS) - return dvfs_complete(ctx, event, NULL, psu_response->status, false); + return dvfs_complete(ctx, NULL, psu_response->status); if (ctx->state == DVFS_DOMAIN_SET_FREQUENCY) { status = ctx->apis.clock->set_rate( @@ -620,15 +808,14 @@ static int dvfs_handle_psu_set_voltage_resp(struct mod_dvfs_domain_ctx *ctx, * SET_OPP() completed, return to caller. */ if (status == FWK_SUCCESS) - dvfs_set_op_done(ctx); + ctx->current_opp = ctx->request.new_opp; - return dvfs_complete(ctx, event, NULL, status, false); + return dvfs_complete(ctx, NULL, status); } /* * Note that dvfs_handle_clk_set_freq_resp() is only called after an - * asynchronous set_rate() operation, we have already saved the - * cookie for the event. + * asynchronous set_rate() operation. */ static int dvfs_handle_clk_set_freq_resp(struct mod_dvfs_domain_ctx *ctx, const struct fwk_event *event) @@ -638,7 +825,7 @@ static int dvfs_handle_clk_set_freq_resp(struct mod_dvfs_domain_ctx *ctx, (struct mod_clock_driver_resp_params *)event->params; if (clock_response->status != FWK_SUCCESS) - return dvfs_complete(ctx, event, NULL, clock_response->status, false); + return dvfs_complete(ctx, NULL, clock_response->status); if (ctx->state == DVFS_DOMAIN_SET_VOLTAGE) { /* @@ -661,9 +848,9 @@ static int dvfs_handle_clk_set_freq_resp(struct mod_dvfs_domain_ctx *ctx, * SET_OPP() completed, return to caller. 
*/ if (status == FWK_SUCCESS) - dvfs_set_op_done(ctx); + ctx->current_opp = ctx->request.new_opp; - return dvfs_complete(ctx, event, NULL, status, false); + return dvfs_complete(ctx, NULL, status); } /* @@ -685,8 +872,7 @@ static int mod_dvfs_process_event(const struct fwk_event *event, * local DVFS event from dvfs_get_current_opp() */ if (fwk_id_is_equal(event->id, mod_dvfs_event_id_get_opp)) { - status = ctx->apis.psu->get_voltage( - ctx->config->psu_id, + status = ctx->apis.psu->get_voltage(ctx->config->psu_id, &ctx->request.new_opp.voltage); if (status == FWK_PENDING) { ctx->cookie = event->cookie; @@ -697,7 +883,7 @@ static int mod_dvfs_process_event(const struct fwk_event *event, /* * Handle get_voltage() synchronously */ - return dvfs_handle_psu_get_voltage_resp(ctx, event, + return dvfs_handle_psu_get_voltage_resp(ctx, resp_event, status, ctx->request.new_opp.voltage); } @@ -709,8 +895,7 @@ static int mod_dvfs_process_event(const struct fwk_event *event, voltage = ctx->current_opp.voltage; status = FWK_SUCCESS; } else { - status = ctx->apis.psu->get_voltage( - ctx->config->psu_id, + status = ctx->apis.psu->get_voltage(ctx->config->psu_id, &voltage); if (status == FWK_PENDING) return FWK_SUCCESS; @@ -719,38 +904,50 @@ static int mod_dvfs_process_event(const struct fwk_event *event, /* * Handle get_voltage() synchronously */ - return dvfs_handle_psu_get_voltage_resp(ctx, event, NULL, + return dvfs_handle_psu_get_voltage_resp(ctx, NULL, status, voltage); } + /* + * local DVFS event from dvfs_flush_pending_request() + */ + if (fwk_id_is_equal(event->id, mod_dvfs_event_id_retry)) { + ctx->request.set_source_id = false; + return dvfs_set_frequency_start(ctx, &ctx->pending_request.new_opp, + ctx->pending_request.retry_request); + } + /* * response event from PSU get_voltage() */ if (fwk_id_is_equal(event->id, mod_psu_event_id_get_voltage)) { /* - * Handle get_voltage() asynchronously + * Handle get_voltage() asynchronously, cookie will have been saved + * above so we can safely discard the resp_event. */ psu_response = (struct mod_psu_driver_response *)event->params; - return dvfs_handle_psu_get_voltage_resp(ctx, event, NULL, + return dvfs_handle_psu_get_voltage_resp(ctx, NULL, psu_response->status, psu_response->voltage); } /* - * response event from PSU set_voltage() + * response event from SET_OPP() PSU set_voltage() */ if (fwk_id_is_equal(event->id, mod_psu_event_id_set_voltage)) { /* - * Handle set_voltage() asynchronously + * Handle set_voltage() asynchronously, no response required for + * a SET_OPP() request so resp_event discarded. */ return dvfs_handle_psu_set_voltage_resp(ctx, event); } /* - * response event from Clock set_rate() + * response event from SET_OPP() Clock set_rate() */ if (fwk_id_is_equal(event->id, mod_clock_event_id_request)) { /* - * Handle set_frequency() asynchronously + * Handle set_frequency() asynchronously, no response required for + * a SET_OPP() request so resp_event discarded. 
*/ return dvfs_handle_clk_set_freq_resp(ctx, event); } @@ -758,13 +955,19 @@ static int mod_dvfs_process_event(const struct fwk_event *event, return FWK_E_PARAM; } +/* + * Module framework support + */ static int dvfs_init(fwk_id_t module_id, unsigned int element_count, const void *data) { - domain_ctx = fwk_mm_calloc(element_count, sizeof((*domain_ctx)[0])); - if (domain_ctx == NULL) + dvfs_ctx.domain_ctx = fwk_mm_calloc(element_count, + sizeof((*dvfs_ctx.domain_ctx)[0])); + if (dvfs_ctx.domain_ctx == NULL) return FWK_E_NOMEM; - dvfs_domain_element_count = element_count; + + dvfs_ctx.dvfs_domain_element_count = element_count; + return FWK_SUCCESS; } @@ -777,6 +980,8 @@ dvfs_element_init(fwk_id_t domain_id, fwk_assert(sub_element_count == 0); + ctx->domain_id = domain_id; + /* Initialize the configuration */ ctx->config = data; fwk_assert(ctx->config->opps != NULL); @@ -805,26 +1010,32 @@ dvfs_bind_element(fwk_id_t domain_id, unsigned int round) return FWK_SUCCESS; /* Bind to the power supply module */ - status = fwk_module_bind( - ctx->config->psu_id, mod_psu_api_id_device, &ctx->apis.psu); + status = fwk_module_bind(ctx->config->psu_id, mod_psu_api_id_device, + &ctx->apis.psu); if (status != FWK_SUCCESS) return FWK_E_PANIC; /* Bind to the clock module */ - status = fwk_module_bind( - ctx->config->clock_id, - FWK_ID_API(FWK_MODULE_IDX_CLOCK, 0), - &ctx->apis.clock); + status = fwk_module_bind(ctx->config->clock_id, + FWK_ID_API(FWK_MODULE_IDX_CLOCK, 0), &ctx->apis.clock); if (status != FWK_SUCCESS) return FWK_E_PANIC; + /* Bind to the alarm HAL if required */ + if (ctx->config->retry_ms > 0) { + status = fwk_module_bind(ctx->config->alarm_id, MOD_TIMER_API_ID_ALARM, + &ctx->apis.alarm_api); + if (status != FWK_SUCCESS) + return FWK_E_PANIC; + } + return FWK_SUCCESS; } static int dvfs_bind(fwk_id_t id, unsigned int round) { - /* We only need to handle binding our elements */ + /* Bind our elements */ if (fwk_id_is_type(id, FWK_ID_TYPE_ELEMENT)) return dvfs_bind_element(id, round); @@ -869,5 +1080,5 @@ const struct fwk_module module_dvfs = { .process_bind_request = dvfs_process_bind_request, .process_event = mod_dvfs_process_event, .api_count = MOD_DVFS_API_IDX_COUNT, - .event_count = MOD_DVFS_EVENT_IDX_COUNT, + .event_count = MOD_DVFS_INTERNAL_EVENT_IDX_COUNT, }; diff --git a/product/juno/include/juno_alarm_idx.h b/product/juno/include/juno_alarm_idx.h index 26bffa3af..0c71e9e07 100644 --- a/product/juno/include/juno_alarm_idx.h +++ b/product/juno/include/juno_alarm_idx.h @@ -17,7 +17,15 @@ enum juno_xrp7724_alarm_idx { JUNO_XRP7724_ALARM_IDX_COUNT, }; +/* Alarm indices for DVFS */ +enum juno_dvfs_alarm_idx { + JUNO_DVFS_ALARM_VLITTLE_IDX = JUNO_XRP7724_ALARM_IDX_COUNT, + JUNO_DVFS_ALARM_BIG_IDX, + JUNO_DVFS_ALARM_GPU_IDX, + JUNO_DVFS_ALARM_IDX_CNT, +}; + /* Total count of alarms */ -#define JUNO_ALARM_IDX_COUNT JUNO_XRP7724_ALARM_IDX_COUNT +#define JUNO_ALARM_IDX_COUNT JUNO_DVFS_ALARM_IDX_CNT #endif /* JUNO_ALARM_IDX_H */ -- GitLab From 46879f3bf31522b3a4911f043e81c89ecfede3af Mon Sep 17 00:00:00 2001 From: Jim Quigley Date: Fri, 6 Dec 2019 10:28:48 +0000 Subject: [PATCH 09/10] DVFS: Set DVFS sustained OPP at startup Set DVFS sustained OPP for each device at startup. 
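For reference, the dvfs_get_sustained_opp() helper called by the new start handler is not part of this diff. A minimal sketch of the lookup it is expected to perform is shown below, assuming the domain configuration indexes the sustained entry of its OPP table (the field name sustained_idx is an assumption used for illustration, not something this patch introduces):

    static int dvfs_get_sustained_opp(
        fwk_id_t domain_id,
        struct mod_dvfs_opp *opp)
    {
        const struct mod_dvfs_domain_ctx *ctx = get_domain_ctx(domain_id);

        if (ctx == NULL)
            return FWK_E_PARAM;

        /* Return the OPP marked as sustained in the domain configuration */
        *opp = ctx->config->opps[ctx->config->sustained_idx];

        return FWK_SUCCESS;
    }

At start(), each domain then requests this OPP through the normal set-frequency path (dvfs_set_frequency_start()), exactly as a runtime request would be handled.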
Change-Id: Ie2f94d4186c43003ce979c76b6f007ea7358d3fc Signed-off-by: Jim Quigley --- module/dvfs/src/mod_dvfs.c | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/module/dvfs/src/mod_dvfs.c b/module/dvfs/src/mod_dvfs.c index 6bccd4ebf..dc3b2eca9 100644 --- a/module/dvfs/src/mod_dvfs.c +++ b/module/dvfs/src/mod_dvfs.c @@ -958,6 +958,31 @@ static int mod_dvfs_process_event(const struct fwk_event *event, /* * Module framework support */ + +/* + * The DVFS voltage/frequency must be set to the sustained OPP at startup + * for each domain. + */ +static int dvfs_start(fwk_id_t id) +{ + int status; + struct mod_dvfs_opp sustained_opp; + struct mod_dvfs_domain_ctx *ctx; + + if (!fwk_id_is_type(id, FWK_ID_TYPE_ELEMENT)) + return FWK_SUCCESS; + + status = dvfs_get_sustained_opp(id, &sustained_opp); + if (status == FWK_SUCCESS) { + ctx = get_domain_ctx(id); + ctx->request.set_source_id = true; + ctx->request.num_retries = 0; + dvfs_set_frequency_start(ctx, &sustained_opp, true); + } + + return status; +} + static int dvfs_init(fwk_id_t module_id, unsigned int element_count, const void *data) { @@ -1076,6 +1101,7 @@ const struct fwk_module module_dvfs = { .type = FWK_MODULE_TYPE_HAL, .init = dvfs_init, .element_init = dvfs_element_init, + .start = dvfs_start, .bind = dvfs_bind, .process_bind_request = dvfs_process_bind_request, .process_event = mod_dvfs_process_event, -- GitLab From 4b719ecd008ba4b3e12ae124ea150dbdedd4308d Mon Sep 17 00:00:00 2001 From: Elieva Pignat Date: Thu, 1 Aug 2019 16:38:59 +0100 Subject: [PATCH 10/10] juno: Add PSU channels support for juno XRP7724 This patch adds support for the PSU driver of the XRP7724 component of the juno board. It provides an implementation for the PSU driver API and uses the I2C HAL API to make PSU request to the XRP7724 component. Change-Id: I2c0b910307de92e49f23336ca16606c1cf12fbcb Signed-off-by: Elieva Pignat Signed-off-by: Jim Quigley --- .../juno_xrp7724/include/mod_juno_xrp7724.h | 37 +- .../juno_xrp7724/src/mod_juno_xrp7724.c | 506 ++++++++++++++++-- product/juno/scp_ramfw/config_juno_xrp7724.c | 59 +- product/juno/scp_ramfw/config_juno_xrp7724.h | 4 + product/juno/scp_ramfw/config_psu.c | 63 +++ product/juno/scp_ramfw/config_psu.h | 22 + product/juno/scp_ramfw/firmware.mk | 6 +- 7 files changed, 649 insertions(+), 48 deletions(-) create mode 100644 product/juno/scp_ramfw/config_psu.c create mode 100644 product/juno/scp_ramfw/config_psu.h diff --git a/product/juno/module/juno_xrp7724/include/mod_juno_xrp7724.h b/product/juno/module/juno_xrp7724/include/mod_juno_xrp7724.h index 13de7a28c..b9eb37870 100644 --- a/product/juno/module/juno_xrp7724/include/mod_juno_xrp7724.h +++ b/product/juno/module/juno_xrp7724/include/mod_juno_xrp7724.h @@ -27,12 +27,25 @@ struct mod_juno_xrp7724_config { /*! Slave address of the I2C device */ unsigned int slave_address; + /*! Identifier of the I2C HAL */ fwk_id_t i2c_hal_id; + /*! Identifier of the timer */ fwk_id_t timer_hal_id; + + /*! + * \brief Identifier of the alarm. + * + * \note When setting the voltage, it is necessary to wait for the PSU to + * \note stabilize at the new voltage. We use the Timer HAL to insert a + * \note short delay for this. + */ + fwk_id_t alarm_hal_id; + /*! Identifier of the GPIO for the assert command */ fwk_id_t gpio_assert_id; + /*! 
Identifier of the GPIO for the system mode selection */ fwk_id_t gpio_mode_id; }; @@ -43,11 +56,12 @@ struct mod_juno_xrp7724_config { enum mod_juno_xrp7724_element_type { MOD_JUNO_XRP7724_ELEMENT_TYPE_GPIO, MOD_JUNO_XRP7724_ELEMENT_TYPE_SENSOR, + MOD_JUNO_XRP7724_ELEMENT_TYPE_PSU, MOD_JUNO_XRP7724_ELEMENT_TYPE_COUNT, }; /*! - * \brief Element configuration. + * \brief Element configuration */ struct mod_juno_xrp7724_dev_config { /*! Identifier of the element for the driver response */ @@ -57,7 +71,21 @@ struct mod_juno_xrp7724_dev_config { fwk_id_t driver_response_api_id; /*! - * \brief Sensor information. + * \brief Identifier of the ADC sensor associated with the PSU channel. + * + * \note Only provided for a PSU element + */ + fwk_id_t psu_adc_id; + + /*! + * \brief Index of the bus associated to the PSU. + * + * \note Only provided for a PSU element + */ + uint8_t psu_bus_idx; + + /*! + * \brief Sensor information * * \note Only provided for a sensor element */ @@ -90,6 +118,7 @@ struct mod_juno_xrp7724_api_system_mode { enum mod_juno_xrp7724_api_idx { MOD_JUNO_XRP7724_API_IDX_SENSOR, MOD_JUNO_XRP7724_API_IDX_SYSTEM_MODE, + MOD_JUNO_XRP7724_API_IDX_PSU, MOD_JUNO_XRP7724_API_IDX_COUNT, }; @@ -101,6 +130,10 @@ static const fwk_id_t mod_juno_xrp7724_api_id_system_mode = FWK_ID_API_INIT( static const fwk_id_t mod_juno_xrp7724_api_id_sensor = FWK_ID_API_INIT( FWK_MODULE_IDX_JUNO_XRP7724, MOD_JUNO_XRP7724_API_IDX_SENSOR); +/*! Identifier of the PSU driver API */ +static const fwk_id_t mod_juno_xrp7724_api_id_psu = FWK_ID_API_INIT( + FWK_MODULE_IDX_JUNO_XRP7724, MOD_JUNO_XRP7724_API_IDX_PSU); + /*! * \} */ diff --git a/product/juno/module/juno_xrp7724/src/mod_juno_xrp7724.c b/product/juno/module/juno_xrp7724/src/mod_juno_xrp7724.c index 4373ad10d..3a54072f0 100644 --- a/product/juno/module/juno_xrp7724/src/mod_juno_xrp7724.c +++ b/product/juno/module/juno_xrp7724/src/mod_juno_xrp7724.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -39,6 +40,40 @@ /* Read temperature command */ #define SENSOR_READ_TEMP 0x15 +/* + * PSU Properties + */ +#define PSU_VOUT_PRESCALE_UV 2500 /* Low range */ +#define PSU_VOUT_STEP_COARSE_UV (5 * PSU_VOUT_PRESCALE_UV) +#define PSU_VOUT_STEP_FINE_UV PSU_VOUT_PRESCALE_UV +#define PSU_MVOUT_SCALE_READ 15 +#define PSU_FINE_ADJUST_POS 12 +#define PSU_MAX_FINE_ADJUST 0x7 +#define PSU_CHANNEL_COUNT 4 + +#define PSU_MAX_VOUT_MV 1100 + +/* Ramping/Settling time when changing voltage in ms */ +#define PSU_RAMP_DELAY_SET_MS 1 + +/* Ramping/Settling time when enabling a channel in ms */ +#define PSU_RAMP_DELAY_ENABLE_MS 2 + +#define PSU_TARGET_MARGIN_MV 20 + +/* + * Power Command Set + */ +#define PSU_PWR_GET_VOLTAGE_CHx 0x10 +#define PSU_PWR_ENABLE_SUPPLY 0x1E +#define PSU_PWR_SET_VOLTAGE_CHx 0x20 + +#define PSU_WRITE_LENGTH 3 +#define PSU_READ_LENGTH 2 + +#define PSU_CHANNEL_ENABLED 0x1 +#define PSU_CHANNEL_DISABLED 0x0 + enum juno_xrp7724_event_idx { JUNO_XRP7724_EVENT_IDX_REQUEST, JUNO_XRP7724_EVENT_IDX_COUNT @@ -58,14 +93,52 @@ enum juno_xrp7724_sensor_request { JUNO_XRP7724_SENSOR_REQUEST_CONVERT_VALUE, }; +enum juno_xrp7724_psu_request { + JUNO_XRP7724_PSU_REQUEST_IDLE, + JUNO_XRP7724_PSU_REQUEST_READ_VOLTAGE, + JUNO_XRP7724_PSU_REQUEST_CONVERT_VOLTAGE, + JUNO_XRP7724_PSU_REQUEST_SET_VOLTAGE, + JUNO_XRP7724_PSU_REQUEST_WAIT_FOR_VOLTAGE, + JUNO_XRP7724_PSU_REQUEST_COMPARE_VOLTAGE, + JUNO_XRP7724_PSU_REQUEST_SET_ENABLED, + JUNO_XRP7724_PSU_REQUEST_WAIT_FOR_ENABLED, + JUNO_XRP7724_PSU_REQUEST_DONE_ENABLED, +}; + +struct 
psu_set_enabled_param { + bool enabled; +}; + +struct psu_set_voltage_param { + uint64_t voltage; + uint16_t set_value; +}; + struct juno_xrp7724_ctx { const struct mod_juno_xrp7724_config *config; const struct mod_sensor_driver_response_api *sensor_driver_response_api; + const struct mod_psu_driver_response_api *psu_driver_response_api; const struct mod_i2c_api *i2c_api; const struct mod_timer_api *timer_api; + const struct mod_timer_alarm_api *alarm_api; + const struct mod_sensor_api *adc_api; enum juno_xrp7724_gpio_request gpio_request; enum juno_xrp7724_sensor_request sensor_request; - bool driver_skipped; + + /* + * Note that as all PSUs are accessible via a single I2C bus only one + * request may be active at any time. + */ + enum juno_xrp7724_psu_request psu_request; +}; + +struct juno_xrp7724_dev_psu_ctx { + /* Indicate for a PSU element whether the channel is enabled */ + bool is_psu_channel_enabled; + uint32_t wait_event_cookie; + fwk_id_t element_id; + bool psu_set_enabled; + uint64_t psu_set_voltage; }; struct juno_xrp7724_dev_ctx { @@ -73,6 +146,8 @@ struct juno_xrp7724_dev_ctx { uint32_t cookie; uint8_t transmit_data[TRANSMIT_DATA_MAX]; uint8_t receive_data[RECEIVE_DATA_MAX]; + /* PSU data for the device */ + struct juno_xrp7724_dev_psu_ctx juno_xrp7724_dev_psu; }; static const fwk_id_t juno_xrp7724_event_id_request = @@ -87,8 +162,6 @@ static int set_gpio(fwk_id_t id, struct juno_xrp7724_dev_ctx *ctx) { int status; - fwk_assert(fwk_module_is_valid_sub_element_id(id)); - ctx->transmit_data[0] = GPIO_WRITE_CMD; ctx->transmit_data[1] = fwk_id_get_sub_element_idx(id); ctx->transmit_data[2] = SET_GPIO_MASK; @@ -114,6 +187,24 @@ static uint64_t temperature_to_millidegree_celsius(uint16_t temp) return (temp * 5000ULL) - 272150; } +/* Helper function for the PSU API */ +static void alarm_callback(uintptr_t param) +{ + int status; + struct fwk_event event; + const struct juno_xrp7724_dev_ctx *ctx = + (struct juno_xrp7724_dev_ctx *)param; + + event = (struct fwk_event) { + .source_id = ctx->juno_xrp7724_dev_psu.element_id, + .target_id = ctx->juno_xrp7724_dev_psu.element_id, + .id = juno_xrp7724_event_id_request, + }; + + status = fwk_thread_put_event(&event); + fwk_assert(status == FWK_SUCCESS); +} + /* * Functions for the system mode API */ @@ -179,7 +270,6 @@ static int juno_xrp7724_sensor_get_value(fwk_id_t id, uint64_t *value) fwk_assert(fwk_module_is_valid_element_id(id)); - if (module_ctx.sensor_request != JUNO_XRP7724_SENSOR_REQUEST_IDLE) return FWK_E_BUSY; @@ -207,7 +297,6 @@ static int juno_xrp7724_sensor_get_info(fwk_id_t id, { const struct juno_xrp7724_dev_ctx *ctx; - fwk_assert(fwk_module_is_valid_element_id(id)); fwk_assert(info != NULL); ctx = &ctx_table[fwk_id_get_element_idx(id)]; @@ -223,6 +312,151 @@ static const struct mod_sensor_driver_api sensor_driver_api = { .get_info = juno_xrp7724_sensor_get_info, }; +/* + * Driver functions for the PSU API + */ +static int juno_xrp7724_set_enabled(fwk_id_t id, bool enabled) +{ + int status; + struct fwk_event event; + const struct juno_xrp7724_dev_ctx *ctx; + struct psu_set_enabled_param *param = + (struct psu_set_enabled_param *)event.params; + + ctx = &ctx_table[fwk_id_get_element_idx(id)]; + fwk_assert(ctx->config->type == MOD_JUNO_XRP7724_ELEMENT_TYPE_PSU); + + /* + * We only have a single I2C bus so only one I2C request is allowed + * at any one time. 
+ */ + if (module_ctx.psu_request != JUNO_XRP7724_PSU_REQUEST_IDLE) + return FWK_E_BUSY; + + event = (struct fwk_event) { + .target_id = id, + .id = juno_xrp7724_event_id_request, + }; + + param->enabled = enabled; + + status = fwk_thread_put_event(&event); + if (status != FWK_SUCCESS) + return status; + + module_ctx.psu_request = JUNO_XRP7724_PSU_REQUEST_SET_ENABLED; + + return FWK_PENDING; +} + +static int juno_xrp7724_get_enabled(fwk_id_t id, bool *enabled) +{ + const struct juno_xrp7724_dev_ctx *ctx; + + if (enabled == NULL) + return FWK_E_PARAM; + + ctx = &ctx_table[fwk_id_get_element_idx(id)]; + fwk_assert(ctx->config->type == MOD_JUNO_XRP7724_ELEMENT_TYPE_PSU); + + *enabled = ctx->juno_xrp7724_dev_psu.is_psu_channel_enabled; + + return FWK_SUCCESS; +} + +static int juno_xrp7724_set_voltage(fwk_id_t id, uint64_t voltage) +{ + int status; + struct fwk_event event; + uint16_t set_value; + uint32_t mvref; + uint8_t fine_adj; + uint32_t coarse_val; + const struct juno_xrp7724_dev_ctx *ctx; + struct psu_set_voltage_param *param = + (struct psu_set_voltage_param *)event.params; + + /* Platform voltage cap */ + if (voltage > PSU_MAX_VOUT_MV) + return FWK_E_RANGE; + + ctx = &ctx_table[fwk_id_get_element_idx(id)]; + fwk_assert(ctx->config->type == MOD_JUNO_XRP7724_ELEMENT_TYPE_PSU); + + if (module_ctx.psu_request != JUNO_XRP7724_PSU_REQUEST_IDLE) + return FWK_E_BUSY; + + /* Compute the number of coarse voltage steps */ + coarse_val = (voltage * 1000) / PSU_VOUT_STEP_COARSE_UV; + + /* Compute the resulting voltage in mV */ + mvref = (coarse_val * PSU_VOUT_STEP_COARSE_UV) / 1000; + + /* + * Compute the number of fine steps required to reduce the error. + * The truncation means in the worst case, we get just under 2.5mV of + * undervoltage. + */ + fine_adj = ((voltage - mvref) * 1000) / PSU_VOUT_STEP_FINE_UV; + + if (fine_adj > PSU_MAX_FINE_ADJUST) + fine_adj = PSU_MAX_FINE_ADJUST; + + set_value = (fine_adj << PSU_FINE_ADJUST_POS) | coarse_val; + + event = (struct fwk_event) { + .target_id = id, + .id = juno_xrp7724_event_id_request, + }; + + param->set_value = set_value; + param->voltage = voltage; + + status = fwk_thread_put_event(&event); + if (status != FWK_SUCCESS) + return status; + + module_ctx.psu_request = JUNO_XRP7724_PSU_REQUEST_SET_VOLTAGE; + + return FWK_PENDING; +} + +static int juno_xrp7724_get_voltage(fwk_id_t id, uint64_t *voltage) +{ + int status; + struct fwk_event event; + const struct juno_xrp7724_dev_ctx *ctx; + + if (voltage == NULL) + return FWK_E_PARAM; + + ctx = &ctx_table[fwk_id_get_element_idx(id)]; + fwk_assert(ctx->config->type == MOD_JUNO_XRP7724_ELEMENT_TYPE_PSU); + + if (module_ctx.psu_request != JUNO_XRP7724_PSU_REQUEST_IDLE) + return FWK_E_BUSY; + + event = (struct fwk_event) { + .target_id = id, + .id = juno_xrp7724_event_id_request, + }; + + status = fwk_thread_put_event(&event); + if (status != FWK_SUCCESS) + return status; + + module_ctx.psu_request = JUNO_XRP7724_PSU_REQUEST_READ_VOLTAGE; + + return FWK_PENDING; +} + +static struct mod_psu_driver_api psu_driver_api = { + .set_enabled = juno_xrp7724_set_enabled, + .get_enabled = juno_xrp7724_get_enabled, + .set_voltage = juno_xrp7724_set_voltage, + .get_voltage = juno_xrp7724_get_voltage, +}; + /* * Framework handlers */ @@ -230,9 +464,6 @@ static int juno_xrp7724_init(fwk_id_t module_id, unsigned int element_count, const void *data) { - int status; - enum juno_idx_platform platform_id; - fwk_assert(data != NULL); module_ctx.config = (struct mod_juno_xrp7724_config *)data; @@ -242,15 +473,6 @@ static int 
juno_xrp7724_init(fwk_id_t module_id, if (ctx_table == NULL) return FWK_E_NOMEM; - status = juno_id_get_platform(&platform_id); - if (!fwk_expect(status == FWK_SUCCESS)) - return FWK_E_PANIC; - - if (platform_id == JUNO_IDX_PLATFORM_FVP) { - /* XRP7724 hardware is only available in the board */ - module_ctx.driver_skipped = true; - } - return FWK_SUCCESS; } @@ -270,6 +492,13 @@ static int juno_xrp7724_element_init(fwk_id_t element_id, return FWK_E_DATA; } + if (ctx->config->type == MOD_JUNO_XRP7724_ELEMENT_TYPE_PSU) { + if (ctx->config->psu_bus_idx >= PSU_CHANNEL_COUNT) + return FWK_E_DATA; + ctx->juno_xrp7724_dev_psu.is_psu_channel_enabled = true; + ctx->juno_xrp7724_dev_psu.element_id = element_id; + } + return FWK_SUCCESS; } @@ -277,27 +506,31 @@ static int juno_xrp7724_bind(fwk_id_t id, unsigned int round) { int status; const struct mod_juno_xrp7724_config *config = module_ctx.config; - struct juno_xrp7724_dev_ctx *ctx; + const struct juno_xrp7724_dev_ctx *ctx; - /* Only bind in first round of calls and if the driver is available */ - if ((round > 0) || (module_ctx.driver_skipped)) + /* Only bind in first round of calls */ + if (round > 0) return FWK_SUCCESS; if (fwk_id_is_type(id, FWK_ID_TYPE_MODULE)) { /* Bind to I2C HAL */ - status = fwk_module_bind(config->i2c_hal_id, - FWK_ID_API(FWK_MODULE_IDX_I2C, MOD_I2C_API_IDX_I2C), + status = fwk_module_bind(config->i2c_hal_id, mod_i2c_api_id_i2c, &module_ctx.i2c_api); if (status != FWK_SUCCESS) return FWK_E_HANDLER; /* Bind to timer HAL */ - status = fwk_module_bind(config->timer_hal_id, - FWK_ID_API(FWK_MODULE_IDX_TIMER, MOD_TIMER_API_IDX_TIMER), + status = fwk_module_bind(config->timer_hal_id, MOD_TIMER_API_ID_TIMER, &module_ctx.timer_api); if (status != FWK_SUCCESS) return FWK_E_HANDLER; + /* Bind to the alarm HAL */ + status = fwk_module_bind(config->alarm_hal_id, MOD_TIMER_API_ID_ALARM, + &module_ctx.alarm_api); + if (status != FWK_SUCCESS) + return FWK_E_HANDLER; + return FWK_SUCCESS; } @@ -311,6 +544,19 @@ static int juno_xrp7724_bind(fwk_id_t id, unsigned int round) return FWK_E_HANDLER; } + if (ctx->config->type == MOD_JUNO_XRP7724_ELEMENT_TYPE_PSU) { + status = fwk_module_bind(ctx->config->driver_response_id, + ctx->config->driver_response_api_id, + &module_ctx.psu_driver_response_api); + if (status != FWK_SUCCESS) + return FWK_E_HANDLER; + + status = fwk_module_bind(ctx->config->psu_adc_id, + mod_sensor_api_id_sensor, &module_ctx.adc_api); + if (status != FWK_SUCCESS) + return FWK_E_HANDLER; + } + return FWK_SUCCESS; } @@ -319,13 +565,12 @@ static int juno_xrp7724_process_bind_request(fwk_id_t source_id, fwk_id_t api_id, const void **api) { - if (module_ctx.driver_skipped) - return FWK_E_ACCESS; - if (fwk_id_is_equal(api_id, mod_juno_xrp7724_api_id_system_mode)) *api = &system_mode_api; else if (fwk_id_is_equal(api_id, mod_juno_xrp7724_api_id_sensor)) *api = &sensor_driver_api; + else if (fwk_id_is_equal(api_id, mod_juno_xrp7724_api_id_psu)) + *api = &psu_driver_api; else return FWK_E_PARAM; @@ -342,8 +587,6 @@ static int juno_xrp7724_gpio_process_request(fwk_id_t id, int response_status) struct juno_xrp7724_dev_ctx *ctx; const struct mod_juno_xrp7724_config *config = module_ctx.config; - fwk_assert(fwk_module_is_valid_element_id(id)); - ctx = ctx_table + fwk_id_get_element_idx(id); switch (module_ctx.gpio_request) { @@ -433,7 +676,6 @@ static int juno_xrp7724_sensor_process_request(fwk_id_t id, int status) { struct juno_xrp7724_dev_ctx *ctx; uint64_t temp = 0; - int request_status = status; struct mod_sensor_driver_resp_params 
resp_params; const struct mod_juno_xrp7724_config *module_config = module_ctx.config; @@ -441,27 +683,29 @@ static int juno_xrp7724_sensor_process_request(fwk_id_t id, int status) switch (module_ctx.sensor_request) { case JUNO_XRP7724_SENSOR_REQUEST_READ_VALUE: - module_ctx.sensor_request = JUNO_XRP7724_SENSOR_REQUEST_CONVERT_VALUE; - ctx->transmit_data[0] = SENSOR_READ_TEMP; - - request_status = + status = module_ctx.i2c_api->transmit_then_receive_as_master( module_config->i2c_hal_id, module_config->slave_address, ctx->transmit_data, ctx->receive_data, SENSOR_WRITE_LENGTH, SENSOR_READ_LENGTH); - if (request_status == FWK_PENDING) - return FWK_SUCCESS; + if (status == FWK_PENDING) { + module_ctx.sensor_request = + JUNO_XRP7724_SENSOR_REQUEST_CONVERT_VALUE; + return FWK_SUCCESS; + } - break; + if (status != FWK_SUCCESS) + break; + /* FALLTHRU */ case JUNO_XRP7724_SENSOR_REQUEST_CONVERT_VALUE: /* - * The request_status parameter contains the I2C transaction status. + * The status parameter contains the I2C transaction status. * The conversion is done if the read of the sensor value has been * successful. */ - if (request_status == FWK_SUCCESS) { + if (status == FWK_SUCCESS) { temp = temperature_to_millidegree_celsius( ((uint16_t)ctx->receive_data[0] << 8) | ctx->receive_data[1]); } @@ -469,12 +713,12 @@ static int juno_xrp7724_sensor_process_request(fwk_id_t id, int status) break; default: - request_status = FWK_E_PARAM; + status = FWK_E_PARAM; } module_ctx.sensor_request = JUNO_XRP7724_SENSOR_REQUEST_IDLE; - resp_params.status = request_status; + resp_params.status = status; resp_params.value = temp; module_ctx.sensor_driver_response_api->reading_complete( @@ -484,6 +728,181 @@ static int juno_xrp7724_sensor_process_request(fwk_id_t id, int status) return FWK_SUCCESS; } + +static int juno_xrp7724_psu_process_request(fwk_id_t id, + const uint8_t *event_params, int status) +{ + uint16_t set_value; + uint64_t adc_val; + struct juno_xrp7724_dev_ctx *ctx; + struct mod_psu_driver_response driver_response; + const struct mod_juno_xrp7724_config *module_config = module_ctx.config; + + ctx = &ctx_table[fwk_id_get_element_idx(id)]; + + switch (module_ctx.psu_request) { + case JUNO_XRP7724_PSU_REQUEST_READ_VOLTAGE: + module_ctx.psu_request = JUNO_XRP7724_PSU_REQUEST_CONVERT_VOLTAGE; + + ctx->transmit_data[0] = PSU_PWR_GET_VOLTAGE_CHx + + ctx->config->psu_bus_idx; + + status = module_ctx.i2c_api->transmit_then_receive_as_master( + module_config->i2c_hal_id, module_config->slave_address, + ctx->transmit_data, ctx->receive_data, 1, PSU_READ_LENGTH); + if (status == FWK_PENDING) + return FWK_SUCCESS; + + if (status != FWK_SUCCESS) + break; + + /* FALLTHRU */ + + case JUNO_XRP7724_PSU_REQUEST_CONVERT_VOLTAGE: + /* + * If the I2C transaction completed successfully convert the voltage + */ + if (status == FWK_SUCCESS) { + driver_response.voltage = (((uint16_t)ctx->receive_data[0] << 8) | + ctx->receive_data[1]) * PSU_MVOUT_SCALE_READ; + } + + break; + + case JUNO_XRP7724_PSU_REQUEST_SET_VOLTAGE: + module_ctx.psu_request = JUNO_XRP7724_PSU_REQUEST_WAIT_FOR_VOLTAGE; + + set_value = ((struct psu_set_voltage_param *)event_params)->set_value; + ctx->juno_xrp7724_dev_psu.psu_set_voltage = + ((struct psu_set_voltage_param *)event_params)->voltage; + + ctx->transmit_data[0] = PSU_PWR_SET_VOLTAGE_CHx + + ctx->config->psu_bus_idx; + ctx->transmit_data[1] = (uint8_t)(set_value >> 8); + ctx->transmit_data[2] = (uint8_t)(set_value & 0xFF); + + status = module_ctx.i2c_api->transmit_as_master( + module_config->i2c_hal_id, 
module_config->slave_address, + ctx->transmit_data, PSU_WRITE_LENGTH); + if (status == FWK_PENDING) + return FWK_SUCCESS; + + if (status != FWK_SUCCESS) + break; + + /* FALLTHRU */ + + case JUNO_XRP7724_PSU_REQUEST_WAIT_FOR_VOLTAGE: + /* Check I2C response status */ + if (status != FWK_SUCCESS) + break; + + /* + * If the channel is currently enabled then the voltage will ramp to the + * new value and it is necessary to wait for it to stabilize before + * checking the final output voltage using the board's ADC. + * + * If the channel is not enabled then there is no need to wait or read + * back the output voltage. + */ + if (ctx->juno_xrp7724_dev_psu.is_psu_channel_enabled) { + status = module_ctx.alarm_api->start(module_config->alarm_hal_id, + PSU_RAMP_DELAY_SET_MS, MOD_TIMER_ALARM_TYPE_ONCE, + alarm_callback, (uintptr_t)ctx); + if (status != FWK_SUCCESS) + break; + + module_ctx.psu_request = JUNO_XRP7724_PSU_REQUEST_COMPARE_VOLTAGE; + + return FWK_SUCCESS; + } + + /* + * If channel is not enabled there is nothing more to do. + */ + status = FWK_SUCCESS; + break; + + case JUNO_XRP7724_PSU_REQUEST_COMPARE_VOLTAGE: + status = module_ctx.adc_api->get_value(ctx->config->psu_adc_id, + &adc_val); + if (status != FWK_SUCCESS) + break; + + if (((adc_val + PSU_TARGET_MARGIN_MV) < + ctx->juno_xrp7724_dev_psu.psu_set_voltage) || + ((adc_val - PSU_TARGET_MARGIN_MV) > + ctx->juno_xrp7724_dev_psu.psu_set_voltage)) + status = FWK_E_DEVICE; + + break; + + case JUNO_XRP7724_PSU_REQUEST_SET_ENABLED: + module_ctx.psu_request = JUNO_XRP7724_PSU_REQUEST_WAIT_FOR_ENABLED; + + ctx->juno_xrp7724_dev_psu.psu_set_enabled = + ((struct psu_set_enabled_param *)event_params)->enabled; + ctx->transmit_data[0] = PSU_PWR_ENABLE_SUPPLY, + ctx->transmit_data[1] = ctx->config->psu_bus_idx; + ctx->transmit_data[2] = ctx->juno_xrp7724_dev_psu.psu_set_enabled ? + PSU_CHANNEL_ENABLED : PSU_CHANNEL_DISABLED; + + status = module_ctx.i2c_api->transmit_as_master( + module_config->i2c_hal_id, module_config->slave_address, + ctx->transmit_data, PSU_WRITE_LENGTH); + if (status == FWK_PENDING) + return FWK_SUCCESS; + + if (status != FWK_SUCCESS) + break; + + /* FALLTHRU */ + + case JUNO_XRP7724_PSU_REQUEST_WAIT_FOR_ENABLED: + /* Check I2C response status */ + if (status == FWK_SUCCESS) { + /* + * The transaction was successful so the stored channel + * status can be updated. + */ + ctx->juno_xrp7724_dev_psu.is_psu_channel_enabled = + ctx->juno_xrp7724_dev_psu.psu_set_enabled; + + /* + * Wait a fixed time for the voltage to stabilize. + */ + if (ctx->juno_xrp7724_dev_psu.is_psu_channel_enabled) { + status = module_ctx.alarm_api->start( + module_config->alarm_hal_id, PSU_RAMP_DELAY_ENABLE_MS, + MOD_TIMER_ALARM_TYPE_ONCE, alarm_callback, (uintptr_t)ctx); + if (status != FWK_SUCCESS) + break; + + module_ctx.psu_request = JUNO_XRP7724_PSU_REQUEST_DONE_ENABLED; + return FWK_SUCCESS; + } else + status = FWK_SUCCESS; + } + + break; + + case JUNO_XRP7724_PSU_REQUEST_DONE_ENABLED: + status = FWK_SUCCESS; + break; + + default: + return FWK_E_SUPPORT; + } + + driver_response.status = status; + module_ctx.psu_request = JUNO_XRP7724_PSU_REQUEST_IDLE; + + module_ctx.psu_driver_response_api->respond(ctx->config->driver_response_id, + driver_response); + + return FWK_SUCCESS; +} + /* * This function is called when a request event is received for the XRP7724 and * when an I2C transaction is completed. 
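/*
 * Illustrative worked example (the numbers are chosen for illustration and
 * are not taken from the patch) of the set_value encoding computed in
 * juno_xrp7724_set_voltage() above, for a request of 920 mV:
 *
 *   coarse_val = (920 * 1000) / PSU_VOUT_STEP_COARSE_UV
 *              = 920000 / 12500          = 73   (73 coarse steps = 912.5 mV)
 *   mvref      = (73 * 12500) / 1000     = 912  (truncated to whole mV)
 *   fine_adj   = ((920 - 912) * 1000) / PSU_VOUT_STEP_FINE_UV
 *              = 8000 / 2500             = 3    (3 fine steps = 7.5 mV)
 *   set_value  = (3 << PSU_FINE_ADJUST_POS) | 73 = 0x3049
 *
 * The programmed output is therefore approximately 912.5 + 7.5 = 920 mV; the
 * truncation of fine_adj is what bounds the worst case to just under 2.5 mV
 * of undervoltage, as noted in the driver comment.
 */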
@@ -496,8 +915,6 @@ static int juno_xrp7724_process_event(const struct fwk_event *event, struct mod_i2c_event_param *param = (struct mod_i2c_event_param *)event->params; - fwk_assert(fwk_module_is_valid_element_id(event->target_id)); - ctx = ctx_table + fwk_id_get_element_idx(event->target_id); switch (ctx->config->type) { @@ -518,6 +935,9 @@ static int juno_xrp7724_process_event(const struct fwk_event *event, return juno_xrp7724_sensor_process_request(event->target_id, param->status); + case MOD_JUNO_XRP7724_ELEMENT_TYPE_PSU: + return juno_xrp7724_psu_process_request(event->target_id, + event->params, param->status); default: return FWK_E_PARAM; } diff --git a/product/juno/scp_ramfw/config_juno_xrp7724.c b/product/juno/scp_ramfw/config_juno_xrp7724.c index 1e1ec69e6..dc99a00f6 100644 --- a/product/juno/scp_ramfw/config_juno_xrp7724.c +++ b/product/juno/scp_ramfw/config_juno_xrp7724.c @@ -11,10 +11,13 @@ #include #include #include +#include #include #include +#include #include #include +#include enum mod_juno_xrp7724_gpio_idx { MOD_JUNO_XRP7724_GPIO_IDX_ASSERT, @@ -50,7 +53,59 @@ static const struct fwk_element juno_xrp7724_element_table[] = { .data = &(const struct mod_juno_xrp7724_dev_config) { .type = MOD_JUNO_XRP7724_ELEMENT_TYPE_GPIO, }, - }, + }, + [MOD_JUNO_XRP7724_ELEMENT_IDX_PSU_VSYS] = { + .name = "VSYS", + .data = &(const struct mod_juno_xrp7724_dev_config) { + .driver_response_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_PSU, + MOD_PSU_ELEMENT_IDX_VSYS), + .driver_response_api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_PSU, + MOD_PSU_API_IDX_DRIVER_RESPONSE), + .psu_adc_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_SENSOR, + MOD_JUNO_SENSOR_VOLT_SYS_IDX), + .psu_bus_idx = 0, + .type = MOD_JUNO_XRP7724_ELEMENT_TYPE_PSU, + }, + }, + [MOD_JUNO_XRP7724_ELEMENT_IDX_PSU_VBIG] = { + .name = "VBIG", + .data = &(const struct mod_juno_xrp7724_dev_config) { + .driver_response_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_PSU, + MOD_PSU_ELEMENT_IDX_VBIG), + .driver_response_api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_PSU, + MOD_PSU_API_IDX_DRIVER_RESPONSE), + .psu_adc_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_SENSOR, + MOD_JUNO_SENSOR_VOLT_BIG_IDX), + .psu_bus_idx = 1, + .type = MOD_JUNO_XRP7724_ELEMENT_TYPE_PSU, + }, + }, + [MOD_JUNO_XRP7724_ELEMENT_IDX_PSU_VLITTLE] = { + .name = "VLITTLE", + .data = &(const struct mod_juno_xrp7724_dev_config) { + .driver_response_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_PSU, + MOD_PSU_ELEMENT_IDX_VLITTLE), + .driver_response_api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_PSU, + MOD_PSU_API_IDX_DRIVER_RESPONSE), + .psu_adc_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_SENSOR, + MOD_JUNO_SENSOR_VOLT_LITTLE_IDX), + .psu_bus_idx = 2, + .type = MOD_JUNO_XRP7724_ELEMENT_TYPE_PSU, + }, + }, + [MOD_JUNO_XRP7724_ELEMENT_IDX_PSU_VGPU] = { + .name = "VGPU", + .data = &(const struct mod_juno_xrp7724_dev_config) { + .driver_response_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_PSU, + MOD_PSU_ELEMENT_IDX_VGPU), + .driver_response_api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_PSU, + MOD_PSU_API_IDX_DRIVER_RESPONSE), + .psu_adc_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_SENSOR, + MOD_JUNO_SENSOR_VOLT_GPU_IDX), + .psu_bus_idx = 3, + .type = MOD_JUNO_XRP7724_ELEMENT_TYPE_PSU, + }, + }, [MOD_JUNO_XRP7724_ELEMENT_IDX_COUNT] = { 0 }, }; @@ -67,6 +122,8 @@ const struct fwk_module_config config_juno_xrp7724 = { .slave_address = 0x28, .i2c_hal_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_I2C, 0), .timer_hal_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_TIMER, 0), + .alarm_hal_id = FWK_ID_SUB_ELEMENT_INIT(FWK_MODULE_IDX_TIMER, 0, + 
JUNO_XRP7724_ALARM_IDX), .gpio_assert_id = FWK_ID_SUB_ELEMENT_INIT(FWK_MODULE_IDX_JUNO_XRP7724, MOD_JUNO_XRP7724_ELEMENT_IDX_GPIO, MOD_JUNO_XRP7724_GPIO_IDX_ASSERT), diff --git a/product/juno/scp_ramfw/config_juno_xrp7724.h b/product/juno/scp_ramfw/config_juno_xrp7724.h index 78e6714ee..917a48015 100644 --- a/product/juno/scp_ramfw/config_juno_xrp7724.h +++ b/product/juno/scp_ramfw/config_juno_xrp7724.h @@ -12,6 +12,10 @@ enum mod_juno_xrp7724_element_idx { MOD_JUNO_XRP7724_ELEMENT_IDX_SENSOR, MOD_JUNO_XRP7724_ELEMENT_IDX_GPIO, + MOD_JUNO_XRP7724_ELEMENT_IDX_PSU_VSYS, + MOD_JUNO_XRP7724_ELEMENT_IDX_PSU_VBIG, + MOD_JUNO_XRP7724_ELEMENT_IDX_PSU_VLITTLE, + MOD_JUNO_XRP7724_ELEMENT_IDX_PSU_VGPU, MOD_JUNO_XRP7724_ELEMENT_IDX_COUNT }; diff --git a/product/juno/scp_ramfw/config_psu.c b/product/juno/scp_ramfw/config_psu.c new file mode 100644 index 000000000..b6849730f --- /dev/null +++ b/product/juno/scp_ramfw/config_psu.c @@ -0,0 +1,63 @@ +/* + * Arm SCP/MCP Software + * Copyright (c) 2019, Arm Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include +#include +#include +#include +#include +#include +#include + +static struct fwk_element psu_dev_desc_table[] = { + [MOD_PSU_ELEMENT_IDX_VSYS] = { + .name = "VSYS", + .data = &(const struct mod_psu_element_cfg) { + .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_JUNO_XRP7724, + MOD_JUNO_XRP7724_ELEMENT_IDX_PSU_VSYS), + .driver_api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_JUNO_XRP7724, + MOD_JUNO_XRP7724_API_IDX_PSU), + }, + }, + [MOD_PSU_ELEMENT_IDX_VBIG] = { + .name = "VBIG", + .data = &(const struct mod_psu_element_cfg) { + .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_JUNO_XRP7724, + MOD_JUNO_XRP7724_ELEMENT_IDX_PSU_VBIG), + .driver_api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_JUNO_XRP7724, + MOD_JUNO_XRP7724_API_IDX_PSU), + }, + }, + [MOD_PSU_ELEMENT_IDX_VLITTLE] = { + .name = "VLITTLE", + .data = &(const struct mod_psu_element_cfg) { + .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_JUNO_XRP7724, + MOD_JUNO_XRP7724_ELEMENT_IDX_PSU_VLITTLE), + .driver_api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_JUNO_XRP7724, + MOD_JUNO_XRP7724_API_IDX_PSU), + }, + }, + [MOD_PSU_ELEMENT_IDX_VGPU] = { + .name = "VGPU", + .data = &(const struct mod_psu_element_cfg) { + .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_JUNO_XRP7724, + MOD_JUNO_XRP7724_ELEMENT_IDX_PSU_VGPU), + .driver_api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_JUNO_XRP7724, + MOD_JUNO_XRP7724_API_IDX_PSU), + }, + }, + [MOD_PSU_ELEMENT_IDX_COUNT] = { 0 }, +}; + +static const struct fwk_element *psu_get_dev_desc_table(fwk_id_t module_id) +{ + return psu_dev_desc_table; +} + +struct fwk_module_config config_psu = { + .get_element_table = psu_get_dev_desc_table, +}; diff --git a/product/juno/scp_ramfw/config_psu.h b/product/juno/scp_ramfw/config_psu.h new file mode 100644 index 000000000..a231c83ca --- /dev/null +++ b/product/juno/scp_ramfw/config_psu.h @@ -0,0 +1,22 @@ +/* + * Arm SCP/MCP Software + * Copyright (c) 2019, Arm Limited and Contributors. All rights reserved.
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef CONFIG_PSU_H +#define CONFIG_PSU_H + +/* PSU indices for Juno */ +enum mod_juno_psu_element_idx { + MOD_PSU_ELEMENT_IDX_VSYS, + MOD_PSU_ELEMENT_IDX_VBIG, + MOD_PSU_ELEMENT_IDX_VLITTLE, + MOD_PSU_ELEMENT_IDX_VGPU, + + /* Number of PSU elements */ + MOD_PSU_ELEMENT_IDX_COUNT +}; + +#endif /* CONFIG_PSU_H */ diff --git a/product/juno/scp_ramfw/firmware.mk b/product/juno/scp_ramfw/firmware.mk index 1e079f6d6..205e14805 100644 --- a/product/juno/scp_ramfw/firmware.mk +++ b/product/juno/scp_ramfw/firmware.mk @@ -42,7 +42,8 @@ BS_FIRMWARE_MODULES := \ juno_adc \ juno_xrp7724 \ sensor \ - reg_sensor + reg_sensor \ + psu BS_FIRMWARE_SOURCES := \ rtx_config.c \ @@ -70,6 +71,7 @@ BS_FIRMWARE_SOURCES := \ config_juno_adc.c \ config_juno_xrp7724.c \ config_sensor.c \ - config_reg_sensor.c + config_reg_sensor.c \ + config_psu.c include $(BS_DIR)/firmware.mk -- GitLab
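Taken together with the DVFS changes earlier in the series, the new PSU elements are intended to be driven through the mod_psu HAL rather than by calling the XRP7724 driver directly. The fragment below is a sketch of that client-side flow only; the API struct name mod_psu_device_api and the exact set_voltage() prototype are assumptions based on the usage visible in the DVFS module (voltages in millivolts, FWK_PENDING for deferred completion), not definitions taken from these patches.

    /* Sketch: request a new voltage on a PSU element such as VBIG */
    static int example_set_vbig(const struct mod_psu_device_api *psu_api,
        fwk_id_t vbig_psu_id)
    {
        /* Voltage is expressed in millivolts, as in the XRP7724 driver */
        int status = psu_api->set_voltage(vbig_psu_id, 900);

        if (status == FWK_PENDING) {
            /*
             * Completion is asynchronous: the driver performs the I2C write,
             * waits for the supply to settle, checks the ADC reading, and
             * mod_psu then reports the final status back to the caller.
             */
            return FWK_SUCCESS;
        }

        return status;
    }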