From c410198c17a084b1a01af80dae49cf8e72ce0a79 Mon Sep 17 00:00:00 2001 From: William Isaksson Date: Mon, 23 Jun 2025 18:43:44 +0200 Subject: [PATCH] MLBEDSW-10284: Refactor: Remove IsTensorInVector Replace usages with Graph::IsInput/IsOutput and delete redundant helper. Change-Id: Ib7e0635005576a1a1b0ff803c43499979106cf58 Signed-off-by: William Isaksson --- ethosu/regor/compiler/graph_optimiser.cpp | 7 ++++--- ethosu/regor/compiler/graphir_optimiser.cpp | 7 +++---- ethosu/regor/compiler/optimiser_utils.cpp | 10 ---------- ethosu/regor/compiler/optimiser_utils.hpp | 5 +---- 4 files changed, 8 insertions(+), 21 deletions(-) diff --git a/ethosu/regor/compiler/graph_optimiser.cpp b/ethosu/regor/compiler/graph_optimiser.cpp index 4a456a6f..7f0d9cf1 100644 --- a/ethosu/regor/compiler/graph_optimiser.cpp +++ b/ethosu/regor/compiler/graph_optimiser.cpp @@ -147,9 +147,10 @@ Operation *GraphOptimiser::RemoveReshape(Graph *const graph, Operation *const op // Check if ifm/ofm are network ifm/ofm or constant bool isIfmConst = ifm->IsConstant(); - bool isIfmSgIfm = IsTensorInVector(graph->Inputs(), ifm); - bool isOfmSgOfm = IsTensorInVector(graph->Outputs(), ofm); - bool isIfmSgOfm = IsTensorInVector(graph->Outputs(), ifm); + // Determine whether the tensors belong to the graph IO using the dedicated helpers on Graph + bool isIfmSgIfm = graph->IsInput(ifm); + bool isOfmSgOfm = graph->IsOutput(ofm); + bool isIfmSgOfm = graph->IsOutput(ifm); // Check if ifm/ofm is produced/consumed by a CPU operation auto isPassthroughOp = [](const std::shared_ptr<Operation> &op) { return op->Type() == OpType::Passthrough; }; diff --git a/ethosu/regor/compiler/graphir_optimiser.cpp b/ethosu/regor/compiler/graphir_optimiser.cpp index 0ce06bb4..7aa25a8b 100644 --- a/ethosu/regor/compiler/graphir_optimiser.cpp +++ b/ethosu/regor/compiler/graphir_optimiser.cpp @@ -1151,7 +1151,7 @@ Operation *GraphIrOptimiser::FuseRescale(Graph *const graph, Operation *const op // Note (ZeroPoints): For input fusing we cannot 
have an output zero point on the Rescale operation (since the // zero point is applied before scaling on inputs), however input zero point is fine. if ( ofmConn->tensor->Readers().size() == 1 && ofmConn->quantization.zeroPoints == Quantization::Unit().zeroPoints && - !IsTensorInVector(graph->Outputs(), ofmConn->tensor.get()) ) + !graph->IsOutput(ofmConn->tensor.get()) ) { // Propagate rescaling to input of next op auto consumer = ofmConn->tensor->Readers().front(); @@ -1193,8 +1193,7 @@ Operation *GraphIrOptimiser::FuseRescale(Graph *const graph, Operation *const op { // Check if the other ifm rescale can be fused auto opSign = otherProducer->Attribute(); - bool otherFusedTensorInGraphOutputs = IsTensorInVector( - graph->Outputs(), consumerOtherIfmCon->tensor.get()); + bool otherFusedTensorInGraphOutputs = graph->IsOutput(consumerOtherIfmCon->tensor.get()); bool otherRescaleUnsigned = opSign ? (opSign->input_unsigned || opSign->output_unsigned) : false; auto otherIfmQuant = otherProducer->Input(TensorUsage::IFM)->quantization; @@ -1240,7 +1239,7 @@ Operation *GraphIrOptimiser::FuseRescale(Graph *const graph, Operation *const op producer->Output(TensorUsage::OFM)->quantization.EqualScales(Quantization::Unit()) && ifmConn->quantization.zeroPoints == Quantization::Unit().zeroPoints && // fused tensor cannot be in graph-outputs - !IsTensorInVector(graph->Outputs(), ifmConn->tensor.get()) && + !graph->IsOutput(ifmConn->tensor.get()) && _constraints->SupportsFusedRescale(producer->Type(), TensorUsage::OFM, ifmConn->tensor->Type(), ofmConn->tensor->Type(), producer->IFM(0)->Type(), producer->OFM()->Type(), ofmQuant) ) { diff --git a/ethosu/regor/compiler/optimiser_utils.cpp b/ethosu/regor/compiler/optimiser_utils.cpp index bcf90fda..4f51636e 100644 --- a/ethosu/regor/compiler/optimiser_utils.cpp +++ b/ethosu/regor/compiler/optimiser_utils.cpp @@ -23,16 +23,6 @@ namespace regor::GraphOptimisation { -// Find specified tensor in Inputs() / Outputs() vectors. 
-// returns true if found in given vector. -bool IsTensorInVector(const std::vector<std::shared_ptr<Tensor>> &tensorVec, const Tensor *const tensorToFind) -{ - auto pos = std::find_if( - tensorVec.begin(), tensorVec.end(), [&](const std::shared_ptr<Tensor> &t) { return t.get() == tensorToFind; }); - - return (pos != tensorVec.end()); -} - // Insert a MemoryCopy operation after given ifm tensor. Returns a copy op shared_ptr. // Will make a clone of ifm as ofm and connects any other consumers of the ifm to it. std::shared_ptr<Operation> InsertCopyOpAfterTensor(const std::shared_ptr<Tensor> &ifm, const Quantization &quantization) diff --git a/ethosu/regor/compiler/optimiser_utils.hpp b/ethosu/regor/compiler/optimiser_utils.hpp index 9c8f243e..5ae17e77 100644 --- a/ethosu/regor/compiler/optimiser_utils.hpp +++ b/ethosu/regor/compiler/optimiser_utils.hpp @@ -1,5 +1,5 @@ // -// SPDX-FileCopyrightText: Copyright 2024 Arm Limited and/or its affiliates +// SPDX-FileCopyrightText: Copyright 2024-2025 Arm Limited and/or its affiliates // // SPDX-License-Identifier: Apache-2.0 // @@ -26,9 +26,6 @@ namespace regor::GraphOptimisation { -// Find specified tensor in Inputs() / Outputs() vectors. -// returns true if found in given vector. -bool IsTensorInVector(const std::vector<std::shared_ptr<Tensor>> &tensorVec, const Tensor *const tensorToFind); // Insert a MemoryCopy operation after given ifm tensor. Returns a copy op shared_ptr. // Will make a clone of ifm as ofm and connects any other consumers of the ifm to it. -- GitLab