diff --git a/ethosu/regor/compiler/graph_optimiser.cpp b/ethosu/regor/compiler/graph_optimiser.cpp
index 4a456a6f59e846a2beecea38c416a9c3222bea83..7f0d9cf171a91df09e40fb1497c36bb82d55ac1c 100644
--- a/ethosu/regor/compiler/graph_optimiser.cpp
+++ b/ethosu/regor/compiler/graph_optimiser.cpp
@@ -147,9 +147,10 @@ Operation *GraphOptimiser::RemoveReshape(Graph *const graph, Operation *const op
 
     // Check if ifm/ofm are network ifm/ofm or constant
     bool isIfmConst = ifm->IsConstant();
-    bool isIfmSgIfm = IsTensorInVector(graph->Inputs(), ifm);
-    bool isOfmSgOfm = IsTensorInVector(graph->Outputs(), ofm);
-    bool isIfmSgOfm = IsTensorInVector(graph->Outputs(), ifm);
+    // Determine whether the tensors belong to the graph IO using the dedicated helpers on Graph
+    bool isIfmSgIfm = graph->IsInput(ifm);
+    bool isOfmSgOfm = graph->IsOutput(ofm);
+    bool isIfmSgOfm = graph->IsOutput(ifm);
 
     // Check if ifm/ofm is produced/consumed by a CPU operation
     auto isPassthroughOp = [](const std::shared_ptr<Operation> &op) { return op->Type() == OpType::Passthrough; };
diff --git a/ethosu/regor/compiler/graphir_optimiser.cpp b/ethosu/regor/compiler/graphir_optimiser.cpp
index 0ce06bb451b7b3d122ab7115948e886315c3dbdc..7aa25a8b7ea688666fe05123acbca4566d4ca50c 100644
--- a/ethosu/regor/compiler/graphir_optimiser.cpp
+++ b/ethosu/regor/compiler/graphir_optimiser.cpp
@@ -1151,7 +1151,7 @@ Operation *GraphIrOptimiser::FuseRescale(Graph *const graph, Operation *const op
     // Note (ZeroPoints): For input fusing we cannot have an output zero point on the Rescale operation (since the
     // zero point is applied before scaling on inputs), however input zero point is fine.
     if ( ofmConn->tensor->Readers().size() == 1 && ofmConn->quantization.zeroPoints == Quantization::Unit().zeroPoints &&
-         !IsTensorInVector(graph->Outputs(), ofmConn->tensor.get()) )
+         !graph->IsOutput(ofmConn->tensor.get()) )
     {
         // Propagate rescaling to input of next op
         auto consumer = ofmConn->tensor->Readers().front();
@@ -1193,8 +1193,7 @@ Operation *GraphIrOptimiser::FuseRescale(Graph *const graph, Operation *const op
                {
                    // Check if the other ifm rescale can be fused
                    auto opSign = otherProducer->Attribute();
-                   bool otherFusedTensorInGraphOutputs = IsTensorInVector(
-                       graph->Outputs(), consumerOtherIfmCon->tensor.get());
+                   bool otherFusedTensorInGraphOutputs = graph->IsOutput(consumerOtherIfmCon->tensor.get());
                    bool otherRescaleUnsigned = opSign ? (opSign->input_unsigned || opSign->output_unsigned) : false;
                    auto otherIfmQuant = otherProducer->Input(TensorUsage::IFM)->quantization;
 
@@ -1240,7 +1239,7 @@ Operation *GraphIrOptimiser::FuseRescale(Graph *const graph, Operation *const op
             producer->Output(TensorUsage::OFM)->quantization.EqualScales(Quantization::Unit()) &&
             ifmConn->quantization.zeroPoints == Quantization::Unit().zeroPoints &&
             // fused tensor cannot be in graph-outputs
-            !IsTensorInVector(graph->Outputs(), ifmConn->tensor.get()) &&
+            !graph->IsOutput(ifmConn->tensor.get()) &&
             _constraints->SupportsFusedRescale(producer->Type(), TensorUsage::OFM, ifmConn->tensor->Type(),
                 ofmConn->tensor->Type(), producer->IFM(0)->Type(), producer->OFM()->Type(), ofmQuant) )
        {
diff --git a/ethosu/regor/compiler/optimiser_utils.cpp b/ethosu/regor/compiler/optimiser_utils.cpp
index bcf90fda4c3c6ec8abf61fb6aa1ed52ae8965b1f..4f51636ee9dbe8d432584881a8e465f06b478e7f 100644
--- a/ethosu/regor/compiler/optimiser_utils.cpp
+++ b/ethosu/regor/compiler/optimiser_utils.cpp
@@ -23,16 +23,6 @@
 namespace regor::GraphOptimisation
 {
 
-// Find specified tensor in Inputs() / Outputs() vectors.
-// returns true if found in given vector.
-bool IsTensorInVector(const std::vector<std::shared_ptr<Tensor>> &tensorVec, const Tensor *const tensorToFind)
-{
-    auto pos = std::find_if(
-        tensorVec.begin(), tensorVec.end(), [&](const std::shared_ptr<Tensor> &t) { return t.get() == tensorToFind; });
-
-    return (pos != tensorVec.end());
-}
-
 // Insert a MemoryCopy operation after given ifm tensor. Returns a copy op shared_ptr.
 // Will make a clone of ifm as ofm and connects any other consumers of the ifm to it.
 std::shared_ptr<Operation> InsertCopyOpAfterTensor(const std::shared_ptr<Tensor> &ifm, const Quantization &quantization)
diff --git a/ethosu/regor/compiler/optimiser_utils.hpp b/ethosu/regor/compiler/optimiser_utils.hpp
index 9c8f243e2fbe6ff29d4dde8d23dc2c2406dcc89f..5ae17e77d81e819aeed32782c49e266b57c170c8 100644
--- a/ethosu/regor/compiler/optimiser_utils.hpp
+++ b/ethosu/regor/compiler/optimiser_utils.hpp
@@ -1,5 +1,5 @@
 //
-// SPDX-FileCopyrightText: Copyright 2024 Arm Limited and/or its affiliates
+// SPDX-FileCopyrightText: Copyright 2024-2025 Arm Limited and/or its affiliates
 //
 // SPDX-License-Identifier: Apache-2.0
 //
@@ -26,9 +26,6 @@
 namespace regor::GraphOptimisation
 {
 
-// Find specified tensor in Inputs() / Outputs() vectors.
-// returns true if found in given vector.
-bool IsTensorInVector(const std::vector<std::shared_ptr<Tensor>> &tensorVec, const Tensor *const tensorToFind);
 
 // Insert a MemoryCopy operation after given ifm tensor. Returns a copy op shared_ptr.
 // Will make a clone of ifm as ofm and connects any other consumers of the ifm to it.
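
Note: this patch replaces the free helper IsTensorInVector() with calls to graph->IsInput()/graph->IsOutput(), whose declarations live in graph.hpp and are not shown here. As a point of reference only, below is a minimal, self-contained sketch of what such member helpers could look like if Graph keeps its IO tensors in std::vector<std::shared_ptr<Tensor>> members, as the removed free function expected. The member names _inputs/_outputs and the stand-in Tensor type are assumptions for illustration, not the actual regor implementation.

    #include <algorithm>
    #include <memory>
    #include <vector>

    // Stand-in type; regor's real Tensor class is defined elsewhere.
    struct Tensor {};

    class Graph
    {
    public:
        // Equivalent of the removed IsTensorInVector(), specialised for the
        // graph's own input/output lists: compare raw pointers against the
        // shared_ptr elements and report whether the tensor is graph IO.
        bool IsInput(const Tensor *tensor) const { return Contains(_inputs, tensor); }
        bool IsOutput(const Tensor *tensor) const { return Contains(_outputs, tensor); }

    private:
        static bool Contains(const std::vector<std::shared_ptr<Tensor>> &vec, const Tensor *tensor)
        {
            return std::any_of(vec.begin(), vec.end(),
                [&](const std::shared_ptr<Tensor> &t) { return t.get() == tensor; });
        }

        std::vector<std::shared_ptr<Tensor>> _inputs;   // assumed member layout
        std::vector<std::shared_ptr<Tensor>> _outputs;  // assumed member layout
    };

Moving the check onto Graph keeps the pointer-comparison detail in one place and lets call sites state intent (IsInput/IsOutput) rather than which vector to search.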