From affb48913367dde63966046b2faa740cc61ca165 Mon Sep 17 00:00:00 2001
From: Johan Gunnarsson
Date: Tue, 22 Apr 2025 17:54:18 +0200
Subject: [PATCH] MLBEDSW-10634: Handle shapeless Conv2D/DepthwiseConv2D better

* Allow shapeless Convolutions in semantics checker.
* Check for valid shape when calculating padding.
* Check for valid shape when calculating output shape.

Signed-off-by: Johan Gunnarsson
Change-Id: Ib7d55fcd4f3323f6de01396d56c061f3ab3c5805
---
 .../regor/tflite/tflite_model_semantics.cpp | 84 +++++++++++--------
 ethosu/regor/tflite/tflite_reader.cpp       | 10 ++-
 2 files changed, 55 insertions(+), 39 deletions(-)

diff --git a/ethosu/regor/tflite/tflite_model_semantics.cpp b/ethosu/regor/tflite/tflite_model_semantics.cpp
index 3d1b3a6f..ca33f241 100644
--- a/ethosu/regor/tflite/tflite_model_semantics.cpp
+++ b/ethosu/regor/tflite/tflite_model_semantics.cpp
@@ -313,17 +313,22 @@ void ConstraintConvGroupsIfmDepth(const Operator &op, const SubGraph &subgraph,
 {
     auto ifm = TensorFromUsage(regor::TensorUsage::IFM, op, builtinOperator, *subgraph.tensors());
     auto weights = TensorFromUsage(regor::TensorUsage::Weights, op, builtinOperator, *subgraph.tensors());
-    auto ifmDepth = ShapeFromTens(ifm)[-1];
-    auto kernelIc = ShapeFromTens(weights)[-1];
-    if ( kernelIc == 0 || kernelIc < 0 || ifmDepth < 0 )
-    {
-        throw std::runtime_error("Error: Out of bounds\n");
-    }
-    if ( ifmDepth % kernelIc != 0 )
+    auto ifmShape = ShapeFromTens(ifm);
+    auto weightShape = ShapeFromTens(weights);
+    if ( ifmShape && weightShape )
     {
-        std::string constraint = "IFM depth must be a whole multiple of the filter kernel depth";
-        std::string extra = fmt::format("IFM depth = {} and filter kernel depth = {}", ifmDepth, kernelIc);
-        throw InvalidTfLiteException(constraint, extra, op, subgraph, builtinOperator);
+        auto ifmDepth = ifmShape[-1];
+        auto kernelIc = weightShape[-1];
+        if ( kernelIc == 0 || kernelIc < 0 || ifmDepth < 0 )
+        {
+            throw std::runtime_error("Error: Out of bounds\n");
+        }
+        if ( ifmDepth % kernelIc != 0 )
+        {
+            std::string constraint = "IFM depth must be a whole multiple of the filter kernel depth";
+            std::string extra = fmt::format("IFM depth = {} and filter kernel depth = {}", ifmDepth, kernelIc);
+            throw InvalidTfLiteException(constraint, extra, op, subgraph, builtinOperator);
+        }
     }
 }
 
@@ -331,20 +336,24 @@ void ConstraintConvGroupsNumFilters(const Operator &op, const SubGraph &subgraph
 {
     auto ifm = TensorFromUsage(regor::TensorUsage::IFM, op, builtinOperator, *subgraph.tensors());
     auto weights = TensorFromUsage(regor::TensorUsage::Weights, op, builtinOperator, *subgraph.tensors());
-    auto ifmDepth = ShapeFromTens(ifm)[-1];
-    auto kernelIc = ShapeFromTens(weights)[-1];
-    auto kernelOc = ShapeFromTens(weights)[0];
-
-    auto numConvGroups = ifmDepth / kernelIc;
-    if ( numConvGroups == 0 || kernelOc < 0 || numConvGroups < 0 )
-    {
-        throw std::runtime_error("Error: Out of bounds\n");
-    }
-    if ( kernelOc % numConvGroups != 0 )
+    auto ifmShape = ShapeFromTens(ifm);
+    auto weightsShape = ShapeFromTens(weights);
+    if ( ifmShape && weightsShape )
     {
-        std::string constraint = "Number of filter kernels must be equally divisible by the number of convolution groups";
-        std::string extra = fmt::format("Conv Groups = {}, filter kernels = {}", numConvGroups, kernelOc);
-        throw InvalidTfLiteException(constraint, extra, op, subgraph, builtinOperator);
+        auto ifmDepth = ifmShape[-1];
+        auto kernelIc = weightsShape[-1];
+        auto kernelOc = weightsShape[0];
+        auto numConvGroups = ifmDepth / kernelIc;
+        if ( numConvGroups == 0 || kernelOc < 0 || numConvGroups < 0 )
+        {
+            throw std::runtime_error("Error: Out of bounds\n");
+        }
+        if ( kernelOc % numConvGroups != 0 )
+        {
+            std::string constraint = "Number of filter kernels must be equally divisible by the number of convolution groups";
+            std::string extra = fmt::format("Conv Groups = {}, filter kernels = {}", numConvGroups, kernelOc);
+            throw InvalidTfLiteException(constraint, extra, op, subgraph, builtinOperator);
+        }
     }
 }
 
@@ -352,20 +361,23 @@ void ConstraintDepthwiseConvOfmDepth(const Operator &op, const SubGraph &subgrap
 {
     auto ifm = TensorFromUsage(regor::TensorUsage::IFM, op, builtinOperator, *subgraph.tensors());
     auto ofm = TensorFromUsage(regor::TensorUsage::OFM, op, builtinOperator, *subgraph.tensors());
-    auto ifmDepth = ShapeFromTens(ifm)[-1];
-    auto ofmDepth = ShapeFromTens(ofm)[-1];
-
-    int depth_multiplier = CheckedPtr(op.builtin_options_as_DepthwiseConv2DOptions())->depth_multiplier();
-
-    if ( ifmDepth < 0 || ofmDepth < 0 )
-    {
-        throw std::runtime_error("Error: Out of bounds\n");
-    }
-    if ( ifmDepth * depth_multiplier != ofmDepth )
+    auto ifmShape = ShapeFromTens(ifm);
+    auto ofmShape = ShapeFromTens(ofm);
+    if ( ifmShape && ofmShape )
     {
-        std::string constraint = "OFM depth must be a equal to IFM depth times depth multiplier";
-        std::string extra = fmt::format("OFM depth = {}, IFM depth = {} and depth multiplier = {}", ofmDepth, ifmDepth, depth_multiplier);
-        throw InvalidTfLiteException(constraint, extra, op, subgraph, builtinOperator);
+        auto ifmDepth = ifmShape[-1];
+        auto ofmDepth = ofmShape[-1];
+        if ( ifmDepth < 0 || ofmDepth < 0 )
+        {
+            throw std::runtime_error("Error: Out of bounds\n");
+        }
+        int depth_multiplier = CheckedPtr(op.builtin_options_as_DepthwiseConv2DOptions())->depth_multiplier();
+        if ( ifmDepth * depth_multiplier != ofmDepth )
+        {
+            std::string constraint = "OFM depth must be a equal to IFM depth times depth multiplier";
+            std::string extra = fmt::format("OFM depth = {}, IFM depth = {} and depth multiplier = {}", ofmDepth, ifmDepth, depth_multiplier);
+            throw InvalidTfLiteException(constraint, extra, op, subgraph, builtinOperator);
+        }
     }
 }
 
diff --git a/ethosu/regor/tflite/tflite_reader.cpp b/ethosu/regor/tflite/tflite_reader.cpp
index 34d97ac0..2228ef4d 100644
--- a/ethosu/regor/tflite/tflite_reader.cpp
+++ b/ethosu/regor/tflite/tflite_reader.cpp
@@ -44,13 +44,14 @@
 namespace regor
 {
 
+
 static void SetKernel(const std::shared_ptr<Operation> &operation, const Point2i &size, const Point2i &stride,
     const Point2i &dilation, tflite::Padding padding, int depthMultiplier = 1)
 {
     const auto &inputShape = operation->IFM(0)->StorageShape();
     const auto &outputShape = operation->OFM()->StorageShape();
     Margin pad;
-    if ( operation->Type() == OpType::TransposeConv2D )
+    if ( operation->Type() == OpType::TransposeConv2D && inputShape && outputShape )
     {
         // Calculate upscaled ifm height/width by multiplying with stride
         auto ifmWH = inputShape.WH() * stride;
@@ -78,7 +79,7 @@ static void SetKernel(const std::shared_ptr<Operation> &operation, const Point2i
             pad = Margin((ypad + 1) / 2, (xpad + 1) / 2, ypad / 2, xpad / 2);
         }
     }
-    else if ( padding == tflite::Padding::SAME )
+    else if ( padding == tflite::Padding::SAME && inputShape )
     {
         auto dWH = dilation * (size - Point2i(1, 1)) + Point2i(1, 1);
         int xpad = NeededTotalPadding(inputShape.Width(), stride.x, dWH.x);
@@ -257,7 +258,10 @@ void TfLiteReader::LoadGraphs(const uint8_t *input, const tflite::Model *model,
                 auto ifm0 = operation->IFM(0);
                 auto ifm1 = operation->IFM(1);
                 assert(ifm0 && ifm1);
-                ofm->SetStorageShape(Shape::Max(ifm0->StorageShape(), ifm1->StorageShape()));
+                if ( ifm0->StorageShape() && ifm1->StorageShape() )
+                {
+                    ofm->SetStorageShape(Shape::Max(ifm0->StorageShape(), ifm1->StorageShape()));
+                }
             }
         }
         assert(tensorQuantization.count(ofm->Uid()) > 0);
-- 
GitLab
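
The guard pattern this patch adds in tflite_model_semantics.cpp and tflite_reader.cpp relies on regor's Shape type being testable in a boolean context (true only when the tensor actually has dimensions). The standalone sketch below illustrates that pattern; it is not regor code: MiniShape, its operator bool(), the negative-index accessor and IfmDepthIsMultipleOfKernelDepth are invented stand-ins, and the assumption that an empty shape converts to false is taken from how the patch uses `if ( ifmShape && weightShape )`.

#include <cassert>
#include <initializer_list>
#include <iostream>
#include <vector>

// Illustrative stand-in for a tensor shape type. A default-constructed
// ("shapeless") instance has no dimensions and tests as false, mirroring the
// shape guards added by the patch.
class MiniShape
{
public:
    MiniShape() = default;
    MiniShape(std::initializer_list<int> dims) : _dims(dims) {}

    // A shape is usable only if it has at least one dimension.
    explicit operator bool() const { return !_dims.empty(); }

    // Python-style indexing: -1 is the innermost (depth) axis.
    int operator[](int axis) const
    {
        int size = static_cast<int>(_dims.size());
        if ( axis < 0 ) axis += size;
        assert(axis >= 0 && axis < size && "indexed a shapeless or too-small shape");
        return _dims[static_cast<size_t>(axis)];
    }

private:
    std::vector<int> _dims;
};

// Mirrors the post-patch control flow of ConstraintConvGroupsIfmDepth: the
// depth constraint is only evaluated when both shapes are known, so a
// shapeless Conv2D is neither rejected nor indexed out of bounds here.
bool IfmDepthIsMultipleOfKernelDepth(const MiniShape &ifmShape, const MiniShape &weightShape)
{
    if ( ifmShape && weightShape )
    {
        int ifmDepth = ifmShape[-1];
        int kernelIc = weightShape[-1];
        if ( kernelIc <= 0 || ifmDepth < 0 ) return false;  // out-of-bounds values
        return ifmDepth % kernelIc == 0;
    }
    return true;  // shapes unknown: defer the check instead of failing
}

int main()
{
    MiniShape ifm{1, 8, 8, 16};
    MiniShape weights{8, 3, 3, 16};
    MiniShape shapeless;  // e.g. a Conv2D whose shapes are only resolved later

    std::cout << IfmDepthIsMultipleOfKernelDepth(ifm, weights) << "\n";       // 1: compatible depths
    std::cout << IfmDepthIsMultipleOfKernelDepth(shapeless, weights) << "\n"; // 1: skipped, not rejected
    return 0;
}

The design point is the one stated in the commit message: when a Conv2D/DepthwiseConv2D operand is still shapeless, the semantic check, padding calculation, or output-shape derivation is skipped rather than the model being rejected or a shape index running out of bounds; the constraint is enforced once the shapes are known.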