diff --git a/ethosu/regor/test/test_tflite_supported_operators.cpp b/ethosu/regor/test/test_tflite_supported_operators.cpp
index 49df5725feaef68cd0d9ede76c8a60fbebc8bfab..987185d054186d0afd861a36be7c0e6eb5e3b57a 100644
--- a/ethosu/regor/test/test_tflite_supported_operators.cpp
+++ b/ethosu/regor/test/test_tflite_supported_operators.cpp
@@ -80,9 +80,13 @@ TEST_CASE("Supported operators Common")
     arch->CheckConfiguration(err);
     REQUIRE(err == "noerror");
     auto supportedOps = MakeSupportedOpsChecker(REGOR_ARCH_ETHOSU55, arch);
+
     SECTION("ConstraintTensQuantized")
     {
         auto op = CreateOperation(OpType::Conv2D, Shape(1, 8, 8, 1), DataType::Int8, Shape(1, 8, 8, 1), DataType::Int8);
+        std::vector<int8_t> values = {1};
+        auto weights = CreateTensor("weights", Shape(1, 1, 1, 1), DataType::Int8, std::move(values));
+        op->ConnectInput(TensorUsage::Weights, weights).Set(Quantization::Unit());
         // Regular op should pass
         REQUIRE(supportedOps->Check(op.get()) == true);
         auto &quant = op->Output(TensorUsage::OFM)->quantization;
@@ -94,6 +98,41 @@ TEST_CASE("Supported operators Common")
         REQUIRE(supportedOps->Check(op.get()) == false);
         op->Disconnect();
     }
+    SECTION("ConstraintMustHaveIFM")
+    {
+        auto op = CreateOperation(OpType::Exp, Shape(1, 8, 8, 1), DataType::Int8, Shape(1, 8, 8, 1), DataType::Int8);
+        op->DisconnectInputInvalidatingInputs(TensorUsage::IFM);
+        REQUIRE(supportedOps->Check(op.get()) == false);
+        op->Disconnect();
+    }
+    SECTION("ConstraintMustHaveOFM")
+    {
+        auto op = CreateOperation(OpType::Exp, Shape(1, 8, 8, 1), DataType::Int8, Shape(1, 8, 8, 1), DataType::Int8);
+        auto ifm = op->Input(TensorUsage::IFM0)->tensor;
+        op->Disconnect();
+        op->ConnectInput(TensorUsage::IFM0, ifm);
+        REQUIRE(supportedOps->Check(op.get()) == false);
+        op->Disconnect();
+    }
+    SECTION("ConstraintMustHaveShape")
+    {
+        auto op = CreateOperation(OpType::Add, Shape(1, 8, 8, 1), DataType::Int8, Shape(1, 8, 8, 1), DataType::Int8,
+            Shape(1, 8, 8, 1), DataType::Int8);
+        op->Output(TensorUsage::OFM)->shape = Shape();
+        REQUIRE(supportedOps->Check(op.get()) == false);
+        op->Disconnect();
+    }
+    SECTION("ConstraintFCWeightShape")
+    {
+        auto op = CreateOperation(OpType::FullyConnected, Shape(1, 2, 2, 1), DataType::Int8, Shape(1, 2, 1, 1), DataType::Int8);
+        std::vector<int8_t> values = {1, 1, 1, 1, 1, 1, 1, 1};
+        auto weights = CreateTensor("weights", Shape(4, 1, 1, 2), DataType::Int8, std::move(values));
+        op->ConnectInput(TensorUsage::Weights, weights).Set(Quantization::Unit());
+        REQUIRE(supportedOps->Check(op.get()) == true);
+        op->Input(TensorUsage::Weights)->tensor->Reshape(Shape(2, 2, 1, 2));
+        REQUIRE(supportedOps->Check(op.get()) == false);
+        op->Disconnect();
+    }
 }
 
 TEST_CASE("Supported operators EthosU55")
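Note that the hunk above adds no SECTION for the new ConstraintNumSplits check (introduced in tflite_supported_operators.cpp below). That check reads num_splits from the serialized tflite::Operator reachable via op->Passthrough(), so a test would need a way to attach serialized SplitOptions to the op. A hypothetical sketch, assuming an AttachTfLitePassthrough() helper that does not appear anywhere in this patch:

    // Hypothetical sketch only: AttachTfLitePassthrough() is an assumed helper
    // that attaches a serialized tflite::Operator with SplitOptions so that
    // op->Passthrough() returns it; it is not part of this patch.
    SECTION("ConstraintNumSplits")
    {
        auto op = CreateOperation(OpType::Split, Shape(1, 8, 8, 2), DataType::Int8, Shape(1, 8, 8, 1), DataType::Int8);
        AttachTfLitePassthrough(op, /*num_splits=*/2);  // assumed helper
        // Only one OFM is connected, so num_splits (2) != outputs (1) and the check fails
        REQUIRE(supportedOps->Check(op.get()) == false);
        op->Disconnect();
    }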
diff --git a/ethosu/regor/tflite/tflite_reader.cpp b/ethosu/regor/tflite/tflite_reader.cpp
index d9e4e15fe8f2db234fecba31ca280982df32b978..34d97ac0c2cc8f0619fa7d2f697aada570853baf 100644
--- a/ethosu/regor/tflite/tflite_reader.cpp
+++ b/ethosu/regor/tflite/tflite_reader.cpp
@@ -191,7 +191,6 @@ void TfLiteReader::LoadGraphs(const uint8_t *input, const tflite::Model *model,
         const auto &input_tensors = *tflite_inputs;  // A vector of indices into the `tensors` vector
         int indirect_index = 0;                      // An index into `input_tensors`
         int ifm_count = 0;
-        bool shapelessTensors = false;
         for ( const auto &map_entry : TfLiteMapping::InputTensorIndices(op_type) )
         {
             const TensorUsage usage = map_entry.second;
@@ -201,7 +200,6 @@ void TfLiteReader::LoadGraphs(const uint8_t *input, const tflite::Model *model,
             if ( direct_index >= 0 )  // -1 indicates an optional tensor is not present
             {
                 auto &tensor = tensors.at(direct_index);
-                shapelessTensors = shapelessTensors || !tensor->StorageShape();
                 assert(tensorQuantization.count(tensor->Uid()) > 0);
                 operation->ConnectInput(usage, tensor).Set(tensorQuantization[tensor->Uid()]);
             }
@@ -217,7 +215,6 @@ void TfLiteReader::LoadGraphs(const uint8_t *input, const tflite::Model *model,
             if ( direct_index >= 0 )
             {
                 auto &tensor = tensors.at(direct_index);
-                shapelessTensors = shapelessTensors || !tensor->StorageShape();
                 if ( IsVariadic(op_type) )
                 {
                     // Treat all input tensors beyond those specified in the indices map as IFMs.
@@ -263,7 +260,6 @@ void TfLiteReader::LoadGraphs(const uint8_t *input, const tflite::Model *model,
                     ofm->SetStorageShape(Shape::Max(ifm0->StorageShape(), ifm1->StorageShape()));
                 }
             }
-            shapelessTensors = shapelessTensors || !ofm->StorageShape();
             assert(tensorQuantization.count(ofm->Uid()) > 0);
             operation->ConnectOutput(MakeTensorUsage(TensorUsage::OFM, ofm_count++), ofm).Set(tensorQuantization[ofm->Uid()]);
         }
@@ -276,17 +272,6 @@ void TfLiteReader::LoadGraphs(const uint8_t *input, const tflite::Model *model,
             placeholder.push_back(std::move(tensor));
         }
 
-        if ( ifm_count == 0 || ofm_count == 0 )
-        {
-            // NPU operations must have IFM and OFM
-            operation->SetPassthroughOp();
-        }
-
-        if ( shapelessTensors )
-        {
-            operation->SetPassthroughOp();
-        }
-
         if ( optDb )
        {
             optDb->SourceOp(operation.get(), ext_key);
@@ -516,20 +501,18 @@ void TfLiteReader::ParseOperatorOptions(
        {
             const auto options = GetBuiltinOptions<tflite::FullyConnectedOptions>(tflite_operator);
             activation_function = options->fused_activation_function();
-            // TODO: Are `weights_format`, `keep_num_dims` or `asymmetric_quantize_inputs` used?
-
 
             auto weight_tensor = operation->Input(TensorUsage::Weights)->tensor;
             if ( weight_tensor->AxisOrder() == AxisOrder::Unknown )
             {
-                // Reshape weight tensor from (num_outputs, ..., num_inputs) to (num_outputs, 1, 1, num_inputs)
-                weight_tensor->SetAxisOrder(AxisOrder::OHWI);
                 const auto &shape = weight_tensor->StorageShape();
-                for ( int i = 1; i < shape.Size() - 1; i++ )
+                // Reshape weight tensor from (num_outputs, ..., num_inputs) to (num_outputs, 1, 1, num_inputs)
+                if ( shape.Size() >= 2 && shape.Elements() == (shape[0] * shape[-1]) )
                 {
-                    if ( shape[i] != 1 ) operation->SetPassthroughOp();
+                    weight_tensor->Reshape(Shape(shape[0], 1, 1, shape[-1]));
+                    weight_tensor->SetAxisOrder(AxisOrder::OHWI);
+                    operation->Input(TensorUsage::Weights)->shape = weight_tensor->StorageShape();
                 }
-                weight_tensor->Reshape(Shape(shape[0], 1, 1, shape[-1]));
             }
             else
             {
@@ -657,18 +640,10 @@ void TfLiteReader::ParseOperatorOptions(
             break;
 
         case tflite::BuiltinOptions::SplitOptions:
-        {
-            int num_splits = GetBuiltinOptions<tflite::SplitOptions>(tflite_operator)->num_splits();
-            if ( size_t(num_splits) != operation->Outputs().size() ) operation->SetPassthroughOp();
-        }
-        break;
+            break;
 
        case tflite::BuiltinOptions::SplitVOptions:
-        {
-            int num_splits = GetBuiltinOptions<tflite::SplitVOptions>(tflite_operator)->num_splits();
-            if ( size_t(num_splits) != operation->Outputs().size() ) operation->SetPassthroughOp();
-        }
-        break;
+            break;
 
         case tflite::BuiltinOptions::SVDFOptions:
         {
diff --git a/ethosu/regor/tflite/tflite_supported_operators.cpp b/ethosu/regor/tflite/tflite_supported_operators.cpp
index 3ceb59f8f27f724903f3a0c33791e4b6fb845aa8..25d2296e931252a442cf155d44931d8b7fa2a8b9 100644
--- a/ethosu/regor/tflite/tflite_supported_operators.cpp
+++ b/ethosu/regor/tflite/tflite_supported_operators.cpp
@@ -56,6 +56,88 @@ bool TfLiteSupportedOperators::ConstraintTensDtypes(const Operation *op)
     return true;
 }
 
+bool TfLiteSupportedOperators::ConstraintNumSplits(const Operation *op)
+{
+    const char *constraint = "num_splits must match the number of outputs";
+    const tflite::Operator *passthrough = static_cast<const tflite::Operator *>(op->Passthrough());
+    OpType opType = op->Type();
+    int numSplits = 0;
+    if ( opType == OpType::Split )
+    {
+        assert(passthrough);
+        const auto *opt = passthrough->builtin_options_as_SplitOptions();
+        assert(opt);
+        numSplits = opt->num_splits();
+    }
+    else if ( opType == OpType::SplitV )
+    {
+        assert(passthrough);
+        const auto *opt = passthrough->builtin_options_as_SplitVOptions();
+        assert(opt);
+        numSplits = opt->num_splits();
+    }
+    else
+    {
+        return true;
+    }
+    int numOutputs = op->Outputs().size();
+    if ( numSplits != numOutputs )
+    {
+        Failure(op, fmt::format("num_splits: {} does not match the number of outputs: {}", numSplits, numOutputs), constraint);
+        return false;
+    }
+    return true;
+}
+
+bool TfLiteSupportedOperators::ConstraintMustHaveIFM(const Operation *op)
+{
+    const char *constraint = "Operations must have at least one IFM.";
+    for ( const auto item : op->Inputs().pairs() )
+    {
+        auto usage = item.first;
+        if ( IsIFM(usage) )
+        {
+            return true;
+        }
+    }
+    Failure(op, "Operation without IFM", constraint);
+    return false;
+}
+
+bool TfLiteSupportedOperators::ConstraintMustHaveOFM(const Operation *op)
+{
+    const char *constraint = "Operations must have at least one OFM.";
+    for ( const auto item : op->Outputs().pairs() )
+    {
+        auto usage = item.first;
+        if ( IsOFM(usage) )
+        {
+            return true;
+        }
+    }
+    Failure(op, "Operation without OFM", constraint);
+    return false;
+}
+
+bool TfLiteSupportedOperators::ConstraintTensMustHaveShape(const Operation *op)
+{
+    const char *constraint = "Tensors must have constant shape.";
+    for ( const auto *list : {&op->Inputs(), &op->Outputs()} )
+    {
+        for ( const auto &item : list->pairs() )
+        {
+            auto usage = item.first;
+            const auto &conn = item.second;
+            if ( !conn.shape )
+            {
+                Failure(op, "Operation has shapeless tensor", constraint);
+                return false;
+            }
+        }
+    }
+    return true;
+}
+
 bool TfLiteSupportedOperators::ConstraintTensQuantized(const Operation *op)
 {
     const char *constraint = "Input(s), Output and Weight tensors must have quantization parameters";
@@ -93,6 +175,26 @@ bool TfLiteSupportedOperators::ConstraintTensQuantized(const Operation *op)
     return true;
 }
 
+bool TfLiteSupportedOperators::ConstraintFCWeightShape(const Operation *op)
+{
+    const char *constraint = "FullyConnected weights must be of the form O,1,..,1,I";
+    if ( op->Type() != OpType::FullyConnected )
+    {
+        return true;
+    }
+    auto weights = op->Input(TensorUsage::Weights);
+    assert(weights);
+    assert(weights->tensor);
+    const auto &shape = weights->tensor->StorageShape();
+    // Total elements must be equal to first-dim * last-dim
+    if ( shape.Size() < 2 || (shape.Elements() != (shape[0] * shape[-1])) )
+    {
+        Failure(op, fmt::format("Unsupported weights shape: {}", shape.ToString()), constraint);
+        return false;
+    }
+    return true;
+}
+
 void TfLiteSupportedOperators::Failure(const Operation *op, const std::string &message, const std::string &constraint)
 {
     assert(op);
@@ -121,6 +223,11 @@ TfLiteSupportedOperators::TfLiteSupportedOperators(IArchitectureConstraints *con
     _genericChecks = {
         &TfLiteSupportedOperators::ConstraintOpType,
         &TfLiteSupportedOperators::ConstraintTensDtypes,
+        &TfLiteSupportedOperators::ConstraintNumSplits,
+        &TfLiteSupportedOperators::ConstraintMustHaveIFM,
+        &TfLiteSupportedOperators::ConstraintMustHaveOFM,
+        &TfLiteSupportedOperators::ConstraintTensMustHaveShape,
+        &TfLiteSupportedOperators::ConstraintFCWeightShape,
         &TfLiteSupportedOperators::ConstraintTensQuantized,
     };
 }
@@ -151,7 +258,11 @@ void TfLiteSupportedOperators::Process(Graph *graph)
     {
         if ( op->Type() == OpType::Passthrough )
         {
-            // don't check passthrough ops
+            // Op is already passthrough
+            // Only valid scenario is that op is a previously disconnected activation
+            assert(op->Passthrough() == nullptr && "source-operation set to passthrough before supported-ops checks");
+            assert(op->CountInputs(TensorUsage::IFM) == 0);
+            assert(op->CountOutputs(TensorUsage::OFM) == 0);
             continue;
         }
         if ( !Check(op.get()) )
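For reference, ConstraintFCWeightShape (and the matching reshape guard in tflite_reader.cpp above) accepts a weight shape exactly when the rank is at least 2 and every interior dimension is 1, which is equivalent to total elements == first dim * last dim. A minimal standalone sketch of that predicate in plain C++ (not regor code; regor's Shape class is not used here):

    #include <cassert>
    #include <cstdint>
    #include <functional>
    #include <numeric>
    #include <vector>

    // True iff the shape is reshapable to (O, 1, 1, I): rank >= 2 and all
    // interior dimensions are 1, i.e. total elements == first * last.
    static bool IsFcWeightShape(const std::vector<int> &shape)
    {
        if ( shape.size() < 2 ) return false;
        const int64_t elements = std::accumulate(shape.begin(), shape.end(), int64_t{1}, std::multiplies<int64_t>());
        return elements == int64_t{shape.front()} * shape.back();
    }

    int main()
    {
        assert(IsFcWeightShape({4, 1, 1, 2}));   // (O, 1, 1, I): accepted
        assert(!IsFcWeightShape({2, 2, 1, 2}));  // interior dim != 1: rejected, matching the new test
        return 0;
    }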
diff --git a/ethosu/regor/tflite/tflite_supported_operators.hpp b/ethosu/regor/tflite/tflite_supported_operators.hpp
index 6aaacc9ae45d1ee27f24eff0ec27ef440922a794..8fc995f16501d1b7137d80b041ee1e40d2cfef9d 100644
--- a/ethosu/regor/tflite/tflite_supported_operators.hpp
+++ b/ethosu/regor/tflite/tflite_supported_operators.hpp
@@ -43,11 +43,16 @@ public:
     virtual bool Check(const Operation *) = 0;
 
 protected:
-    static void Failure(const Operation *op, const std::string &message, const std::string &constraint);
+    static void Failure(const Operation *op, const std::string &message = "", const std::string &constraint = "");
 
 private:
     bool ConstraintOpType(const Operation *op);
     bool ConstraintTensDtypes(const Operation *op);
+    bool ConstraintNumSplits(const Operation *op);
+    bool ConstraintMustHaveIFM(const Operation *op);
+    bool ConstraintMustHaveOFM(const Operation *op);
+    bool ConstraintTensMustHaveShape(const Operation *op);
     bool ConstraintTensQuantized(const Operation *op);
+    bool ConstraintFCWeightShape(const Operation *op);
 };
 } // namespace regor
diff --git a/ethosu/regor/tflite/tflite_supported_operators_u55.cpp b/ethosu/regor/tflite/tflite_supported_operators_u55.cpp
index 837fdfd1233b00bca8589bfbe21ce246778624aa..b65df18acda70363b726dc1beaed5a43893382fe 100644
--- a/ethosu/regor/tflite/tflite_supported_operators_u55.cpp
+++ b/ethosu/regor/tflite/tflite_supported_operators_u55.cpp
@@ -103,7 +103,6 @@ bool TfLiteSupportedOperatorsU55::Check(const Operation *op)
     return true;
 }
 
-
 bool TfLiteSupportedOperatorsU55::ConstraintBroadcastShapes(const Operation *op)
 {
     const char *constraint = "One input-tensor must match the shape of the output-tensor.";
@@ -152,4 +151,5 @@ bool TfLiteSupportedOperatorsU55::ConstraintReverse(const Operation *op)
     }
     return true;
 }
+
 } // namespace regor