From 937f67faf9a5b6bab61841fc7254fcceeffad3df Mon Sep 17 00:00:00 2001 From: Fredrik Svedberg Date: Thu, 12 Jun 2025 16:45:03 +0200 Subject: [PATCH] MLBEDSW-10359 Update to TOSA v1.0 Change-Id: I93960e0a9d4f5616784f6cc3d35ec8eb762e41bc Signed-off-by: Fredrik Svedberg --- ethosu/regor/CMakeLists.txt | 3 +- .../architecture/architecture_constraints.hpp | 1 + .../ethosu55/ethos_u55_constraints.cpp | 14 + .../ethos_u55_register_cs_generator.cpp | 27 +- .../ethosu85/ethos_u85_constraints.cpp | 15 +- .../ethos_u85_register_cs_generator.cpp | 31 +- ethosu/regor/architecture/mlw_encode.hpp | 3 +- ethosu/regor/common/buffer_view.hpp | 8 +- ethosu/regor/common/data_type.hpp | 10 +- ethosu/regor/common/shape.hpp | 22 +- ethosu/regor/compiler/graph_builder.cpp | 11 +- ethosu/regor/compiler/graphir_optimiser.cpp | 233 +- ethosu/regor/compiler/graphir_optimiser.hpp | 6 +- .../high_level_command_stream_generator.cpp | 5 +- ethosu/regor/compiler/kernel.hpp | 1 - ethosu/regor/compiler/op_type.hpp | 11 +- ethosu/regor/compiler/operation_util.hpp | 32 + ethosu/regor/compiler/scheduler_decompose.cpp | 20 +- ethosu/regor/compiler/tensor.cpp | 4 +- ethosu/regor/compiler/tensor.hpp | 5 +- ethosu/regor/compiler/tensor_properties.hpp | 1 + .../regor/compiler/tflite_graph_optimiser.cpp | 67 +- .../regor/compiler/tosa_graph_validator.cpp | 14 +- ethosu/regor/include/graphapi.hpp | 6 +- ethosu/regor/include/graphapi_tosa_types.hpp | 9 +- ethosu/regor/test/test_shape.cpp | 13 + ethosu/regor/test/test_tosa_validator.cpp | 3 +- ethosu/regor/tflite/tflite_reader.cpp | 6 +- ethosu/regor/tosa/tosaValidationGenerator.rb | 53 +- ethosu/regor/tosa/tosa_argument_checks.cpp | 39 +- ethosu/regor/tosa/tosa_argument_checks.hpp | 4 +- ethosu/regor/tosa/tosa_error_checks.cpp | 1066 ++- ethosu/regor/tosa/tosa_error_checks.hpp | 122 +- ethosu/regor/tosa/tosa_level_checks.cpp | 42 +- ethosu/regor/tosa/tosa_level_checks.hpp | 9 +- ethosu/regor/tosa/tosa_mapping.cpp | 25 +- ethosu/regor/tosa/tosa_reader.cpp | 765 
+- ethosu/regor/tosa/tosa_require_checks.cpp | 108 +- ethosu/regor/tosa/tosa_require_checks.hpp | 38 +- ethosu/regor/tosa/tosa_schema_generated.hpp | 6526 +++++++++++++---- ethosu/regor/tosa/tosa_validator.cpp | 11 +- ethosu/regor/tosa/tosa_validator.hpp | 7 +- ...sa_validator_version_0_60_0_profile_bi.cpp | 2898 -------- ...r_version_1_0_0_draft_profile_pro_int.cpp} | 776 +- 44 files changed, 6828 insertions(+), 6242 deletions(-) delete mode 100644 ethosu/regor/tosa/tosa_validator_version_0_60_0_profile_bi.cpp rename ethosu/regor/tosa/{tosa_validator_version_0_80_0_profile_bi.cpp => tosa_validator_version_1_0_0_draft_profile_pro_int.cpp} (83%) diff --git a/ethosu/regor/CMakeLists.txt b/ethosu/regor/CMakeLists.txt index f315b48a..8c52868a 100644 --- a/ethosu/regor/CMakeLists.txt +++ b/ethosu/regor/CMakeLists.txt @@ -305,8 +305,7 @@ regor_lib( "tosa/tosa_error_checks.cpp" "tosa/tosa_level_checks.cpp" "tosa/tosa_require_checks.cpp" - "tosa/tosa_validator_version_0_60_0_profile_bi.cpp" - "tosa/tosa_validator_version_0_80_0_profile_bi.cpp" + "tosa/tosa_validator_version_1_0_0_draft_profile_pro_int.cpp" "tosa/tosa_reader.cpp" "tosa/tosa_mapping.cpp" "tflite/tflite_reader.cpp" diff --git a/ethosu/regor/architecture/architecture_constraints.hpp b/ethosu/regor/architecture/architecture_constraints.hpp index 9790374b..79861863 100644 --- a/ethosu/regor/architecture/architecture_constraints.hpp +++ b/ethosu/regor/architecture/architecture_constraints.hpp @@ -52,6 +52,7 @@ struct ArchOperatorQuery ArchFM ofm; ReverseType reverseMask = ReverseType::None; TransposeType transposeMask = TransposeType::None; + WeightFormat weightFormat = WeightFormat::None; const Kernel *kernel = nullptr; int axis = 0; // Uses negative notation: -1 = C, -2 = W, ... 
~ArchOperatorQuery(){}; diff --git a/ethosu/regor/architecture/ethosu55/ethos_u55_constraints.cpp b/ethosu/regor/architecture/ethosu55/ethos_u55_constraints.cpp index c5401a16..6b3a1456 100644 --- a/ethosu/regor/architecture/ethosu55/ethos_u55_constraints.cpp +++ b/ethosu/regor/architecture/ethosu55/ethos_u55_constraints.cpp @@ -369,6 +369,11 @@ Flags EthosU55Constraints::OperatorQuery(OpType opType, const ArchO // TransposeConv2D and Conv3D are legalized during decomposition else if ( opType == OpType::TransposeConv2D || opType == OpType::Conv3D ) { + // Check for supported weight format + if ( query && query->weightFormat != WeightFormat::Default ) + { + return QueryResult::Unsupported; + } if ( req ) { req->req.Set(ArchRequirement::Decompose); @@ -394,6 +399,15 @@ Flags EthosU55Constraints::OperatorQuery(OpType opType, const ArchO return QueryResult::NativeConstrained; } + // Check for supported weight format for convolution type ops + if ( opType == OpType::DepthwiseConv2D || opType == OpType::Conv2D || opType == OpType::FullyConnected ) + { + if ( query->weightFormat != WeightFormat::Default ) + { + return QueryResult::Unsupported; + } + } + Flags result = QueryResult::Native; if ( npuOp == EthosU55NpuOp::ReduceSum ) diff --git a/ethosu/regor/architecture/ethosu55/ethos_u55_register_cs_generator.cpp b/ethosu/regor/architecture/ethosu55/ethos_u55_register_cs_generator.cpp index c12f32b7..62380fa1 100644 --- a/ethosu/regor/architecture/ethosu55/ethos_u55_register_cs_generator.cpp +++ b/ethosu/regor/architecture/ethosu55/ethos_u55_register_cs_generator.cpp @@ -25,6 +25,7 @@ #include "common/data_type.hpp" #include "compiler/high_level_command_stream.hpp" #include "compiler/op_type.hpp" +#include "compiler/operation_util.hpp" #include "ethos_u55.hpp" #include "ethos_u55_scaling.hpp" #define NPU_DISASSEMBLE @@ -438,30 +439,8 @@ bool EthosU55RCSGenerator::IsScalar(const HLCFeatureMap &fm, int32_t &scalarValu { const auto &buffer = fm.constBuffer; // A 1-sized 
feature map in constant memory is a scalar - bool isScalar = fm.shape.Elements() == 1 && buffer; - if ( isScalar ) - { - if ( fm.dataType == DataType::Int8 ) - { - scalarValue = buffer->Data()[0]; - } - else if ( fm.dataType == DataType::UInt8 ) - { - scalarValue = buffer->Data()[0]; - } - else if ( fm.dataType == DataType::Int16 ) - { - scalarValue = buffer->Data()[0]; - } - else if ( fm.dataType == DataType::UInt16 ) - { - scalarValue = buffer->Data()[0]; - } - else - { // Unsupported scalar value - isScalar = false; - } - } + bool isScalar = fm.shape.Elements() == 1 && buffer && IsInteger(fm.dataType) && DataTypeSizeBits(fm.dataType) <= 16; + if ( isScalar ) scalarValue = Scalar(*buffer, fm.dataType); return isScalar; } diff --git a/ethosu/regor/architecture/ethosu85/ethos_u85_constraints.cpp b/ethosu/regor/architecture/ethosu85/ethos_u85_constraints.cpp index 91fee32c..631a7eb1 100644 --- a/ethosu/regor/architecture/ethosu85/ethos_u85_constraints.cpp +++ b/ethosu/regor/architecture/ethosu85/ethos_u85_constraints.cpp @@ -350,7 +350,7 @@ Flags EthosU85Constraints::OperatorQuery(OpType opType, const ArchO { req->req.Set(ArchRequirement::Decompose); } - return QueryResult::NativeConstrainedHasReq; + return query ? 
QueryResult::NativeHasReq : QueryResult::NativeConstrainedHasReq; } // Check direct native support of the opType @@ -367,6 +367,19 @@ Flags EthosU85Constraints::OperatorQuery(OpType opType, const ArchO return QueryResult::NativeConstrained; } + // Check for supported weight format for convolution type ops + if ( opType == OpType::DepthwiseConv2D || opType == OpType::Conv2D || opType == OpType::FullyConnected ) + { + if ( query->weightFormat == WeightFormat::None ) + { + if ( req ) + { + req->req.Set(ArchRequirement::Decompose); + } + result.Set(QueryResult::HasRequirements); + } + } + // Fusing checks if ( query->transposeMask != TransposeType::None ) { diff --git a/ethosu/regor/architecture/ethosu85/ethos_u85_register_cs_generator.cpp b/ethosu/regor/architecture/ethosu85/ethos_u85_register_cs_generator.cpp index 68cd2827..0ab17246 100644 --- a/ethosu/regor/architecture/ethosu85/ethos_u85_register_cs_generator.cpp +++ b/ethosu/regor/architecture/ethosu85/ethos_u85_register_cs_generator.cpp @@ -25,6 +25,7 @@ #include "common/data_type.hpp" #include "compiler/high_level_command_stream.hpp" #include "compiler/op_type.hpp" +#include "compiler/operation_util.hpp" #include "ethos_u85.hpp" #define NPU_DISASSEMBLE #define NPU_NAMESPACE ethosu85 @@ -593,34 +594,8 @@ bool EthosU85RCSGenerator::IsScalar(const HLCFeatureMap &fm, int32_t &scalarValu { const auto &buffer = fm.constBuffer; // A 1-sized feature map in constant memory is a scalar - bool isScalar = fm.shape.Elements() == 1 && buffer; - if ( isScalar ) - { - if ( fm.dataType == DataType::Int8 ) - { - scalarValue = buffer->Data()[0]; - } - else if ( fm.dataType == DataType::UInt8 ) - { - scalarValue = buffer->Data()[0]; - } - else if ( fm.dataType == DataType::Int16 ) - { - scalarValue = buffer->Data()[0]; - } - else if ( fm.dataType == DataType::UInt16 ) - { - scalarValue = buffer->Data()[0]; - } - else if ( fm.dataType == DataType::Int32 ) - { - scalarValue = buffer->Data()[0]; - } - else - { // Unsupported scalar 
value - isScalar = false; - } - } + bool isScalar = fm.shape.Elements() == 1 && buffer && IsInteger(fm.dataType) && DataTypeSizeBits(fm.dataType) <= 32; + if ( isScalar ) scalarValue = Scalar(*buffer, fm.dataType); return isScalar; } diff --git a/ethosu/regor/architecture/mlw_encode.hpp b/ethosu/regor/architecture/mlw_encode.hpp index 27d3bdb0..bca6f6fd 100644 --- a/ethosu/regor/architecture/mlw_encode.hpp +++ b/ethosu/regor/architecture/mlw_encode.hpp @@ -1,5 +1,5 @@ // -// SPDX-FileCopyrightText: Copyright 2021-2024 Arm Limited and/or its affiliates +// SPDX-FileCopyrightText: Copyright 2021-2025 Arm Limited and/or its affiliates // // SPDX-License-Identifier: Apache-2.0 // @@ -23,6 +23,7 @@ enum class WeightFormat : uint16_t { + None = 0, Default = 1 << 0, Fast = 1 << 1, Sparse2_4 = 1 << 2 diff --git a/ethosu/regor/common/buffer_view.hpp b/ethosu/regor/common/buffer_view.hpp index b7ec922b..895dda3f 100644 --- a/ethosu/regor/common/buffer_view.hpp +++ b/ethosu/regor/common/buffer_view.hpp @@ -470,6 +470,8 @@ public: return _get(_data, index); } + size_t Count() const { return _count; } + // Simple wrapping iterator template struct iterator_base_t @@ -642,7 +644,7 @@ public: v *= axisElements[i]; } - _strideBytes = Shape(&strides[0], sz); + _strideBytes = Shape(strides.data(), sz); } } else @@ -715,6 +717,8 @@ public: return Values(); case DataType::UInt8: return Values(); + case DataType::Bool8: + return Values(); case DataType::Int16: return Values(); case DataType::UInt16: @@ -723,6 +727,8 @@ public: return Values(); case DataType::UInt32: return Values(); + case DataType::Int48: + return Values(); case DataType::Int64: return Values(); case DataType::UInt64: diff --git a/ethosu/regor/common/data_type.hpp b/ethosu/regor/common/data_type.hpp index 2d2f9a12..bb02cb1c 100644 --- a/ethosu/regor/common/data_type.hpp +++ b/ethosu/regor/common/data_type.hpp @@ -53,7 +53,11 @@ public: return res >> 16; } - operator uint64_t() const { return static_cast(operator 
int64_t()); } + template::value, bool> = 0> + explicit operator TYPE() const + { + return static_cast(operator int64_t()); + } private: uint8_t _data[6]{0}; @@ -100,7 +104,9 @@ enum class DataType : uint16_t QUInt16 = QUInt | Bits16, QUInt32 = QUInt | Bits32, Float = 1 << 12, - BFloat16 = Float | Bits16 | Packed, + Float8e4m3 = Float | Bits8 | Asymmetric, + Float8e5m2 = Float | Bits8, + BFloat16 = Float | Bits16 | Asymmetric, Float16 = Float | Bits16, Float32 = Float | Bits32, Float64 = Float | Bits64, diff --git a/ethosu/regor/common/shape.hpp b/ethosu/regor/common/shape.hpp index 88dfc812..74293bae 100644 --- a/ethosu/regor/common/shape.hpp +++ b/ethosu/regor/common/shape.hpp @@ -76,23 +76,25 @@ public: At(3) = n; } - template - Shape(const TYPE *axes, size_t length) + template::value_type>::value, bool> = true> + Shape(Iterator first, size_t length) { assert(length < size_t(std::numeric_limits::max())); Init(int(length)); - if ( axes != nullptr ) + auto *local = Storage(); + // Reverses input into position + assert(size_t(_last) == length - 1); + for ( size_t i = 0; i < length; i++ ) { - auto *local = Storage(); - // Reverses input into position - assert(size_t(_last) == length - 1); - for ( size_t i = 0; i < length; i++ ) - { - local[_last - i] = int32_t(axes[i]); - } + local[_last - i] = int32_t(*first++); } } + template::value_type>::value, bool> = true> + Shape(Iterator first, Iterator end) : Shape(first, size_t(std::distance(first, end))) + { + } + Shape(std::nullptr_t, int length, int fillValue = 0) { Init(length, fillValue); } Shape(const Shape &other) diff --git a/ethosu/regor/compiler/graph_builder.cpp b/ethosu/regor/compiler/graph_builder.cpp index 0ff963c8..f1ecb7b0 100644 --- a/ethosu/regor/compiler/graph_builder.cpp +++ b/ethosu/regor/compiler/graph_builder.cpp @@ -112,6 +112,13 @@ static constexpr std::pair s_aTosaMapping[] = { //{tosa::Op::RFFT2D, OpType::CurrentlyUnsupported}, //{tosa::Op::ERF, OpType::CurrentlyUnsupported}, //{tosa::Op::DIM, 
OpType::CurrentlyUnsupported}, + //{tosa::Op::COS, OpType::CurrentlyUnsupported}, + //{tosa::Op::SIN, OpType::CurrentlyUnsupported}, + //{tosa::Op::YIELD, OpType::CurrentlyUnsupported}, + //{tosa::Op::VARIABLE, OpType::CurrentlyUnsupported}, + //{tosa::Op::VARIABLE_WRITE, OpType::CurrentlyUnsupported}, + //{tosa::Op::VARIABLE_READ, OpType::CurrentlyUnsupported}, + //{tosa::Op::CONST_SHAPE, OpType::CurrentlyUnsupported}, }; static constexpr std::pair s_aTypeMapping[] = { @@ -127,6 +134,8 @@ static constexpr std::pair s_aTypeMapp {GraphApi::GraphDataType::UInt32, DataType::UInt32}, {GraphApi::GraphDataType::UInt48, DataType::UInt48}, {GraphApi::GraphDataType::UInt64, DataType::UInt64}, + {GraphApi::GraphDataType::Float8e4m3, DataType::Float8e4m3}, + {GraphApi::GraphDataType::Float8e5m2, DataType::Float8e5m2}, {GraphApi::GraphDataType::BFloat16, DataType::BFloat16}, {GraphApi::GraphDataType::Float16, DataType::Float16}, {GraphApi::GraphDataType::Float32, DataType::Float32}, @@ -180,7 +189,7 @@ bool GraphBuilder::RequireSyntaxVersion(uint32_t version, int32_t level) { _syntaxVersion = (version & 0xFFFFFF00) | uint32_t(level); - if ( _syntaxVersion > (GraphApi::VERSION_TOSA_0_80 | GraphApi::PROFILE_BASELINE) ) // 0.80.Baseline + if ( _syntaxVersion > (GraphApi::VERSION_TOSA_1_00 | GraphApi::PROFILE_BASELINE) ) // 1.0.Baseline { return false; } diff --git a/ethosu/regor/compiler/graphir_optimiser.cpp b/ethosu/regor/compiler/graphir_optimiser.cpp index dc67759a..79a49c2d 100644 --- a/ethosu/regor/compiler/graphir_optimiser.cpp +++ b/ethosu/regor/compiler/graphir_optimiser.cpp @@ -83,6 +83,7 @@ Tensor *GraphIrOptimiser::ConvertBool8Tensors(Graph *graph, Tensor *tensor) // Replace the IFM of ops consuming the graph input tensor std::shared_ptr graphInputTensor = tensor->shared_from_this(); std::shared_ptr newTensor = tensor->Clone(); + newTensor->SetBuffer(nullptr); newTensor->SetName(newTensor->Name() + "_int8"); ReplaceConsumerInput(nullptr, graphInputTensor->Readers(), 
graphInputTensor.get(), newTensor); @@ -215,6 +216,128 @@ Operation *GraphIrOptimiser::ConvertAttributes(Graph *const graph, Operation *co kernel = operation->Kernel()->WithSize({ifmConn->shape.Width() /* W */, 1 /* H */}); operation->SetKernel(std::make_unique(std::move(kernel))); } + + return operation; +} + +Operation *GraphIrOptimiser::ConvertAttributeTensors(Graph *const graph, Operation *const operation) +{ + UNUSED(graph); + OpType opType = operation->Type(); + if ( opType == OpType::Mul ) + { + auto *attr = operation->Attribute(); + // Shift can be a compile time constant tensor + if ( auto *shiftConn = operation->Input(TensorUsage::Params) ) + { + assert(shiftConn->tensor->IsConstant()); + attr->shift = Scalar(*shiftConn->tensor); + } + } + else if ( opType == OpType::Pad ) + { + auto *attr = operation->Attribute(); + // Pad value can be a compile time constant tensor + if ( auto padConstConn = operation->Input(TensorUsage::Params1) ) + { + assert(padConstConn->tensor->IsConstant()); + attr->pad_const = Scalar(*padConstConn->tensor); + } + } + else if ( opType == OpType::Slice ) + { + auto *attr = operation->Attribute(); + // Start shape can be a compile time constant tensor + if ( auto startConn = operation->Input(TensorUsage::Params0) ) + { + assert(startConn->tensor->IsConstant()); + attr->begin = TensorToShape(startConn->tensor.get(), startConn->shape.Elements()); + } + // Size shape can be a compile time constant tensor + if ( auto sizeConn = operation->Input(TensorUsage::Params1) ) + { + assert(sizeConn->tensor->IsConstant()); + attr->size = TensorToShape(sizeConn->tensor.get(), sizeConn->shape.Elements()); + } + } + else if ( opType == OpType::Resize ) + { + auto *attr = operation->Attribute(); + // Scale can be a compile time constant tensor + if ( const auto scaleConn = operation->Input(TensorUsage::Params0) ) + { + assert(scaleConn->tensor->IsConstant()); + auto scale = TensorToShape(scaleConn->tensor.get(), scaleConn->shape.Elements()); + 
attr->scaleY.n = scale[0]; + attr->scaleY.d = scale[1]; + attr->scaleX.n = scale[2]; + attr->scaleX.d = scale[3]; + } + // Offset can be a compile time constant tensor + if ( const auto offsetConn = operation->Input(TensorUsage::Params1) ) + { + assert(offsetConn->tensor->IsConstant()); + auto offset = TensorToShape(offsetConn->tensor.get(), offsetConn->shape.Elements()); + attr->offset.x = offset[1]; + attr->offset.y = offset[0]; + } + // Border can be a compile time constant tensor + if ( const auto borderConn = operation->Input(TensorUsage::Params2) ) + { + assert(borderConn->tensor->IsConstant()); + auto border = TensorToShape(borderConn->tensor.get(), borderConn->shape.Elements()); + attr->border.x = border[1]; + attr->border.y = border[0]; + } + } + + return operation; +} + +// Convert compile time constant zero point tensors to quantization zero points +Operation *GraphIrOptimiser::ConvertZeroPointTensors(Graph *const graph, Operation *const operation) +{ + UNUSED(graph); + auto SetZeroPoint = [&](TensorUsage target, TensorUsage param, bool asUnsigned = false) + { + if ( const auto zpConn = operation->Input(param) ) + { + assert(zpConn->tensor->IsConstant()); + const auto targetConn = IsOFM(target) ? operation->Output(target) : operation->Input(target); + assert(targetConn); + auto dataType = asUnsigned ? 
zpConn->tensor->Type() & ~unsigned(DataType::Signed) : zpConn->tensor->Type(); + auto values = zpConn->tensor->View().Values(dataType); + targetConn->quantization.zeroPoints = {values.begin(), values.end()}; + } + }; + switch ( operation->Type() ) + { + case OpType::AvgPool: + case OpType::Neg: + SetZeroPoint(TensorUsage::IFM, TensorUsage::Params0); + SetZeroPoint(TensorUsage::OFM, TensorUsage::Params1); + break; + case OpType::Conv2D: + case OpType::Conv3D: + case OpType::DepthwiseConv2D: + case OpType::TransposeConv2D: + SetZeroPoint(TensorUsage::IFM, TensorUsage::Params0); + SetZeroPoint(TensorUsage::Weights, TensorUsage::Params1); + break; + case OpType::MatMul: + SetZeroPoint(TensorUsage::IFM0, TensorUsage::Params0); + SetZeroPoint(TensorUsage::IFM1, TensorUsage::Params1); + break; + case OpType::Rescale: + { + const auto signAttr = operation->Attribute(); + SetZeroPoint(TensorUsage::IFM, TensorUsage::Params2, signAttr->input_unsigned); + SetZeroPoint(TensorUsage::OFM, TensorUsage::Params3, signAttr->output_unsigned); + break; + } + default: + break; + } return operation; } @@ -254,21 +377,67 @@ Operation *GraphIrOptimiser::ConvertResizeOffsets(Graph *const graph, Operation return returnOp; } +template +static T Scale(int64_t v, const Quantization &quant, RoundMode rounding = RoundMode::AUTO, int doubleRound = 0) +{ + assert(doubleRound >= 0 && doubleRound < 31 && "Illegal double round"); + if ( !quant.IsUnitScale() ) + { + const auto &qs = quant.scales.front(); + assert(qs.shift >= 0 && qs.shift <= 63); + int64_t round = 1ll << (qs.shift - 1); // Natural round + const int64_t D = (qs.shift > 31 - doubleRound) ? 1ll << (30 - doubleRound) : 0; // Double round + switch ( rounding ) + { + case RoundMode::AUTO: + case RoundMode::NATURAL: + break; + case RoundMode::SYMMETRIC: + if ( v < 0 ) round -= 1; + break; + case RoundMode::TRUNCATE: + round = v >= 0 ? 
0 : (1ll << qs.shift) - 1; + break; + case RoundMode::TRUNCATE_TO_LOWER: + round = 0; + break; + case RoundMode::DBL: + round += (v >= 0 ? D : -D); + break; + case RoundMode::DOUBLE_ASYMMETRIC: + round += D; + break; + default: + assert(false && "Unsupported rounding"); + } + v *= qs.scale; + v = (v + round) >> qs.shift; + assert(v > std::numeric_limits::min() && v < std::numeric_limits::max() && "Overflow - Unpredictable result"); + } + return T(v); +} + template struct EwShl { - T operator()(T a, T b) + int64_t operator()(T a, T b) { assert(b >= 0); - return T(std::make_unsigned_t(a) << std::make_unsigned_t(b)); + return int64_t(std::make_unsigned_t(a) << std::make_unsigned_t(b)); } }; +template +struct EwMul +{ + int64_t operator()(T a, T b) { return int64_t(a) * int64_t(b); } +}; + template static std::vector BroadcastValues(const Tensor *in, const Shape &oShape) { const Shape &iShape = in->StorageShape(); - const auto &iData = in->View().Values(); + const auto &iData = in->View().Values(in->Type()); const int elementCnt = oShape.Elements(); std::vector ret(elementCnt); @@ -324,7 +493,7 @@ std::shared_ptr ConstPropEw(Operation *const operation) for ( int i = 0; i < oShape.Elements(); i++ ) { - c[i] = F()(v0[i], v1[i]); + c[i] = Scale(F()(v0[i], v1[i]), ofmConn->quantization, ofmConn->rounding); } return std::make_shared(std::move(c)); @@ -338,23 +507,13 @@ std::shared_ptr ConstPropEw(Operation *const operation) switch ( dataType ) { case DataType::Int8: - { return ConstPropEw(operation); - } - break; case DataType::Int16: - { return ConstPropEw(operation); - } - break; case DataType::Int32: - { return ConstPropEw(operation); - } - break; default: return {}; - break; } } @@ -365,28 +524,35 @@ Operation *GraphIrOptimiser::ConstPropagation(Graph *const graph, Operation *con { if ( !IsIFM(usage) ) continue; - if ( !ifmConn.tensor->IsConstant() ) + if ( !ifmConn.tensor->IsConstant() || !ifmConn.quantization.IsUnitScale() ) { return operation; } } + auto *ofmConn = 
operation->Output(TensorUsage::OFM); + if ( ofmConn->quantization.type != QuantizationType::EXPLICIT ) + { + // TODO: Remove this restriction when MLBEDSW-10086 is implemented + return operation; + } + // Op has only constant input and result can be computed std::shared_ptr ofmBuf; switch ( operation->Type() ) { case OpType::SHL: - { ofmBuf = ConstPropEw(operation); - } - break; + break; + case OpType::Mul: + ofmBuf = ConstPropEw(operation); + break; default: break; } if ( ofmBuf ) { - auto *ofmConn = operation->Output(TensorUsage::OFM); auto *ofm = ofmConn->tensor.get(); ofm->SetBuffer(ofmBuf); @@ -489,8 +655,6 @@ Operation *GraphIrOptimiser::RewriteRescaleInputs(Graph *const, Operation *const auto ifmConn = operation->Input(TensorUsage::IFM); auto mulConn = operation->Input(TensorUsage::Params); auto shiftConn = operation->Input(TensorUsage::Params1); - auto mulView = mulConn->tensor->View(); - auto shiftView = shiftConn->tensor->View(); auto inT = ifmConn->tensor->Type(); auto mulT = mulConn->tensor->Type(); auto shiftT = shiftConn->tensor->Type(); @@ -499,17 +663,15 @@ Operation *GraphIrOptimiser::RewriteRescaleInputs(Graph *const, Operation *const std::vector newScale; auto *attr = operation->Attribute(); int channels = attr->per_channel ? ofmConn->shape.Depth() : 1; + const auto mulValues = mulConn->tensor->View().Values(mulT); + const auto shiftValues = shiftConn->tensor->View().Values(); for ( int i = 0; i < channels; i++ ) { - QuantizedScale qScale; - int32_t scale = mulT == DataType::Int32 ? mulView.Values()[i] : mulView.Values()[i]; - int32_t shift = shiftView.Values()[i]; + int32_t scale = mulValues[i]; + int32_t shift = shiftValues[i]; assert(attr->scale32 || static_cast(scale) == scale); assert(static_cast(shift) == shift); - - qScale.scale = attr->scale32 ? scale : static_cast(scale); - qScale.shift = shift; - newScale.emplace_back(qScale); + newScale.emplace_back(QuantizedScale{attr->scale32 ? 
scale : static_cast(scale), shift}); } ofmConn->quantization.scales = std::move(newScale); auto rescaleOp = operation->shared_from_this(); @@ -620,6 +782,7 @@ Operation *GraphIrOptimiser::RewriteRescale(Graph *const, Operation *const opera { auto castOp = std::make_shared(OpType::Cast); std::shared_ptr ifm32Tens = ifmConn->tensor->Clone(); + ifm32Tens->SetBuffer(nullptr); castOp->ConnectInput(TensorUsage::IFM, ifmConn->tensor).quantization.zeroPoints = ifmConn->quantization.zeroPoints; ifmConn->quantization.zeroPoints.clear(); @@ -825,8 +988,9 @@ Operation *GraphIrOptimiser::RewritePad(Graph *const, Operation *const operation const int padConst = int(attr->pad_const) + zeroPoint; // Decode the padding before and after each dimension as two shapes - Shape paddingBefore = TensorToShape(paramsConn->tensor.get(), paramsConn->shape.Width(), 2, 0); - Shape paddingAfter = TensorToShape(paramsConn->tensor.get(), paramsConn->shape.Width(), 2, 1); + assert(paramsConn->shape.Elements() == 2 * ifmConn->shape.Size()); + Shape paddingBefore = TensorToShape(paramsConn->tensor.get(), ifmConn->shape.Size(), 2, 0); + Shape paddingAfter = TensorToShape(paramsConn->tensor.get(), ifmConn->shape.Size(), 2, 1); std::shared_ptr padTensor; DataType dataType = ofmConn->tensor->Type(); @@ -1407,7 +1571,7 @@ Operation *GraphIrOptimiser::RewriteSlice(Graph *const graph, Operation *const o { const auto *ifmConn = operation->Input(TensorUsage::IFM); const auto *ofmConn = operation->Output(TensorUsage::OFM); - const auto *attr = operation->Attribute(); + auto *attr = operation->Attribute(); const Shape begin = attr->begin; const Shape size = attr->size; @@ -1520,6 +1684,7 @@ Operation *GraphIrOptimiser::RewriteReduceSum(Graph *const graph, Operation *con // Create intermediate tensor between Transpose and ReduceSum std::shared_ptr transposeTens = ifmConn->tensor->Clone(); + transposeTens->SetBuffer(nullptr); transposeTens->SetName(ifmConn->tensor->Name() + "_transpose"); 
transposeTens->SetStorageShape(ifmShape3D.Extract(0, 2, 1)); @@ -1665,10 +1830,7 @@ Operation *GraphIrOptimiser::RewriteTile(Graph *const, Operation *const operatio assert(params); // Convert params tensor to vector - auto view = params->tensor->View(); - assert(params->tensor->Type() == DataType::Int32); - - Shape multiples(view.Buffer()->Data(), view.ViewShape().Elements()); + Shape multiples = TensorToShape(params->tensor.get(), params->shape.Elements()); // axisMask contains ones for every axis that needs to be tiled. // e.g. if H,W are tiled, axisMask will be 0110 @@ -2019,7 +2181,6 @@ Operation *GraphIrOptimiser::RewriteTransposeConvOFMPadding(Graph *const graph, auto attr = operation->Attribute(); assert(attr); assert(attr->outPadTBLR.IsValid()); - assert(attr->outShape.IsValid()); if ( attr->outPadTBLR.IsEmpty() ) { // no out-padding diff --git a/ethosu/regor/compiler/graphir_optimiser.hpp b/ethosu/regor/compiler/graphir_optimiser.hpp index 1dd25cdf..13691e69 100644 --- a/ethosu/regor/compiler/graphir_optimiser.hpp +++ b/ethosu/regor/compiler/graphir_optimiser.hpp @@ -43,6 +43,8 @@ private: Operation *ConstPropagation(Graph *const graph, Operation *const operation); Operation *RewriteConst(Graph *const graph, Operation *const operation); Operation *ConvertAttributes(Graph *const graph, Operation *const operation); + Operation *ConvertAttributeTensors(Graph *const graph, Operation *const operation); + Operation *ConvertZeroPointTensors(Graph *const graph, Operation *const operation); Operation *ConvertResizeOffsets(Graph *const graph, Operation *const operation); Tensor *ConvertInt48Tensors(Graph *graph, Tensor *tensor); Tensor *ConvertBool8Tensors(Graph *graph, Tensor *tensor); @@ -117,6 +119,8 @@ private: { {}, { + &GraphIrOptimiser::ConvertAttributeTensors, + &GraphIrOptimiser::ConvertAttributes, &GraphIrOptimiser::ConstPropagation, }, true, @@ -124,7 +128,7 @@ private: { {}, { - &GraphIrOptimiser::ConvertAttributes, + 
&GraphIrOptimiser::ConvertZeroPointTensors, &GraphIrOptimiser::RewriteRescaleInputs, &GraphIrOptimiser::RemoveRescaleUnsignedAttribute, &GraphIrOptimiser::FuseRescale, // First pass fuse all possible ifm and ofm rescales diff --git a/ethosu/regor/compiler/high_level_command_stream_generator.cpp b/ethosu/regor/compiler/high_level_command_stream_generator.cpp index 0141d6a4..b3efcf5c 100644 --- a/ethosu/regor/compiler/high_level_command_stream_generator.cpp +++ b/ethosu/regor/compiler/high_level_command_stream_generator.cpp @@ -24,6 +24,7 @@ #include "common/box.hpp" #include "common/numeric_util.hpp" #include "common/vector_span.hpp" +#include "compiler/operation_util.hpp" #include "high_level_command_stream.hpp" #include "scheduler.hpp" @@ -448,9 +449,7 @@ static std::shared_ptr MakeOperation(SchedulerOperation *schedOp, auto *ifmConn = schedOp->Input(TensorUsage::IFM); auto *params = schedOp->Input(TensorUsage::Params); assert(params); - assert(params->Type() == DataType::Int32); - auto view = params->tensor->srcTensor->View(); - Shape multiples(view.Buffer()->Data(), view.ViewShape().Elements()); + Shape multiples = TensorToShape(params->tensor->srcTensor.get(), params->shape.Elements()); multiples = Shape::PadAxes(multiples, ifmConn->shape.Size(), 1); unsigned axisMask = multiples.GreaterMask(multiples.WithOnes()); assert((axisMask == 0 || IsPowerOfTwo(axisMask)) && "TILE operation should only have one tiled axis"); diff --git a/ethosu/regor/compiler/kernel.hpp b/ethosu/regor/compiler/kernel.hpp index 6c7a5ad4..9187d7e4 100644 --- a/ethosu/regor/compiler/kernel.hpp +++ b/ethosu/regor/compiler/kernel.hpp @@ -91,7 +91,6 @@ public: _padding = Margin(kernel->paddingTBLRNF[0], kernel->paddingTBLRNF[2], kernel->paddingTBLRNF[1], kernel->paddingTBLRNF[3], kernel->paddingTBLRNF[4], kernel->paddingTBLRNF[5]); _depthMultiplier = 0; - assert(_size.x > 0 && _size.y > 0); } Kernel(Point2i size, Point2i stride, Point2i dilation, int depthMultiplier = 1, Margin padding = 
Margin(0, 0, 0, 0)) diff --git a/ethosu/regor/compiler/op_type.hpp b/ethosu/regor/compiler/op_type.hpp index 512b0d18..ab0d8570 100644 --- a/ethosu/regor/compiler/op_type.hpp +++ b/ethosu/regor/compiler/op_type.hpp @@ -35,11 +35,12 @@ enum class OpType : uint16_t Conv2D, Conv3D, DepthwiseConv2D, - FullyConnected, MatMul, MaxPool, + // RFFT TransposeConv2D, Clamp, + // Erf Sigmoid, Tanh, Add, @@ -73,8 +74,8 @@ enum class OpType : uint16_t Equal, Greater, GreaterEqual, - ReduceAny, ReduceAll, + ReduceAny, ReduceMax, ReduceMin, ReduceProduct, @@ -94,6 +95,11 @@ enum class OpType : uint16_t Identity, If, While, + // Yield + // Variable + // VariabeWrite + // VariableRead + // ConstShape // Regor Internal Operators MemoryCopy, @@ -129,6 +135,7 @@ enum class OpType : uint16_t Fill, FloorDiv, FloorMod, + FullyConnected, GatherNd, GatherV2, HardSwish, diff --git a/ethosu/regor/compiler/operation_util.hpp b/ethosu/regor/compiler/operation_util.hpp index a52640ef..d8dbfd19 100644 --- a/ethosu/regor/compiler/operation_util.hpp +++ b/ethosu/regor/compiler/operation_util.hpp @@ -78,6 +78,38 @@ inline std::shared_ptr CreateConstTensor(const std::string &name, DataTy }; } +// Returns the DataType scalar value from a Buffer as the templated type +template +TYPE Scalar(const Buffer &from, DataType type) +{ + assert(from.Size() >= DataTypeStorageSizeBytes(type, 1) && "Not enough data for scalar of DataType"); + switch ( type ) + { + case DataType::Int4Packed8: + return TYPE((from.Data()[0] << 4) >> 4); + case DataType::Bool8: + return TYPE(from.Data()[0]); + case DataType::Int48: + return TYPE(int64_t(from.Data()[0])); +#define TYPE_FUNC(x) \ + case DataTypeOf::value: \ + return TYPE(from.Data()[0]) + FOR_ALL_INT_TYPES(TYPE_FUNC, ;); +#undef TYPE_FUNC + default: + assert(false && "Unexpected DataType"); + return TYPE(from.Data()[0]); + } +} + +// Returns the scalar value of a Tensor's buffer as the templated type +template +TYPE Scalar(const Tensor &from) +{ + 
assert(from.IsConstant() && "Tensor has no constant buffer"); + return Scalar(*from.Buffer(), from.Type()); +} + // Convert a constant Tensor to a Shape // Parameters: // - tensor: Tensor to convert to shape. diff --git a/ethosu/regor/compiler/scheduler_decompose.cpp b/ethosu/regor/compiler/scheduler_decompose.cpp index 3503973e..cd203987 100644 --- a/ethosu/regor/compiler/scheduler_decompose.cpp +++ b/ethosu/regor/compiler/scheduler_decompose.cpp @@ -39,6 +39,11 @@ Flags OperatorQuery(Architecture *arch, const SchedulerOperation *s Set(query.ifm[0], schedOp->IFM(0)); Set(query.ifm[1], schedOp->TryIFM(1)); Set(query.ofm, ofmConn); + const auto weights = schedOp->TryInput(TensorUsage::Weights); + const auto scales = schedOp->TryInput(TensorUsage::Scales); + const bool constantWeights = weights && weights->tensor && weights->tensor->IsConstant(); + const bool constantScales = scales && scales->tensor && scales->tensor->IsConstant(); + query.weightFormat = constantWeights && constantScales ? 
WeightFormat::Default : WeightFormat::None; query.transposeMask = ofmConn->transpose; query.reverseMask = ofmConn->reverse; query.kernel = schedOp->Kernel(); @@ -240,10 +245,17 @@ bool NeedsDecompose(Architecture *arch, const SchedulerOperation *schedOp) bool CanDecompose(Architecture *, const SchedulerOperation *schedOp) { - if ( schedOp->Type() == OpType::Conv2D ) return true; - if ( schedOp->Type() == OpType::Conv3D ) return true; - if ( schedOp->Type() == OpType::DepthwiseConv2D ) return true; - if ( schedOp->Type() == OpType::TransposeConv2D ) return true; + // TODO: This additional check can be removed when decomposing + // non-constant weights/scales is implemented + auto weights = schedOp->TryInput(TensorUsage::Weights); + auto scales = schedOp->TryInput(TensorUsage::Scales); + bool constantWeights = weights && weights->tensor && weights->tensor->IsConstant(); + bool constantScales = scales && scales->tensor && scales->tensor->IsConstant(); + + if ( schedOp->Type() == OpType::Conv2D && constantWeights && constantScales ) return true; + if ( schedOp->Type() == OpType::Conv3D && constantWeights && constantScales ) return true; + if ( schedOp->Type() == OpType::DepthwiseConv2D && constantWeights && constantScales ) return true; + if ( schedOp->Type() == OpType::TransposeConv2D && constantWeights && constantScales ) return true; if ( DecomposeAsElementwise(schedOp->Type()) || schedOp->Type() == OpType::MemoryCopy ) return true; if ( schedOp->Type() == OpType::MatMul ) return true; if ( schedOp->Type() == OpType::Resize ) return true; diff --git a/ethosu/regor/compiler/tensor.cpp b/ethosu/regor/compiler/tensor.cpp index 1b2fc6f6..955bf081 100644 --- a/ethosu/regor/compiler/tensor.cpp +++ b/ethosu/regor/compiler/tensor.cpp @@ -1,5 +1,5 @@ // -// SPDX-FileCopyrightText: Copyright 2021-2024 Arm Limited and/or its affiliates +// SPDX-FileCopyrightText: Copyright 2021-2025 Arm Limited and/or its affiliates // // SPDX-License-Identifier: Apache-2.0 // @@ -42,7 +42,7 
@@ Tensor::Tensor(const std::string &name, DataType type, Shape shape) : { } -Tensor::Tensor(const std::string &name, DataType type, Shape shape, const std::shared_ptr &buffer) : +Tensor::Tensor(const std::string &name, DataType type, Shape shape, const std::shared_ptr &buffer) : _name(name), _type(type), _uid(GenerateUniqueId()), _storageShape(shape), _buffer(buffer) { assert(DataTypeStorageSizeBytes(type, shape.Elements()) <= buffer->Size()); diff --git a/ethosu/regor/compiler/tensor.hpp b/ethosu/regor/compiler/tensor.hpp index 805697a3..fe28f948 100644 --- a/ethosu/regor/compiler/tensor.hpp +++ b/ethosu/regor/compiler/tensor.hpp @@ -1,5 +1,5 @@ // -// SPDX-FileCopyrightText: Copyright 2021-2024 Arm Limited and/or its affiliates +// SPDX-FileCopyrightText: Copyright 2021-2025 Arm Limited and/or its affiliates // // SPDX-License-Identifier: Apache-2.0 // @@ -55,7 +55,7 @@ private: public: Tensor(const std::string &name, DataType type); Tensor(const std::string &name, DataType type, Shape shape); - Tensor(const std::string &name, DataType type, Shape shape, const std::shared_ptr &buffer); + Tensor(const std::string &name, DataType type, Shape shape, const std::shared_ptr &buffer); const std::string &Name() const { return _name; } void SetName(const std::string &name) { _name = name; } @@ -65,6 +65,7 @@ public: const Shape &StorageShape() const { return _storageShape; } void SetStorageShape(const Shape &shape) { _storageShape = shape; } void SetBuffer(const std::shared_ptr &buffer) { _buffer = buffer; } + const class Buffer *Buffer() const { return _buffer.get(); } BufferView View() const; bool IsConstant() const; diff --git a/ethosu/regor/compiler/tensor_properties.hpp b/ethosu/regor/compiler/tensor_properties.hpp index 0ae2508f..b4ca09ce 100644 --- a/ethosu/regor/compiler/tensor_properties.hpp +++ b/ethosu/regor/compiler/tensor_properties.hpp @@ -60,6 +60,7 @@ enum class TensorUsage : uint32_t Params0 = Params, Params1 = 0x100 | Params, Params2 = 0x200 | Params, + 
Params3 = 0x300 | Params, Scratch0 = Scratch, }; diff --git a/ethosu/regor/compiler/tflite_graph_optimiser.cpp b/ethosu/regor/compiler/tflite_graph_optimiser.cpp index dc15f09e..9fd7a2a7 100644 --- a/ethosu/regor/compiler/tflite_graph_optimiser.cpp +++ b/ethosu/regor/compiler/tflite_graph_optimiser.cpp @@ -137,9 +137,8 @@ Operation *TFLiteGraphOptimiser::ConvertLeakyRelu16bit(TensorConnection &ifmConn assert(params->tensor->IsConstant()); assert(params->tensor->Type() == DataType::Int16); assert(params->quantization.zeroPoints.size() > 0); - auto view = params->tensor->View(); // Set scalar and alphaQuant accordingly - scalar = int64_t(view.Values()[0]) - params->quantization.zeroPoints[0]; + scalar = Scalar(*params->tensor) - params->quantization.zeroPoints[0]; alphaQuant = params->quantization; } @@ -231,18 +230,11 @@ int TFLiteGraphOptimiser::GetAxis(const Operation *const operation) axis = operation->Attribute()->axis; break; case OpType::Split: - { - auto *paramConn = operation->Input(TensorUsage::Params); - axis = paramConn->tensor->View().Values()[0]; + axis = Scalar(*operation->Input(TensorUsage::Params)->tensor); break; - } case OpType::SplitV: - { - auto usage = MakeTensorUsage(TensorUsage::Params, 1); - auto *paramConn = operation->Input(usage); - axis = paramConn->tensor->View().Values()[0]; + axis = Scalar(*operation->Input(TensorUsage::Params1)->tensor); break; - } default: break; } @@ -676,8 +668,10 @@ Operation *TFLiteGraphOptimiser::RewriteSlice(Graph *const graph, Operation *con assert(sliceOffset + sliceShape <= ifmConn->shape); assert(sliceOffset >= ifmConn->shape.WithZeros()); assert(sliceShape == ofmConn->shape); - attr->size = sliceShape; - attr->begin = sliceOffset; + // Update the shape tensor to guarantee no -1 values + sizeParamConn->tensor->SetBuffer(nullptr); + sizeParamConn->tensor->ChangeType(DataType::Int32); + sizeParamConn->tensor->SetBuffer(std::make_shared(sliceShape.ToList())); } return returnOp; } @@ -1036,7 +1030,7 @@ 
Operation *TFLiteGraphOptimiser::ConvertScatter(Graph *const graph, Operation *c int N = 1; // Calculate GraphIR Scatter K dim - int K = shapeConn->tensor->View().Values()[0]; + int K = Scalar(*shapeConn->tensor); // Calculate GraphIR Scatter W dim int W = 1; @@ -2009,22 +2003,8 @@ Operation *TFLiteGraphOptimiser::ConvertPrelu(Graph *const graph, Operation *con int64_t alphaZp = 0; int alphaMin = 0; int alphaMax = 0; - BufferReader reader; - switch ( params->tensor->Type() ) - { - case DataType::Int8: - reader = alpha.Values(); - break; - case DataType::UInt8: - reader = alpha.Values(); - break; - case DataType::Int16: - reader = alpha.Values(); - break; - default: - assert(false); - }; - auto alphaMinMax = std::minmax_element(reader.begin(), reader.end()); + auto values = alpha.Values(params->tensor->Type()); + auto alphaMinMax = std::minmax_element(values.begin(), values.end()); alphaMin = *alphaMinMax.first; alphaMax = *alphaMinMax.second; if ( alphaQuant.zeroPoints.size() ) @@ -2269,19 +2249,10 @@ Operation *TFLiteGraphOptimiser::Convert8bitLeakyReluToLUT(Graph *const graph, O assert(params->tensor->IsConstant()); assert(params->quantization.scales.size() > 0); assert(params->quantization.zeroPoints.size() > 0); - auto view = params->tensor->View(); QuantizedScale alphaQuant = QuantizedScale(alpha); auto alphaZp = params->quantization.zeroPoints[0]; - if ( params->tensor->Type() == DataType::Int8 ) - { - scalar = int64_t(view.Values()[0]) - alphaZp; - alphaQuant = params->quantization.scales[0]; - } - else if ( params->tensor->Type() == DataType::UInt8 ) - { - scalar = int64_t(view.Values()[0]) - alphaZp; - alphaQuant = params->quantization.scales[0]; - } + scalar = Scalar(*params->tensor) - alphaZp; + alphaQuant = params->quantization.scales[0]; alphaScale = ElementwiseMulScale(ifmScale, alphaQuant.Dequantize(), ofmScale); } @@ -2440,17 +2411,7 @@ int TFLiteGraphOptimiser::GetPadValue(BufferReader &padValues, int numPadVa BufferReader 
TFLiteGraphOptimiser::GetPadValuesFromTensor(const std::shared_ptr tensor) { - BufferReader padValues; - if ( tensor->Type() == DataType::Int32 ) - { - padValues = tensor->View().Values(); - } - else - { - assert(tensor->Type() == DataType::Int64); - padValues = tensor->View().Values(); - } - return padValues; + return tensor->View().Values(tensor->Type()); } // Lower PadV2 to TOSA Pad @@ -2469,7 +2430,7 @@ Operation *TFLiteGraphOptimiser::ConvertPadV2(Graph *const graph, Operation *con // This is undoing the existing zero point adjustment to counteract the zero point adjustment // which is done in GraphIR lowering of Pad. int zeroPoint = ofmConn->quantization.IsValid() ? static_cast(ofmConn->quantization.zeroPoints[0]) : 0; - attr->pad_const = padConstTens->View().Values(padConstTens->Type())[0] - zeroPoint; + attr->pad_const = Scalar(*padConstTens) - zeroPoint; RecordOptimisation(*operation, padOp.get()); operation->Disconnect(); diff --git a/ethosu/regor/compiler/tosa_graph_validator.cpp b/ethosu/regor/compiler/tosa_graph_validator.cpp index 65d0846a..88531982 100644 --- a/ethosu/regor/compiler/tosa_graph_validator.cpp +++ b/ethosu/regor/compiler/tosa_graph_validator.cpp @@ -27,16 +27,8 @@ namespace std::optional MaybeGetTosaVersion(uint32_t syntaxVersion) { - if ( syntaxVersion == 0 ) syntaxVersion = (GraphApi::VERSION_TOSA_0_80 | GraphApi::PROFILE_BASELINE); - if ( (syntaxVersion & GraphApi::VERSION_TOSA_0_60) == GraphApi::VERSION_TOSA_0_60 ) - { - return GraphApi::VERSION_TOSA_0_60; - } - else if ( (syntaxVersion & GraphApi::VERSION_TOSA_0_80) == GraphApi::VERSION_TOSA_0_80 ) - { - return GraphApi::VERSION_TOSA_0_80; - } - else if ( (syntaxVersion & GraphApi::VERSION_TOSA_1_00) == GraphApi::VERSION_TOSA_1_00 ) + if ( syntaxVersion == 0 ) syntaxVersion = (GraphApi::VERSION_TOSA_1_00 | GraphApi::PROFILE_BASELINE); + if ( (syntaxVersion & GraphApi::VERSION_TOSA_1_00) == GraphApi::VERSION_TOSA_1_00 ) { return GraphApi::VERSION_TOSA_1_00; } @@ -56,7 +48,7 @@ bool 
TosaGraphValidator::HandlesSyntax(uint32_t syntaxVersion) TosaGraphValidator::TosaGraphValidator(GraphNotation notation, uint32_t syntaxVersion, Compiler *compiler) : GraphValidator(notation, syntaxVersion) { - _context.version = MaybeGetTosaVersion(syntaxVersion).value_or(GraphApi::VERSION_TOSA_0_60); + _context.version = MaybeGetTosaVersion(syntaxVersion).value_or(GraphApi::VERSION_TOSA_1_00); if ( (syntaxVersion & GraphApi::PROFILE_MAIN) == GraphApi::PROFILE_MAIN ) { diff --git a/ethosu/regor/include/graphapi.hpp b/ethosu/regor/include/graphapi.hpp index ff215ea0..e0af5cec 100644 --- a/ethosu/regor/include/graphapi.hpp +++ b/ethosu/regor/include/graphapi.hpp @@ -52,6 +52,7 @@ enum class GraphTensorUsage : uint32_t Params0 = Params, Params1 = 0x100 | Params, Params2 = 0x200 | Params, + Params3 = 0x300 | Params, }; constexpr inline GraphTensorUsage MakeTensorUsage(GraphTensorUsage type, int index) @@ -76,6 +77,7 @@ enum class AxisOrder : uint16_t /// enum class GraphDataType : uint16_t { + Unknown = 0, Bool8 = 1, Int4Packed8, Int8, @@ -88,6 +90,8 @@ enum class GraphDataType : uint16_t UInt32, UInt48, UInt64, + Float8e4m3, + Float8e5m2, BFloat16, Float16, Float32, @@ -172,8 +176,6 @@ enum class BufferMapping }; // Freeform syntax versioning -static constexpr uint32_t VERSION_TOSA_0_60 = 0x00003C00; -static constexpr uint32_t VERSION_TOSA_0_80 = 0x00005000; static constexpr uint32_t VERSION_TOSA_1_00 = 0x01000000; static constexpr int32_t PROFILE_BASELINE = 0; static constexpr int32_t PROFILE_MAIN = 1; diff --git a/ethosu/regor/include/graphapi_tosa_types.hpp b/ethosu/regor/include/graphapi_tosa_types.hpp index f9fad47f..9d0b80fd 100644 --- a/ethosu/regor/include/graphapi_tosa_types.hpp +++ b/ethosu/regor/include/graphapi_tosa_types.hpp @@ -1,5 +1,5 @@ // -// SPDX-FileCopyrightText: Copyright 2023-2024 Arm Limited and/or its affiliates +// SPDX-FileCopyrightText: Copyright 2023-2025 Arm Limited and/or its affiliates // // SPDX-License-Identifier: Apache-2.0 // @@ 
-38,6 +38,9 @@ enum class DType : uint32_t UINT16 = 9, FP16 = 10, BF16 = 11, + SHAPE = 12, + FP8E4M3 = 13, + FP8E5M2 = 14, }; enum class ResizeMode : uint32_t @@ -122,6 +125,10 @@ enum class Op : uint32_t RFFT2D = 70, ERF = 71, DIM = 72, + VARIABLE = 73, + VARIABLE_WRITE = 74, + VARIABLE_READ = 75, + CONST_SHAPE = 76, }; } // namespace tosa diff --git a/ethosu/regor/test/test_shape.cpp b/ethosu/regor/test/test_shape.cpp index 735b950d..2461ac89 100644 --- a/ethosu/regor/test/test_shape.cpp +++ b/ethosu/regor/test/test_shape.cpp @@ -476,3 +476,16 @@ TEST_CASE("Is reduced equal") REQUIRE(Shape::IsReducedEqual(shape2a, shape2b)); REQUIRE(Shape::IsReducedEqual(shape2b, shape2a)); } + +TEST_CASE("Shape: From iterator") +{ + int axes = GENERATE(range(1, MAX_TEST_DIMS, 2)); + std::vector temp = random_vector(axes, 1, 32767); + Shape a(temp.data(), axes); + Shape b(temp.begin(), axes); + Shape c(temp.begin(), temp.end()); + + REQUIRE(!a.IsEmpty()); + REQUIRE(a == b); + REQUIRE(b == c); +} diff --git a/ethosu/regor/test/test_tosa_validator.cpp b/ethosu/regor/test/test_tosa_validator.cpp index f8096d84..f9e70961 100644 --- a/ethosu/regor/test/test_tosa_validator.cpp +++ b/ethosu/regor/test/test_tosa_validator.cpp @@ -1,5 +1,5 @@ // -// SPDX-FileCopyrightText: Copyright 2021-2024 Arm Limited and/or its affiliates +// SPDX-FileCopyrightText: Copyright 2021-2025 Arm Limited and/or its affiliates // // SPDX-License-Identifier: Apache-2.0 // @@ -72,7 +72,6 @@ TEST_CASE("tosa_validator") void CreateTestVectors(GraphApi::IGraphBuilder *builder) { expectedToFail.emplace_back(TestOperation{tosa::Op::ABS, nullptr, "Null GraphOperation"}); - expectedToFail.emplace_back(TestOperation{tosa::Op::DIM, builder->CreateOp(tosa::Op::DIM, nullptr), "DIM not supported in 0.60"}); expectedToFail.emplace_back(TestOperation{tosa::Op::SIGMOID, builder->CreateOp(tosa::Op::SIGMOID, nullptr), "SIGMOID unsupported in BI"}); { auto op{builder->CreateOp(tosa::Op::ABS, nullptr)}; diff --git 
a/ethosu/regor/tflite/tflite_reader.cpp b/ethosu/regor/tflite/tflite_reader.cpp index 7778eb80..44ed7960 100644 --- a/ethosu/regor/tflite/tflite_reader.cpp +++ b/ethosu/regor/tflite/tflite_reader.cpp @@ -698,12 +698,12 @@ void TfLiteReader::ParseOperatorOptions( int axis = 0; if ( params->tensor->Type() == DataType::Int64 ) { - assert(params->tensor->View().Values()[0] < std::numeric_limits::max() && "Too large Argmax axis attribute"); - axis = ClampToType(params->tensor->View().Values()[0]); + assert(Scalar(*params->tensor) < std::numeric_limits::max() && "Too large Argmax axis attribute"); + axis = ClampToType(Scalar(*params->tensor)); } else { - axis = params->tensor->View().Values()[0]; + axis = Scalar(*params->tensor); } if ( axis < 0 ) { diff --git a/ethosu/regor/tosa/tosaValidationGenerator.rb b/ethosu/regor/tosa/tosaValidationGenerator.rb index 46440a55..b2f9bfe0 100755 --- a/ethosu/regor/tosa/tosaValidationGenerator.rb +++ b/ethosu/regor/tosa/tosaValidationGenerator.rb @@ -94,19 +94,22 @@ REGOR_OP_NAMES = { #'DIM', 'OpType::CurrentlyUnsupported'}, } def parse_options - options = {profile: 'BI'} + options = {profile: 'BI', extensions: ['EXT-INT16', 'EXT-INT4']} OptionParser.new do |opts| opts.banner = "Usage: tosaValidationGenerator [options]" opts.on('-s [ARG]', '--specification [ARG]', "Path to the TOSA Specification git.") do |v| options[:spec] = v end - opts.on('-p [ARG]', '--profile [ARG]', "TOSA profile (BI|MI)") do |v| + opts.on('-p [ARG]', '--profile [ARG]', "TOSA profile (BI|MI|PRO-INT|PRO-FP)") do |v| options[:profile] = v end opts.on('-h', '--help', 'Display this help') do puts opts exit end + opts.on('--extensions x,y,z', Array, "Supported extensions") do |extensions| + options[:extensions] = extensions + end end.parse! if (options[:spec].nil?) 
abort("No specification path (-s/--specification option required) ") @@ -114,7 +117,7 @@ def parse_options if (!File.file?("%s/tosa.xml" % options[:spec]) || !File.file?("%s/tosa_spec.adoc" % options[:spec])) abort("No TOSA Specification found at %s" % options[:spec]) end - if (options[:profile] != 'BI') + if (options[:profile] != 'BI' && options[:profile] != 'PRO-INT') abort("Profile %s not supported." % options[:profile]) end options @@ -164,7 +167,7 @@ class TosaValidator end def versioned_nametag - specArgs = [@specVersion[:major], @specVersion[:minor], @specVersion[:patch], @specVersion[:draft] ? "_draft":"", @profile] + specArgs = [@specVersion[:major], @specVersion[:minor], @specVersion[:patch], @specVersion[:draft] ? "_draft":"", @profile.tr('-','_')] nametag = "Version_%s_%s_%s%s_Profile_%s" % specArgs end @@ -501,7 +504,7 @@ class TosaValidator f.write emit_file_header(is_header: true) f.write "#include \"include/graphapi.hpp\"\n\n" - f.write "#include \n\n" + f.write "#include \n" f.write "#include \n\n" f.write "namespace GraphApi\n{\n" f.write"struct GraphOperation;\n" @@ -516,10 +519,10 @@ class TosaValidator @level_limits.each { |name, level| f.write indent(1) + "Level%s,\n" % name} f.write "};\n\n" f.write "struct Context\n{\n" - f.write indent(1) + "uint32_t version = GraphApi::VERSION_TOSA_0_80;\n" + f.write indent(1) + "uint32_t version = GraphApi::VERSION_TOSA_1_00;\n" f.write indent(1) + "int32_t profile = GraphApi::PROFILE_BASELINE;\n" f.write indent(1) + "Level level = Level::Level8K;\n" - f.write indent(1) + "std::function GetGraph;\n" + f.write indent(1) + "std::function GetGraph;\n" f.write "};\n\n" versions.each { |version| f.write "%s\n" % version } f.write "void ValidateOperator(const GraphApi::GraphOperation *graphOp, const Context &context = Context{})" @@ -553,7 +556,7 @@ class TosaValidator patch_string = (patch.empty? || patch.to_i == 0 ? "" : "_" + patch) draft_string = draft == "_" ? 
"" : "_DRAFT" tosa_version_const = "GraphApi::VERSION_TOSA_%d_%02d%s%s" % [major.to_i, minor.to_i, patch_string, draft_string] - if (profile == "BI") + if (profile == "BI" || profile == "PRO_INT") tosa_profile_const = "GraphApi::PROFILE_BASELINE" elsif (profile == "MAIN") tosa_profile_const = "GraphApi::PROFILE_MAIN" @@ -593,7 +596,7 @@ class Argument @shape = xml_argument['shape'] parse_dimensions @element_type = xml_argument['tensor-element-type'] - if (@element_type == nil && @type != nil) + if ((@element_type == nil || @element_type == '-') && @type != nil) @element_type = @type.chomp('*') end rank_node = xml_argument.get_elements('rank').first @@ -739,15 +742,31 @@ class Operation typenames = [] xml_typesupports = xml_operator.get_elements('./typesupport') if (xml_typesupports.size() > 0) - xml_types = xml_operator.get_elements('./types').first.get_elements('./type') - xml_types.each {|xml_type| typenames.append(xml_type['name'])} - xml_typesupports.each {|t| parse_typesupport(t, types, typenames)} + xml_types_elem = xml_operator.get_elements('./types') + if (xml_types_elem.size() > 0) + xml_types = xml_types_elem.first.get_elements('./type') + xml_types.each {|xml_type| typenames.append(xml_type['name'])} + xml_typesupports.each {|t| parse_typesupport(t, types, typenames)} + end end types end - def parse_typesupport(xml_typesupport, types, typenames) + def supported_type(xml_typesupport) if (xml_typesupport.children.size == 0) + true + else + supported_profiles = $options[:extensions] + supported_profiles.append($options[:profile]) + profiles = [] + op_profiles = xml_typesupport.get_elements('./op_profile') + op_profiles.each {|op_profile| profiles.append(op_profile['name'])} + (supported_profiles & profiles).any? 
+ end + end + + def parse_typesupport(xml_typesupport, types, typenames) + if (supported_type(xml_typesupport)) mode_types = {} mode = xml_typesupport['mode'] typenames.each {|typename| mode_types[typename] = xml_typesupport[typename] } @@ -820,11 +839,11 @@ def name_tag(obj) tag end -options = parse_options -xmlfile = File.new("%s/tosa.xml" % options[:spec]) +$options = parse_options +xmlfile = File.new("%s/tosa.xml" % $options[:spec]) xml = REXML::Document.new(xmlfile) -doc = Asciidoctor.load_file "%s/tosa_spec.adoc" % options[:spec], safe: :safe, attributes: "generated=%s/out/gen pseudocode=%s/pseudocode" % [File.expand_path(options[:spec]), File.expand_path(options[:spec])] -validator = TosaValidator.new(xml, doc, 'BI', '8K') +doc = Asciidoctor.load_file "%s/tosa_spec.adoc" % $options[:spec], safe: :safe, attributes: "generated=%s/out/gen pseudocode=%s/pseudocode" % [File.expand_path($options[:spec]), File.expand_path($options[:spec])] +validator = TosaValidator.new(xml, doc, $options[:profile], '8K') validator.update_argument_checks validator.update_error_checks validator.update_level_checks diff --git a/ethosu/regor/tosa/tosa_argument_checks.cpp b/ethosu/regor/tosa/tosa_argument_checks.cpp index 9ca6b741..2642197b 100644 --- a/ethosu/regor/tosa/tosa_argument_checks.cpp +++ b/ethosu/regor/tosa/tosa_argument_checks.cpp @@ -15,8 +15,8 @@ // See the License for the specific language governing permissions and // limitations under the License. // -// Generated by tosaValidationGenerator for TOSA Specification 0.60.0 -// Modify by implementing the constraints. +// Partially generated by tosaValidationGenerator for TOSA Specification 1.0.0draft +// TODO: Implement the constraints. 
#include "tosa/tosa_argument_checks.hpp" @@ -64,7 +64,10 @@ const std::unordered_set tosaSupportedTypes = { "tosa_graph_t", "shape_t", "acc_size_t", + "acc_type_t", "resize_mode_t", + "nan_propagation_mode_t", + "rounding_mode_t", }; std::optional MapType(const std::string_view &type) @@ -88,9 +91,11 @@ std::optional MapType(const std::string_view &type) {"bf16_t", DataType::BFloat16}, {"fp32_t", DataType::Float32}, {"index_t", DataType::Int32}, - {"shape_t", DataType::Int32}, + {"shape_t", DataType::Int64}, {"acc_size_t", DataType::Int32}, {"resize_mode_t", DataType::Int32}, + // The actual mul_t type is determined by an attribute + {"mul_t", DataType::Int16}, }; if ( auto p = typeMap.find(type); p != typeMap.end() ) return p->second; @@ -121,11 +126,31 @@ const TensorConnection *GetTensorForArgument(const Operation *op, const Argument {"offset", TensorUsage::Params1}, {"border", TensorUsage::Params2}, {"padding", TensorUsage::Params}, + {"pad_const", TensorUsage::Params1}, + {"start", TensorUsage::Params}, + {"size", TensorUsage::Params1}, + {"input_zp", TensorUsage::Params}, + {"input1_zp", TensorUsage::Params}, + {"A_zp", TensorUsage::Params}, + {"B_zp", TensorUsage::Params1}, + {"weight_zp", TensorUsage::Params1}, + {"output_zp", TensorUsage::Params1}, + {"shape", TensorUsage::Params}, + {"table", TensorUsage::Params}, }; switch ( argument.category ) { case Category::Input: // input, weight, bias, input_real, input_imag, A, B, input1, input2, input3 + if ( op->Type() == OpType::Rescale ) // Rescale does not follow the pattern the other ops use + { + if ( argument.name == "input_zp" ) return op->Input(TensorUsage::Params2); + if ( argument.name == "output_zp" ) return op->Input(TensorUsage::Params3); + } + // Mul shift parameter does not have the same pattern as Rescale + if ( op->Type() == OpType::Mul && argument.name == "shift" ) return op->Input(TensorUsage::Params); + // Scatter input parameter does not have the same pattern as other operations + if ( 
op->Type() == OpType::Scatter && argument.name == "input" ) return op->Input(TensorUsage::IFM2); if ( auto p = inputUsageMap.find(argument.name); p != inputUsageMap.end() ) { return op->Input(p->second); @@ -225,8 +250,6 @@ void ValidateArgumentShapes(const regor::Operation *op, const std::vectorOutput(regor::TensorUsage::OFM)->shape.Size(); if ( inRank != outRank ) throw std::invalid_argument("Tensor ranks different"); if ( inRank < 1 ) throw std::invalid_argument("Tensor rank < 1"); - if ( context.version == GraphApi::VERSION_TOSA_0_60 && inRank > 4 ) - throw std::invalid_argument("Tensor rank > 4"); } for ( const auto &argument : arguments ) ValidateArgumentShape(op, *argument, context); @@ -276,6 +299,12 @@ bool ResolveAndValidateArgument(const regor::Operation *op, const Argument *argu // Unsgined check failed, signed check will be done below } + if ( argument->category == Category::Input && argument->name == "multiplier" ) + { + auto *r_attr = op->Attribute(); + *expectedType = r_attr->scale32 ? DataType::Int32 : DataType::Int16; + // Validate argument below + } } return ValidateArgument(op, argument, *expectedType); } diff --git a/ethosu/regor/tosa/tosa_argument_checks.hpp b/ethosu/regor/tosa/tosa_argument_checks.hpp index 7ccc8d30..7402c42f 100644 --- a/ethosu/regor/tosa/tosa_argument_checks.hpp +++ b/ethosu/regor/tosa/tosa_argument_checks.hpp @@ -1,5 +1,5 @@ // -// SPDX-FileCopyrightText: Copyright 2023-2024 Arm Limited and/or its affiliates +// SPDX-FileCopyrightText: Copyright 2023-2025 Arm Limited and/or its affiliates // // SPDX-License-Identifier: Apache-2.0 // @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. // -// Generated by tosaValidationGenerator for TOSA Specification 0.60.0 +// Automatically generated by tosaValidationGenerator for TOSA Specification 1.0.0draft // Do not edit. 
#pragma once diff --git a/ethosu/regor/tosa/tosa_error_checks.cpp b/ethosu/regor/tosa/tosa_error_checks.cpp index 20b989e3..b3170cee 100644 --- a/ethosu/regor/tosa/tosa_error_checks.cpp +++ b/ethosu/regor/tosa/tosa_error_checks.cpp @@ -15,8 +15,8 @@ // See the License for the specific language governing permissions and // limitations under the License. // -// Generated by tosaValidationGenerator for TOSA Specification 0.60.0 -// Modify by implementing the constraints. +// Partially generated by tosaValidationGenerator for TOSA Specification 1.0.0draft +// TODO: Implement the constraints. #include "tosa_error_checks.hpp" @@ -24,10 +24,12 @@ #include "compiler/attributes.hpp" #include "compiler/graph.hpp" #include "compiler/operation.hpp" +#include "compiler/operation_util.hpp" using regor::DataType; using regor::Operation; using regor::ordered_map; +using regor::Scalar; using regor::Tensor; using regor::TensorConnection; using regor::TensorUsage; @@ -60,21 +62,32 @@ static Shape broadcastShape(const Shape &shape1, const Shape &shape2) return shape; } +static Shape GetShapeFromValues(const Tensor *tensor) +{ + if ( tensor ) + { + assert(tensor->IsConstant()); + const auto values = tensor->View().Values(tensor->Type()); + return Shape(values.begin(), values.Count()); + } + return {}; +} + namespace tosa { namespace validator { namespace checks { -// Checks for TOSA Specification 0.60.0 -void ErrorIfCheck_ai0sdq9wgm72(const regor::Operation *op, [[maybe_unused]] const Context &context) +// Checks for TOSA Specification 1.0.0draft +void ErrorIfCheck_3tg4p2a5te0jy(const regor::Operation *op, [[maybe_unused]] const Context &context) { - // Operators: ARGMAX, - static constexpr char constraint[] = "ERROR_IF(axis < 0 || axis >= rank(shape1) || rank(shape1) > 4)"; + // Operators: REDUCE_ALL, REDUCE_ANY, REDUCE_MAX, REDUCE_MIN, REDUCE_PRODUCT, REDUCE_SUM, + static constexpr char constraint[] = "ERROR_IF(axis < 0 || axis >= rank(shape1))"; const auto rank = 
op->Input(TensorUsage::IFM)->shape.Size(); + const auto &inputShape = op->Input(TensorUsage::IFM)->shape; auto *attr = op->Attribute(); - const auto axis = attr->axis; - if ( axis < 0 || axis >= rank || rank > 4 ) throw std::invalid_argument(constraint); + if ( attr->axis < 0 || attr->axis >= rank ) throw std::invalid_argument(constraint); } void ErrorIfCheck_gpp861oen43y(const regor::Operation *op, [[maybe_unused]] const Context &context) @@ -88,25 +101,23 @@ void ErrorIfCheck_gpp861oen43y(const regor::Operation *op, [[maybe_unused]] cons if ( outputShape != expectedOutputShape ) throw std::invalid_argument(constraint); } -void ErrorIfCheck_1vu5c1tytwmhu(const regor::Operation *op, [[maybe_unused]] const Context &context) +void ErrorIfCheck_2nanft1ivm5fj(const regor::Operation *op, [[maybe_unused]] const Context &context) { // Operators: AVG_POOL2D, - static constexpr char constraint[] = "ERROR_IF(in_out_t != int8_t && input_zp != 0)"; - const auto *input = op->Input(TensorUsage::IFM); - auto in_out_t = input->tensor->Type(); - auto &zp = input->quantization.zeroPoints; - auto input_zp = zp.empty() ? 
0 : zp[0]; - if ( in_out_t != DataType::Int8 && input_zp != 0 ) throw std::invalid_argument(constraint); + static constexpr char constraint[] = "ERROR_IF(!is_same() && input_zp != 0)"; + const auto in_t = op->IFM(0)->Type(); + const auto zp_tensor = op->Input(TensorUsage::Params)->tensor.get(); + const auto input_zp = zp_tensor->View().Values(zp_tensor->Type())[0]; + if ( in_t != DataType::Int8 && input_zp != 0 ) throw std::invalid_argument(constraint); } -void ErrorIfCheck_1n0denkrrrlr1(const regor::Operation *op, [[maybe_unused]] const Context &context) +void ErrorIfCheck_1ga3gcg4zkrkv(const regor::Operation *op, [[maybe_unused]] const Context &context) { // Operators: AVG_POOL2D, NEGATE, - static constexpr char constraint[] = "ERROR_IF(in_out_t != int8_t && output_zp != 0)"; - auto in_out_t = op->OFM()->Type(); - auto &zp = op->Output(TensorUsage::OFM)->quantization.zeroPoints; - auto output_zp = zp.empty() ? 0 : zp[0]; - if ( in_out_t != DataType::Int8 && output_zp != 0 ) throw std::invalid_argument(constraint); + static constexpr char constraint[] = "ERROR_IF(!is_same() && output_zp != 0)"; + const auto out_t = op->OFM()->Type(); + const auto output_zp = Scalar(*op->Input(TensorUsage::Params1)->tensor); + if ( out_t != DataType::Int8 && output_zp != 0 ) throw std::invalid_argument(constraint); } void ErrorIfCheck_36r4wpx3psd81(const regor::Operation *op, [[maybe_unused]] const Context &context) @@ -189,23 +200,22 @@ void ErrorIfCheck_1c57olj698f3d(const regor::Operation *op, [[maybe_unused]] con if ( !shapeCheck(input, 3, output, 3) ) throw std::invalid_argument(constraint); // C } -void ErrorIfCheck_1hby1qurzja4f(const regor::Operation *op, [[maybe_unused]] const Context &context) +void ErrorIfCheck_1hrio849y2qnx(const regor::Operation *op, [[maybe_unused]] const Context &context) { - // Operators: CONV2D, CONV3D, DEPTHWISE_CONV2D, FULLY_CONNECTED, TRANSPOSE_CONV2D, - static constexpr char constraint[] = "ERROR_IF(in_t != int8_t && input_zp != 0)"; - auto 
in_t = op->IFM(0)->Type(); - auto &zp = op->Input(TensorUsage::IFM)->quantization.zeroPoints; - auto input_zp = zp.empty() ? 0 : zp[0]; + // Operators: CONV2D, CONV3D, DEPTHWISE_CONV2D, TRANSPOSE_CONV2D, + static constexpr char constraint[] = "ERROR_IF(!is_same() && input_zp != 0)"; + const auto in_t = op->IFM(0)->Type(); + const auto input_zp = Scalar(*op->Input(TensorUsage::Params)->tensor); if ( in_t != DataType::Int8 && input_zp != 0 ) throw std::invalid_argument(constraint); } -void ErrorIfCheck_1md8k265hfj92(const regor::Operation *op, [[maybe_unused]] const Context &context) +void ErrorIfCheck_31vgfyg6fi9t6(const regor::Operation *op, [[maybe_unused]] const Context &context) { - // Operators: CONV2D, CONV3D, DEPTHWISE_CONV2D, FULLY_CONNECTED, TRANSPOSE_CONV2D, - static constexpr char constraint[] = "ERROR_IF(weight_t != int8_t && weight_zp != 0)"; - auto weight_t = op->Input(TensorUsage::Weights)->tensor->Type(); - auto &zp = op->Input(TensorUsage::Weights)->quantization.zeroPoints; - auto weight_zp = zp.empty() ? 
0 : zp[0]; + // Operators: CONV2D, + static constexpr char constraint[] = "ERROR_IF(!is_same() && weight_zp != 0)"; + const auto weight_t = op->Input(TensorUsage::Weights)->tensor->Type(); + const auto zp_param = op->Input(TensorUsage::Params1); + const auto weight_zp = Scalar(*zp_param->tensor); if ( weight_t != DataType::Int8 && weight_zp != 0 ) throw std::invalid_argument(constraint); } @@ -274,6 +284,16 @@ void ErrorIfCheck_147wc580l2tik(const regor::Operation *op, [[maybe_unused]] con if ( OW != numerator / stride.x + 1 ) throw std::invalid_argument(constraint); } +void ErrorIfCheck_1gr4n0iszdlxr(const regor::Operation *op, [[maybe_unused]] const Context &context) +{ + // Operators: CONV2D, CONV3D, FULLY_CONNECTED, TRANSPOSE_CONV2D, + static constexpr char constraint[] = "ERROR_IF(BC != OC && BC != 1)"; + const auto *bias = op->Input(TensorUsage::Scales); + const auto *output = op->Output(TensorUsage::OFM); + if ( (bias->shape.Elements() != 1) && !shapeCheck(output, -1, bias, 0) ) + throw std::invalid_argument(constraint); // OC +} + void ErrorIfCheck_2rm8rnsdfn14h(const regor::Operation *op, [[maybe_unused]] const Context &context) { // Operators: CONV2D, TRANSPOSE_CONV2D, @@ -292,24 +312,6 @@ void ErrorIfCheck_36emtx7zwkk96(const regor::Operation *op, [[maybe_unused]] con if ( !shapeCheck(output, 3, weights, 0) ) throw std::invalid_argument(constraint); // OC } -void ErrorIfCheck_2r9jencgka20o(const regor::Operation *op, [[maybe_unused]] const Context &context) -{ - // Operators: CONV2D, TRANSPOSE_CONV2D, - static constexpr char constraint[] = "ERROR_IF(shapeCheck(output, [N,OH,OW,OC], bias, [OC]))"; - const auto *bias = op->Input(TensorUsage::Scales); - const auto *output = op->Output(TensorUsage::OFM); - if ( !shapeCheck(output, 3, bias, 0) ) throw std::invalid_argument(constraint); // OC -} - -void ErrorIfCheck_207p0r46d35m0(const regor::Operation *op, [[maybe_unused]] const Context &context) -{ - // Operators: CONV2D, TRANSPOSE_CONV2D, - static constexpr 
char constraint[] = "ERROR_IF(shapeCheck(bias, [OC], weight, [OC,KH,KW,IC]))"; - const auto *weight = op->Input(TensorUsage::Weights); - const auto *bias = op->Input(TensorUsage::Scales); - if ( !shapeCheck(bias, 0, weight, 0) ) throw std::invalid_argument(constraint); // OC -} - void ErrorIfCheck_cr43yjpqkcpd(const regor::Operation *op, [[maybe_unused]] const Context &context) { // Operators: CONV2D, TRANSPOSE_CONV2D, @@ -319,6 +321,15 @@ void ErrorIfCheck_cr43yjpqkcpd(const regor::Operation *op, [[maybe_unused]] cons if ( !shapeCheck(weight, 3, input, 3) ) throw std::invalid_argument(constraint); // IC } +void ErrorIfCheck_3m5ijs493bw6j(const regor::Operation *op, [[maybe_unused]] const Context &context) +{ + // Operators: CONV3D, DEPTHWISE_CONV2D, TRANSPOSE_CONV2D, + static constexpr char constraint[] = "ERROR_IF(!is_same() && weight_zp != 0)"; + const auto weight_t = op->Input(TensorUsage::Weights)->tensor->Type(); + const auto weight_zp = Scalar(*op->Input(TensorUsage::Params1)->tensor); + if ( weight_t != DataType::Int8 && weight_zp != 0 ) throw std::invalid_argument(constraint); +} + void ErrorIfCheck_341t6ysqc16b2(const regor::Operation *op, [[maybe_unused]] const Context &context) { // Operators: CONV3D, @@ -385,24 +396,6 @@ void ErrorIfCheck_27g3t38z1of4h(const regor::Operation *op, [[maybe_unused]] con if ( !shapeCheck(output, 4, weights, 0) ) throw std::invalid_argument(constraint); // OC } -void ErrorIfCheck_95jvn4dzraol(const regor::Operation *op, [[maybe_unused]] const Context &context) -{ - // Operators: CONV3D, - static constexpr char constraint[] = "ERROR_IF(shapeCheck(output, [N,OD,OH,OW,OC], bias, [OC]))"; - const auto *bias = op->Input(TensorUsage::Scales); - const auto *output = op->Output(TensorUsage::OFM); - if ( !shapeCheck(output, 4, bias, 0) ) throw std::invalid_argument(constraint); // OC -} - -void ErrorIfCheck_21377cjnb1ox7(const regor::Operation *op, [[maybe_unused]] const Context &context) -{ - // Operators: CONV3D, - static 
constexpr char constraint[] = "ERROR_IF(shapeCheck(bias, [OC], weight, [OC,KD,KH,KW,IC]))"; - const auto *weight = op->Input(TensorUsage::Weights); - const auto *bias = op->Input(TensorUsage::Scales); - if ( !shapeCheck(bias, 0, weight, 0) ) throw std::invalid_argument(constraint); // OC -} - void ErrorIfCheck_2cpco8ykx99sa(const regor::Operation *op, [[maybe_unused]] const Context &context) { // Operators: CONV3D, @@ -412,44 +405,40 @@ void ErrorIfCheck_2cpco8ykx99sa(const regor::Operation *op, [[maybe_unused]] con if ( !shapeCheck(weight, 4, input, 4) ) throw std::invalid_argument(constraint); // IC } -void ErrorIfCheck_10sexbqileii7(const regor::Operation *op, [[maybe_unused]] const Context &context) +void ErrorIfCheck_2d0jmyhr9lscf(const regor::Operation *op, [[maybe_unused]] const Context &context) { // Operators: DEPTHWISE_CONV2D, - static constexpr char constraint[] = "ERROR_IF(shapeCheck(output, [N,OH,OW,C*M], input, [N,H,W,C]))"; - const auto *input = op->Input(TensorUsage::IFM); + static constexpr char constraint[] = "ERROR_IF(BC != C*M && BC != 1)"; + const auto *bias = op->Input(TensorUsage::Scales); const auto *output = op->Output(TensorUsage::OFM); - if ( !shapeCheck(input, 0, output, 0) ) throw std::invalid_argument(constraint); // N + if ( (bias->shape.Elements() != 1) && !shapeCheck(output, 3, bias, 0) ) + throw std::invalid_argument(constraint); // OC = C*M } -void ErrorIfCheck_12rt0p658ac1(const regor::Operation *op, [[maybe_unused]] const Context &context) +void ErrorIfCheck_10td4qt70dp3i(const regor::Operation *op, [[maybe_unused]] const Context &context) { // Operators: DEPTHWISE_CONV2D, - static constexpr char constraint[] = "ERROR_IF(shapeCheck(output, [N,OH,OW,C*M], bias, [C*M]))"; + static constexpr char constraint[] = "ERROR_IF(shapeCheck(output, [N,OH,OW,C*M], input, [N,IH,IW,C]))"; + const auto *input = op->Input(TensorUsage::IFM); const auto *output = op->Output(TensorUsage::OFM); - const auto *bias = op->Input(TensorUsage::Scales); - 
if ( !shapeCheck(output, 3, bias, 0) ) throw std::invalid_argument(constraint); // C*M - // Verify M & C from Weights, with shape [KH,KW,C,M] - const auto &weightShape = op->Input(TensorUsage::Weights)->shape; - if ( weightShape.Size() != 4 ) throw std::invalid_argument(constraint); - auto CM = weightShape[2] * weightShape[3]; - if ( output->shape[3] != CM ) throw std::invalid_argument(constraint); + if ( !shapeCheck(input, 0, output, 0) ) throw std::invalid_argument(constraint); // N } -void ErrorIfCheck_3cem64qtn6ajr(const regor::Operation *op, [[maybe_unused]] const Context &context) +void ErrorIfCheck_1qxtjwwlh068t(const regor::Operation *op, [[maybe_unused]] const Context &context) { // Operators: DEPTHWISE_CONV2D, - static constexpr char constraint[] = "ERROR_IF(shapeCheck(weight, [KH,KW,C,M], input, [N,H,W,C]))"; - const auto *weight = op->Input(TensorUsage::Weights); + static constexpr char constraint[] = "ERROR_IF(shapeCheck(weight, [KH,KW,C,M], input, [N,IH,IW,C]))"; const auto *input = op->Input(TensorUsage::IFM); - if ( !shapeCheck(weight, 2, input, 3) ) throw std::invalid_argument(constraint); // C + const auto *weight = op->Input(TensorUsage::Weights); + if ( !shapeCheck(input, 3, weight, 2) ) throw std::invalid_argument(constraint); // C } void ErrorIfCheck_1hp4djlq1mi8i(const regor::Operation *op, [[maybe_unused]] const Context &context) { // Operators: FFT2D, RFFT2D, static constexpr char constraint[] = "ERROR_IF(!power_of_two(H))"; - bool checkOk = (context.profile != GraphApi::PROFILE_BASELINE); - checkOk = (op != nullptr); // TODO: Implement check when MainInference is supported + bool checkOk = true; + checkOk = (op != nullptr); // TODO: Implement check when EXT-FFT is supported if ( !checkOk ) throw std::invalid_argument(constraint); } @@ -457,8 +446,8 @@ void ErrorIfCheck_20r08ymi6c43u(const regor::Operation *op, [[maybe_unused]] con { // Operators: FFT2D, RFFT2D, static constexpr char constraint[] = "ERROR_IF(!power_of_two(W))"; - bool 
checkOk = (context.profile != GraphApi::PROFILE_BASELINE); - checkOk = (op != nullptr); // TODO: Implement check when MainInference is supported + bool checkOk = true; + checkOk = (op != nullptr); // TODO: Implement check when EXT-FFT is supported if ( !checkOk ) throw std::invalid_argument(constraint); } @@ -467,7 +456,7 @@ void ErrorIfCheck_1xwwkxeypcw3j(const regor::Operation *op, [[maybe_unused]] con // Operators: FFT2D, static constexpr char constraint[] = "ERROR_IF(shapeCheck(output_imag, [N,H,W], input_real, [N,H,W]))"; bool checkOk = true; - checkOk = (op != nullptr); // TODO: Implement check + checkOk = (op != nullptr); // TODO: Implement check when EXT-FFT is supported if ( !checkOk ) throw std::invalid_argument(constraint); } @@ -476,7 +465,7 @@ void ErrorIfCheck_vi3hzxbetjyg(const regor::Operation *op, [[maybe_unused]] cons // Operators: FFT2D, static constexpr char constraint[] = "ERROR_IF(shapeCheck(output_imag, [N,H,W], input_imag, [N,H,W]))"; bool checkOk = true; - checkOk = (op != nullptr); // TODO: Implement check + checkOk = (op != nullptr); // TODO: Implement check when EXT-FFT is supported if ( !checkOk ) throw std::invalid_argument(constraint); } @@ -485,7 +474,7 @@ void ErrorIfCheck_1m8qk2pbuovev(const regor::Operation *op, [[maybe_unused]] con // Operators: FFT2D, static constexpr char constraint[] = "ERROR_IF(shapeCheck(output_imag, [N,H,W], output_real, [N,H,W]))"; bool checkOk = true; - checkOk = (op != nullptr); // TODO: Implement check + checkOk = (op != nullptr); // TODO: Implement check when EXT-FFT is supported if ( !checkOk ) throw std::invalid_argument(constraint); } @@ -494,7 +483,7 @@ void ErrorIfCheck_1iv4j2x95j8dk(const regor::Operation *op, [[maybe_unused]] con // Operators: FFT2D, static constexpr char constraint[] = "ERROR_IF(shapeCheck(output_real, [N,H,W], input_real, [N,H,W]))"; bool checkOk = true; - checkOk = (op != nullptr); // TODO: Implement check + checkOk = (op != nullptr); // TODO: Implement check when EXT-FFT is 
supported if ( !checkOk ) throw std::invalid_argument(constraint); } @@ -503,7 +492,7 @@ void ErrorIfCheck_316kdwzc9jf5x(const regor::Operation *op, [[maybe_unused]] con // Operators: FFT2D, static constexpr char constraint[] = "ERROR_IF(shapeCheck(output_real, [N,H,W], input_imag, [N,H,W]))"; bool checkOk = true; - checkOk = (op != nullptr); // TODO: Implement check + checkOk = (op != nullptr); // TODO: Implement check when EXT-FFT is supported if ( !checkOk ) throw std::invalid_argument(constraint); } @@ -512,64 +501,17 @@ void ErrorIfCheck_tnr115b4spgw(const regor::Operation *op, [[maybe_unused]] cons // Operators: FFT2D, static constexpr char constraint[] = "ERROR_IF(shapeCheck(input_imag, [N,H,W], input_real, [N,H,W]))"; bool checkOk = true; - checkOk = (op != nullptr); // TODO: Implement check + checkOk = (op != nullptr); // TODO: Implement check when EXT-FFT is supported if ( !checkOk ) throw std::invalid_argument(constraint); } -void ErrorIfCheck_3ufiqep5ipuco(const regor::Operation *op, [[maybe_unused]] const Context &context) -{ - // Operators: FULLY_CONNECTED, - static constexpr char constraint[] = "ERROR_IF(shapeCheck(output, [N,OC], input, [N,IC]))"; - const auto *input = op->Input(TensorUsage::IFM); - const auto *output = op->Output(TensorUsage::OFM); - if ( !shapeCheck(input, 0, output, 0) ) throw std::invalid_argument(constraint); // N -} - -void ErrorIfCheck_3kcipzq18dxv9(const regor::Operation *op, [[maybe_unused]] const Context &context) -{ - // Operators: FULLY_CONNECTED, - static constexpr char constraint[] = "ERROR_IF(shapeCheck(output, [N,OC], weight, [OC,IC]))"; - const auto *weights = op->Input(TensorUsage::Weights); - const auto *output = op->Output(TensorUsage::OFM); - if ( !shapeCheck(output, 1, weights, 0) ) throw std::invalid_argument(constraint); // OC -} - -void ErrorIfCheck_jcjmr2nnatvv(const regor::Operation *op, [[maybe_unused]] const Context &context) -{ - // Operators: FULLY_CONNECTED, - static constexpr char constraint[] = 
"ERROR_IF(shapeCheck(output, [N,OC], bias, [OC]))"; - const auto *bias = op->Input(TensorUsage::Scales); - const auto *output = op->Output(TensorUsage::OFM); - if ( !shapeCheck(output, 1, bias, 0) ) throw std::invalid_argument(constraint); // OC -} - -void ErrorIfCheck_qwmo2w7hxola(const regor::Operation *op, [[maybe_unused]] const Context &context) -{ - // Operators: FULLY_CONNECTED, - static constexpr char constraint[] = "ERROR_IF(shapeCheck(bias, [OC], weight, [OC,IC]))"; - const auto *weight = op->Input(TensorUsage::Weights); - const auto *bias = op->Input(TensorUsage::Scales); - if ( !shapeCheck(bias, 0, weight, 0) ) throw std::invalid_argument(constraint); // OC -} - -void ErrorIfCheck_c9o11f07skde(const regor::Operation *op, [[maybe_unused]] const Context &context) -{ - // Operators: FULLY_CONNECTED, - static constexpr char constraint[] = "ERROR_IF(shapeCheck(weight, [OC,IC], input, [N,IC]))"; - const auto *weight = op->Input(TensorUsage::Weights); - const auto *input = op->Input(TensorUsage::IFM); - if ( !shapeCheck(weight, 1, input, 1) ) throw std::invalid_argument(constraint); // IC -} - -void ErrorIfCheck_1ellfcuw76b13(const regor::Operation *op, [[maybe_unused]] const Context &context) +void ErrorIfCheck_2autvayhidla8(const regor::Operation *op, [[maybe_unused]] const Context &context) { // Operators: MATMUL, - static constexpr char constraint[] = "ERROR_IF(in_t != int8_t && (A_zp != 0 || B_zp != 0))"; - auto in_t = op->IFM(0)->Type(); - auto &zpA = op->Input(TensorUsage::IFM)->quantization.zeroPoints; - auto A_zp = zpA.empty() ? 0 : zpA[0]; - auto &zpB = op->Input(TensorUsage::IFM1)->quantization.zeroPoints; - auto B_zp = zpB.empty() ? 
0 : zpB[0]; + static constexpr char constraint[] = "ERROR_IF(is_same && (A_zp != 0 || B_zp != 0))"; + const auto in_t = op->IFM(0)->Type(); + const auto A_zp = Scalar(*op->Input(TensorUsage::Params0)->tensor); + const auto B_zp = Scalar(*op->Input(TensorUsage::Params1)->tensor); if ( in_t != DataType::Int8 && (A_zp != 0 || B_zp != 0) ) throw std::invalid_argument(constraint); } @@ -603,12 +545,12 @@ void ErrorIfCheck_1azcq4511qzyx(const regor::Operation *op, [[maybe_unused]] con if ( !shapeCheck(B, 1, A, 2) ) throw std::invalid_argument(constraint); // C } -void ErrorIfCheck_15o9wo9pu7mrg(const regor::Operation *op, [[maybe_unused]] const Context &context) +void ErrorIfCheck_2befn2dfjcm62(const regor::Operation *op, [[maybe_unused]] const Context &context) { // Operators: RFFT2D, - static constexpr char constraint[] = "ERROR_IF(shapeCheck(output_imag, [N,H,W/2 + 1], input, [N,H,W]))"; + static constexpr char constraint[] = "ERROR_IF(shapeCheck(output_imag, [N,H,W/2 + 1], input_real, [N,H,W]))"; bool checkOk = true; - checkOk = (op != nullptr); // TODO: Implement check + checkOk = (op != nullptr); // TODO: Implement check when EXT-FFT is supported if ( !checkOk ) throw std::invalid_argument(constraint); } @@ -617,16 +559,16 @@ void ErrorIfCheck_13tqdu59nyxyh(const regor::Operation *op, [[maybe_unused]] con // Operators: RFFT2D, static constexpr char constraint[] = "ERROR_IF(shapeCheck(output_imag, [N,H,W/2 + 1], output_real, [N,H,W/2 + 1]))"; bool checkOk = true; - checkOk = (op != nullptr); // TODO: Implement check + checkOk = (op != nullptr); // TODO: Implement check when EXT-FFT is supported if ( !checkOk ) throw std::invalid_argument(constraint); } -void ErrorIfCheck_2kgf2jejxlrr6(const regor::Operation *op, [[maybe_unused]] const Context &context) +void ErrorIfCheck_khc2s3en2uxi(const regor::Operation *op, [[maybe_unused]] const Context &context) { // Operators: RFFT2D, - static constexpr char constraint[] = "ERROR_IF(shapeCheck(output_real, [N,H,W/2 + 1], 
input, [N,H,W]))"; + static constexpr char constraint[] = "ERROR_IF(shapeCheck(output_real, [N,H,W/2 + 1], input_real, [N,H,W]))"; bool checkOk = true; - checkOk = (op != nullptr); // TODO: Implement check + checkOk = (op != nullptr); // TODO: Implement check when EXT-FFT is supported if ( !checkOk ) throw std::invalid_argument(constraint); } @@ -698,6 +640,36 @@ void ErrorIfCheck_xod9coigx1x2(const regor::Operation *op, [[maybe_unused]] cons if ( attr->max < attr->min ) throw std::invalid_argument(constraint); } +void ErrorIfCheck_15y4an3ceern5(const regor::Operation *op, [[maybe_unused]] const Context &context) +{ + // Operators: CLAMP, + static constexpr char constraint[] = "ERROR_IF(isNaN(min_val) || isNaN(max_val))"; + bool checkOk = (context.profile != GraphApi::PROFILE_BASELINE); + checkOk = (op != nullptr); // TODO: Implement check when PRO-FP is supported + if ( !checkOk ) throw std::invalid_argument(constraint); +} + +void ErrorIfCheck_10u6py7exa66n(const regor::Operation *op, [[maybe_unused]] const Context &context) +{ + // Operators: CLAMP, ERF, SIGMOID, TANH, CAST, RESCALE, + static constexpr char constraint[] = "ERROR_IF(rankCheck(output, input))"; + const auto &outputShape = op->Output(TensorUsage::OFM)->shape; + const auto &inputShape = op->Input(TensorUsage::IFM)->shape; + if ( outputShape != inputShape ) throw std::invalid_argument(constraint); +} + +void ErrorIfCheck_1hynqeiugz9lt(const regor::Operation *op, [[maybe_unused]] const Context &context) +{ + // Operators: ADD, ARITHMETIC_RIGHT_SHIFT, BITWISE_AND, BITWISE_OR, BITWISE_XOR, INTDIV, LOGICAL_AND, + // LOGICAL_LEFT_SHIFT, LOGICAL_RIGHT_SHIFT, LOGICAL_OR, LOGICAL_XOR, MAXIMUM, MINIMUM, MUL, POW, SUB, EQUAL, + // GREATER, GREATER_EQUAL, + static constexpr char constraint[] = "ERROR_IF(shape != broadcast_shape(shape1, shape2))"; + auto shape1 = op->Input(TensorUsage::IFM)->shape; + auto shape2 = op->Input(TensorUsage::IFM1)->shape; + auto shape = op->Output(TensorUsage::OFM)->shape; + if ( 
shape != broadcastShape(shape1, shape2) ) throw std::invalid_argument(constraint); +} + static bool broadcastOk(const Shape &outShape, const Shape &inShape) { auto inRank = inShape.Size(); @@ -734,23 +706,34 @@ void ErrorIfCheck_3k5ug2w7gxc7r(const regor::Operation *op, [[maybe_unused]] con if ( !broadcastOk(shape, shape2) ) throw std::invalid_argument(constraint); } -void ErrorIfCheck_2gdayq6ofi7wx(const regor::Operation *op, [[maybe_unused]] const Context &context) +void ErrorIfCheck_396rg8p65j58r(const regor::Operation *op, [[maybe_unused]] const Context &context) { - // Operators: MUL, - static constexpr char constraint[] = "ERROR_IF(in_t != int32_t && shift > 0)"; - auto in_t = op->IFM(0)->Type(); - auto *attr = op->Attribute(); - if ( in_t != DataType::Int32 && attr->shift > 0 ) throw std::invalid_argument(constraint); + // Operators: TABLE, ABS, BITWISE_NOT, CEIL, CLZ, COS, EXP, FLOOR, LOG, LOGICAL_NOT, NEGATE, RECIPROCAL, RSQRT, SIN, + // REVERSE, IDENTITY, + static constexpr char constraint[] = "ERROR_IF(rankCheck(output, input1))"; + const auto &outputShape = op->Output(TensorUsage::OFM)->shape; + const auto &inputShape = op->Input(TensorUsage::IFM)->shape; + if ( outputShape != inputShape ) throw std::invalid_argument(constraint); } -void ErrorIfCheck_38qvty7pudfz2(const regor::Operation *op, [[maybe_unused]] const Context &context) +void ErrorIfCheck_3l2ksvk26m07h(const regor::Operation *op, [[maybe_unused]] const Context &context) { // Operators: NEGATE, - static constexpr char constraint[] = "ERROR_IF(in_out_t != int8_t && input1_zp != 0)"; - auto in_out_t = op->IFM(0)->Type(); - auto &zp = op->Input(TensorUsage::IFM)->quantization.zeroPoints; - auto input1_zp = zp.empty() ? 
0 : zp[0]; - if ( in_out_t != DataType::Int8 && input1_zp != 0 ) throw std::invalid_argument(constraint); + static constexpr char constraint[] = "ERROR_IF(!is_same() && input1_zp != 0)"; + const auto in_t = op->IFM(0)->Type(); + const auto input1_zp = Scalar(*op->Input(TensorUsage::Params)->tensor); + if ( in_t != DataType::Int8 && input1_zp != 0 ) throw std::invalid_argument(constraint); +} + +void ErrorIfCheck_192e2vu3t5aqm(const regor::Operation *op, [[maybe_unused]] const Context &context) +{ + // Operators: SELECT, + static constexpr char constraint[] = "ERROR_IF(shape != broadcast_shape(broadcast_shape(shape1, shape2), shape3))"; + auto shape1 = op->Input(TensorUsage::IFM)->shape; + auto shape2 = op->Input(TensorUsage::IFM1)->shape; + auto shape3 = op->Input(TensorUsage::IFM2)->shape; + auto shape = op->Output(TensorUsage::OFM)->shape; + if ( shape != broadcastShape(broadcastShape(shape1, shape2), shape3) ) throw std::invalid_argument(constraint); } void ErrorIfCheck_3tccsjner0km9(const regor::Operation *op, [[maybe_unused]] const Context &context) @@ -763,16 +746,6 @@ void ErrorIfCheck_3tccsjner0km9(const regor::Operation *op, [[maybe_unused]] con if ( !broadcastOk(shape, shape3) ) throw std::invalid_argument(constraint); } -void ErrorIfCheck_3tg4p2a5te0jy(const regor::Operation *op, [[maybe_unused]] const Context &context) -{ - // Operators: REDUCE_ALL, REDUCE_ANY, REDUCE_MAX, REDUCE_MIN, REDUCE_PRODUCT, REDUCE_SUM, - static constexpr char constraint[] = "ERROR_IF(axis < 0 || axis >= rank(shape1))"; - const auto rank = op->Input(TensorUsage::IFM)->shape.Size(); - const auto &inputShape = op->Input(TensorUsage::IFM)->shape; - auto *attr = op->Attribute(); - if ( attr->axis < 0 || attr->axis >= rank ) throw std::invalid_argument(constraint); -} - void ErrorIfCheck_33exz9gn2i1wy(const regor::Operation *op, [[maybe_unused]] const Context &context) { // Operators: REDUCE_ALL, REDUCE_ANY, REDUCE_MAX, REDUCE_MIN, REDUCE_PRODUCT, REDUCE_SUM, @@ -782,72 +755,64 @@ 
void ErrorIfCheck_33exz9gn2i1wy(const regor::Operation *op, [[maybe_unused]] con if ( shape[attr->axis] != 1 ) throw std::invalid_argument(constraint); } -void ErrorIfCheck_14slfd7r77hgh(const regor::Operation *op, [[maybe_unused]] const Context &context) +void ErrorIfCheck_2d3qdl1f70i6y(const regor::Operation *op, [[maybe_unused]] const Context &context) +{ + // Operators: CONCAT, + static constexpr char constraint[] = "ERROR_IF(input1 == [])"; + if ( op->CountInputs(TensorUsage::IFM) == 0 ) throw std::invalid_argument(constraint); +} + +void ErrorIfCheck_5y7ov1oeymoa(const regor::Operation *op, [[maybe_unused]] const Context &context) { // Operators: CONCAT, - static constexpr char constraint[] = "ERROR_IF(axis < 0 || axis >= rank(shapes1[0]))"; + static constexpr char constraint[] = "ERROR_IF(axis < 0 || axis >= max(1,rank(shapes1[0])))"; const auto rank = op->Input(TensorUsage::IFM)->shape.Size(); auto *attr = op->Attribute(); - if ( attr->axis < 0 || attr->axis >= rank ) throw std::invalid_argument(constraint); + if ( attr->axis < 0 || attr->axis >= std::max(1, rank) ) throw std::invalid_argument(constraint); } -void ErrorIfCheck_1fzhf02pkiw9z(const regor::Operation *op, [[maybe_unused]] const Context &context) +void ErrorIfCheck_1aloht2b77zby(const regor::Operation *op, [[maybe_unused]] const Context &context) { // Operators: CONCAT, - static constexpr char constraint[] = "ERROR_IF(shape[axis] != sum(shape1[k][axis] for all k))"; - const auto &shape = op->Output(TensorUsage::OFM)->shape; - const auto &inputs = op->Inputs(); - auto axis = op->Attribute()->axis; - int64_t sum = 0; - for ( const auto &input : inputs ) + static constexpr char constraint[] = "ERROR_IF(rank(shapes1[shape_index]) != rank(shapes1[0]))"; + const auto ifm0Rank = op->IFM(0)->StorageShape().Size(); + const int count = op->CountInputs(TensorUsage::IFM); + for ( int i = 1; i < count; i++ ) { - auto inputDim = input.shape[axis]; - if ( inputDim < 0 || sum + inputDim > 
std::numeric_limits::max() ) - throw std::invalid_argument(constraint); - sum += inputDim; + if ( op->IFM(i)->StorageShape().Size() != ifm0Rank ) throw std::invalid_argument(constraint); } - if ( shape[axis] != sum ) throw std::invalid_argument(constraint); } -void ErrorIfCheck_16s99hvsej4fo(const regor::Operation *op, [[maybe_unused]] const Context &context) +void ErrorIfCheck_f1kt9a6h7s2p(const regor::Operation *op, [[maybe_unused]] const Context &context) { // Operators: CONCAT, - static constexpr char constraint[] = "ERROR_IF(rank(input_shape) != rank(shapes1[0]))"; - const auto &inputs = op->Inputs(); - auto rank = inputs.front().shape.Size(); - bool checkOk = true; - for ( const auto &input : inputs ) + static constexpr char constraint[] = "ERROR_IF(shapes1[shape_index][axis_index] != shapes1[0][axis_index])"; + const auto attr = op->Attribute(); + const auto ifm0Shape = op->IFM(0)->StorageShape(); + const int count = op->CountInputs(TensorUsage::IFM); + for ( int i = 1; i < count; i++ ) { - if ( input.shape.Size() != rank ) + auto shape = op->IFM(i)->StorageShape(); + for ( int axis = 0; axis < ifm0Shape.Size(); axis++ ) { - checkOk = false; - break; + if ( axis == attr->axis ) continue; + if ( shape[axis] != ifm0Shape[axis] ) throw std::invalid_argument(constraint); } } - if ( !checkOk ) throw std::invalid_argument(constraint); } -void ErrorIfCheck_dctmd6sgn5n0(const regor::Operation *op, [[maybe_unused]] const Context &context) +void ErrorIfCheck_302z1f8mq8lg7(const regor::Operation *op, [[maybe_unused]] const Context &context) { // Operators: CONCAT, - static constexpr char constraint[] = "ERROR_IF(input_shape[index] != shapes1[0][index] && index != axis)"; - bool checkOk = true; - const auto &inputs = op->Inputs(); - auto rank = inputs.front().shape.Size(); - auto *attr = op->Attribute(); - for ( const auto &input : inputs ) + static constexpr char constraint[] = "ERROR_IF(axis_sum != shape[axis])"; + const auto axis = op->Attribute()->axis; + const int 
count = op->CountInputs(TensorUsage::IFM); + int axis_sum = 0; + for ( int i = 0; i < count; i++ ) { - for ( int i = 0; i < rank; i++ ) - { - if ( i != attr->axis && input.shape[i] != inputs.front().shape[i] ) - { - checkOk = false; - break; - } - } - if ( !checkOk ) break; + axis_sum += op->IFM(i)->StorageShape()[axis]; } - if ( !checkOk ) throw std::invalid_argument(constraint); + if ( axis_sum != op->OFM()->StorageShape()[axis] ) throw std::invalid_argument(constraint); } void ErrorIfCheck_14z7y0qe9lwps(const regor::Operation *op, [[maybe_unused]] const Context &context) @@ -859,37 +824,28 @@ void ErrorIfCheck_14z7y0qe9lwps(const regor::Operation *op, [[maybe_unused]] con if ( rank != rank1 ) throw std::invalid_argument(constraint); } -void ErrorIfCheck_2rfef32dgp3be(const regor::Operation *op, [[maybe_unused]] const Context &context) +void ErrorIfCheck_3dvn5k3273lwz(const regor::Operation *op, [[maybe_unused]] const Context &context) { // Operators: PAD, - static constexpr char constraint[] = "ERROR_IF(padding[i,0] < 0 || padding[i,1] < 0)"; - const auto *padding = op->Input(TensorUsage::Params); - const auto rank = op->Input(TensorUsage::IFM)->shape.Size(); - if ( padding->shape.Elements() != 2LL * rank ) throw std::invalid_argument(constraint); - const auto &paddingView = padding->tensor->View(); - if ( paddingView.ViewShape().Elements() != 2LL * rank ) throw std::invalid_argument(constraint); - for ( int i = 0; i < rank; i++ ) + static constexpr char constraint[] = "ERROR_IF(padding[i * 2] < 0 || padding[(i * 2) + 1] < 0)"; + const int rank_shape = op->OFM()->StorageShape().Size(); + const auto padding = GetShapeFromValues(op->Input(TensorUsage::Params)->tensor.get()); + for ( int i = 0; i < rank_shape; i++ ) { - if ( paddingView.Values()[2 * i] < 0 || paddingView.Values()[2 * i + 1] < 0 ) - throw std::invalid_argument(constraint); + if ( padding[i * 2] < 0 || padding[(i * 2) + 1] < 0 ) throw std::invalid_argument(constraint); } } -void 
ErrorIfCheck_2sfcgak3rj1vs(const regor::Operation *op, [[maybe_unused]] const Context &context) +void ErrorIfCheck_34zvbtwx1r18j(const regor::Operation *op, [[maybe_unused]] const Context &context) { // Operators: PAD, - static constexpr char constraint[] = "ERROR_IF(shape[i] != padding[i, 0] + shape1[i] + padding[i, 1])"; - const auto *padding = op->Input(TensorUsage::Params); - const auto &shape1 = op->Input(TensorUsage::IFM)->shape; - const auto rank = shape1.Size(); - const auto &shape = op->Output(TensorUsage::OFM)->shape; - if ( padding->shape.Elements() != 2LL * rank ) throw std::invalid_argument(constraint); - const auto &paddingView = padding->tensor->View(); - if ( paddingView.ViewShape().Elements() != 2LL * rank ) throw std::invalid_argument(constraint); - for ( int i = 0; i < rank; i++ ) + static constexpr char constraint[] = "ERROR_IF(shape[i] != padding[i * 2] + shape1[i] + padding[(i * 2) + 1])"; + const auto shape = op->OFM()->StorageShape(); + const auto shape1 = op->IFM(0)->StorageShape(); + const auto padding = GetShapeFromValues(op->Input(TensorUsage::Params)->tensor.get()); + for ( int i = 0; i < shape.Size(); i++ ) { - if ( shape[i] != paddingView.Values()[2 * i] + shape1[i] + paddingView.Values()[2 * i + 1] ) - throw std::invalid_argument(constraint); + if ( shape[i] != padding[i * 2] + shape1[i] + padding[(i * 2) + 1] ) throw std::invalid_argument(constraint); } } @@ -916,8 +872,13 @@ void ErrorIfCheck_1nifeiq9rvmb8(const regor::Operation *op, [[maybe_unused]] con // Operators: SLICE, static constexpr char constraint[] = "ERROR_IF(rank(shape1) != length(start) || rank(shape1) != length(size))"; const auto rank = op->Input(TensorUsage::IFM)->shape.Size(); - auto *attr = op->Attribute(); - if ( rank != attr->begin.Size() || rank != attr->size.Size() ) throw std::invalid_argument(constraint); + auto *attr = op->HasAttribute() ? 
op->Attribute() : nullptr; + auto startConn = op->Input(TensorUsage::Params0); + auto sizeConn = op->Input(TensorUsage::Params1); + // Compile time constant tensor attributes takes precedence over operator attributes + int startLength = startConn ? startConn->shape.Elements() : (attr ? attr->begin.Size() : -1); + int sizeLength = sizeConn ? sizeConn->shape.Elements() : (attr ? attr->size.Size() : -1); + if ( rank != startLength || rank != sizeLength ) throw std::invalid_argument(constraint); } void ErrorIfCheck_21rq6kn6p1yle(const regor::Operation *op, [[maybe_unused]] const Context &context) @@ -935,10 +896,13 @@ void ErrorIfCheck_3rghkieqip43o(const regor::Operation *op, [[maybe_unused]] con static constexpr char constraint[] = "ERROR_IF(start[index] < 0)"; bool checkOk = true; const auto rank = op->Input(TensorUsage::IFM)->shape.Size(); - auto *attr = op->Attribute(); + auto *attr = op->HasAttribute() ? op->Attribute() : nullptr; + auto startConn = op->Input(TensorUsage::Params0); + // Compile time constant tensor attributes takes precedence over operator attributes + Shape begin = startConn ? GetShapeFromValues(startConn->tensor.get()) : (attr ? attr->begin : Shape{}); for ( int i = 0; i < rank; i++ ) { - if ( attr->begin[i] < 0 ) + if ( begin[i] < 0 ) { checkOk = false; break; @@ -953,10 +917,13 @@ void ErrorIfCheck_1cyv9n59wyyyc(const regor::Operation *op, [[maybe_unused]] con static constexpr char constraint[] = "ERROR_IF(size[index] <= 0)"; bool checkOk = true; const auto rank = op->Input(TensorUsage::IFM)->shape.Size(); - auto *attr = op->Attribute(); + auto *attr = op->HasAttribute() ? op->Attribute() : nullptr; + auto sizeConn = op->Input(TensorUsage::Params1); + // Compile time constant tensor attributes takes precedence over operator attributes + Shape size = sizeConn ? GetShapeFromValues(sizeConn->tensor.get()) : (attr ? 
attr->size : Shape{}); for ( int i = 0; i < rank; i++ ) { - if ( attr->size[i] < 0 ) + if ( size[i] < 0 ) { checkOk = false; break; @@ -972,10 +939,15 @@ void ErrorIfCheck_3oy2tclc6uhsu(const regor::Operation *op, [[maybe_unused]] con bool checkOk = true; const auto &shape = op->Input(TensorUsage::IFM)->shape; const auto rank = shape.Size(); - auto *attr = op->Attribute(); + auto *attr = op->HasAttribute() ? op->Attribute() : nullptr; + auto startConn = op->Input(TensorUsage::Params0); + auto sizeConn = op->Input(TensorUsage::Params1); + // Compile time constant tensor attributes takes precedence over operator attributes + Shape begin = startConn ? GetShapeFromValues(startConn->tensor.get()) : (attr ? attr->begin : Shape{}); + Shape size = sizeConn ? GetShapeFromValues(sizeConn->tensor.get()) : (attr ? attr->size : Shape{}); for ( int i = 0; i < rank; i++ ) { - int64_t sliceSize = attr->begin[i] + attr->size[i]; + int64_t sliceSize = begin[i] + size[i]; if ( sliceSize > shape[i] ) { checkOk = false; @@ -991,10 +963,13 @@ void ErrorIfCheck_gpp3enlp1ddg(const regor::Operation *op, [[maybe_unused]] cons static constexpr char constraint[] = "ERROR_IF(shape[index] != size[index])"; const auto &shape = op->Output(TensorUsage::OFM)->shape; const auto rank = shape.Size(); - auto *attr = op->Attribute(); + auto *attr = op->HasAttribute() ? op->Attribute() : nullptr; + auto sizeConn = op->Input(TensorUsage::Params1); + // Compile time constant tensor attributes takes precedence over operator attributes + Shape size = sizeConn ? GetShapeFromValues(sizeConn->tensor.get()) : (attr ? 
attr->size : Shape{}); for ( int i = 0; i < rank; i++ ) { - if ( shape[i] != attr->size[i] ) throw std::invalid_argument(constraint); + if ( shape[i] != size[i] ) throw std::invalid_argument(constraint); } } @@ -1003,8 +978,13 @@ void ErrorIfCheck_ix9div4ld46q(const regor::Operation *op, [[maybe_unused]] cons // Operators: SLICE, static constexpr char constraint[] = "ERROR_IF(shapeCheck(size, [rank(shape1)], start, [rank(shape1)]))"; auto rank = op->Input(TensorUsage::IFM)->shape.Size(); - auto *attr = op->Attribute(); - if ( attr->size.Size() != rank || attr->begin.Size() != rank ) throw std::invalid_argument(constraint); + auto *attr = op->HasAttribute() ? op->Attribute() : nullptr; + auto startConn = op->Input(TensorUsage::Params0); + auto sizeConn = op->Input(TensorUsage::Params1); + // Compile time constant tensor attributes takes precedence over operator attributes + int startLength = startConn ? startConn->shape.Elements() : (attr ? attr->begin.Size() : -1); + int sizeLength = sizeConn ? sizeConn->shape.Elements() : (attr ? 
attr->size.Size() : -1); + if ( startLength != rank || sizeLength != rank ) throw std::invalid_argument(constraint); } void ErrorIfCheck_3estuseky2gm2(const regor::Operation *op, [[maybe_unused]] const Context &context) @@ -1013,8 +993,7 @@ void ErrorIfCheck_3estuseky2gm2(const regor::Operation *op, [[maybe_unused]] con static constexpr char constraint[] = "ERROR_IF(shape1[i] * multiples[i] != shape[i])"; const auto &shape = op->Output(TensorUsage::OFM)->shape; const auto &shape1 = op->Input(TensorUsage::IFM)->shape; - const auto view = op->Input(TensorUsage::Params)->tensor->View(); - Shape multiples(view.Buffer()->Data(), view.ViewShape().Elements()); + Shape multiples = GetShapeFromValues(op->Input(TensorUsage::Params)->tensor.get()); if ( multiples.Size() != shape.Size() ) throw std::invalid_argument(constraint); for ( int i = 0; i < shape.Size(); i++ ) @@ -1022,8 +1001,8 @@ void ErrorIfCheck_3estuseky2gm2(const regor::Operation *op, [[maybe_unused]] con int64_t shape1Dim = shape1[i]; if ( shape1Dim < 0 || multiples[i] < 0 || shape[i] < 0 ) throw std::invalid_argument(constraint); int64_t result = shape1Dim * multiples[i]; - if ( result > std::numeric_limits::max() ) throw std::invalid_argument(constraint); - if ( static_cast(result) != shape[i] ) throw std::invalid_argument(constraint); + if ( result > std::numeric_limits::max() ) throw std::invalid_argument(constraint); + if ( int(result) != shape[i] ) throw std::invalid_argument(constraint); } } @@ -1183,10 +1162,13 @@ void ErrorIfCheck_1obslcewwn583(const regor::Operation *op, [[maybe_unused]] con // Operators: RESIZE, static constexpr char constraint[] = "ERROR_IF(scale_y_n <= 0 || scale_y_d <= 0 || scale_x_n <= 0 || scale_x_d <= 0)"; const auto *attr = op->Attribute(); - auto scale_y_d = attr->scaleY.d; - auto scale_y_n = attr->scaleY.n; - auto scale_x_d = attr->scaleX.d; - auto scale_x_n = attr->scaleX.n; + const auto scaleConn = op->Input(TensorUsage::Params); + Shape scale = scaleConn ? 
GetShapeFromValues(scaleConn->tensor.get()) : Shape{}; + // Compile time constant tensor attributes takes precedence over operator attributes + auto scale_y_d = scale ? scale[1] : attr->scaleY.d; + auto scale_y_n = scale ? scale[0] : attr->scaleY.n; + auto scale_x_d = scale ? scale[3] : attr->scaleX.d; + auto scale_x_n = scale ? scale[2] : attr->scaleX.n; if ( scale_y_n <= 0 || scale_y_d <= 0 || scale_x_n <= 0 || scale_x_d <= 0 ) throw std::invalid_argument(constraint); } @@ -1195,8 +1177,11 @@ void ErrorIfCheck_3oxfjen91qb6l(const regor::Operation *op, [[maybe_unused]] con // Operators: RESIZE, static constexpr char constraint[] = "ERROR_IF(scale_y_n > (1 << 11) || scale_x_n > (1 << 11))"; const auto *attr = op->Attribute(); - auto scale_y_n = attr->scaleY.n; - auto scale_x_n = attr->scaleX.n; + const auto scaleConn = op->Input(TensorUsage::Params); + Shape scale = scaleConn ? GetShapeFromValues(scaleConn->tensor.get()) : Shape{}; + // Compile time constant tensor attributes takes precedence over operator attributes + auto scale_y_n = scale ? scale[0] : attr->scaleY.n; + auto scale_x_n = scale ? scale[2] : attr->scaleX.n; if ( scale_y_n > (1 << 11) || scale_x_n > (1 << 11) ) throw std::invalid_argument(constraint); } @@ -1205,10 +1190,13 @@ void ErrorIfCheck_1uo0z247e42af(const regor::Operation *op, [[maybe_unused]] con // Operators: RESIZE, static constexpr char constraint[] = "ERROR_IF(scale_y_d >= 16 * scale_y_n || scale_x_d >= 16 * scale_x_n)"; const auto *attr = op->Attribute(); - auto scale_y_d = attr->scaleY.d; - auto scale_y_n = attr->scaleY.n; - auto scale_x_d = attr->scaleX.d; - auto scale_x_n = attr->scaleX.n; + const auto scaleConn = op->Input(TensorUsage::Params); + Shape scale = scaleConn ? GetShapeFromValues(scaleConn->tensor.get()) : Shape{}; + // Compile time constant tensor attributes takes precedence over operator attributes + auto scale_y_d = scale ? scale[1] : attr->scaleY.d; + auto scale_y_n = scale ? 
scale[0] : attr->scaleY.n; + auto scale_x_d = scale ? scale[3] : attr->scaleX.d; + auto scale_x_n = scale ? scale[2] : attr->scaleX.n; if ( scale_y_d >= 16 * scale_y_n || scale_x_d >= 16 * scale_x_n ) throw std::invalid_argument(constraint); } @@ -1217,8 +1205,13 @@ void ErrorIfCheck_1eovh9pyc6tyw(const regor::Operation *op, [[maybe_unused]] con // Operators: RESIZE, static constexpr char constraint[] = "ERROR_IF(offset_y < -scale_y_n || offset_y >= 16 * scale_y_n)"; const auto *attr = op->Attribute(); - auto offset_y = attr->offset.y; - auto scale_y_n = attr->scaleY.n; + const auto scaleConn = op->Input(TensorUsage::Params0); + const auto offsetConn = op->Input(TensorUsage::Params1); + Shape scale = scaleConn ? GetShapeFromValues(scaleConn->tensor.get()) : Shape{}; + Shape offset = offsetConn ? GetShapeFromValues(offsetConn->tensor.get()) : Shape{}; + // Compile time constant tensor attributes takes precedence over operator attributes + auto offset_y = offset ? offset[0] : attr->offset.y; + auto scale_y_n = scale ? scale[0] : attr->scaleY.n; if ( offset_y < -scale_y_n || offset_y >= 16 * scale_y_n ) throw std::invalid_argument(constraint); } @@ -1227,8 +1220,13 @@ void ErrorIfCheck_24jsin2zkf4ug(const regor::Operation *op, [[maybe_unused]] con // Operators: RESIZE, static constexpr char constraint[] = "ERROR_IF(offset_x < -scale_x_n || offset_x >= 16 * scale_x_n)"; const auto *attr = op->Attribute(); - auto offset_x = attr->offset.x; - auto scale_x_n = attr->scaleX.n; + const auto scaleConn = op->Input(TensorUsage::Params0); + const auto offsetConn = op->Input(TensorUsage::Params1); + Shape scale = scaleConn ? GetShapeFromValues(scaleConn->tensor.get()) : Shape{}; + Shape offset = offsetConn ? GetShapeFromValues(offsetConn->tensor.get()) : Shape{}; + // Compile time constant tensor attributes takes precedence over operator attributes + auto offset_x = offset ? offset[1] : attr->offset.x; + auto scale_x_n = scale ? 
scale[2] : attr->scaleX.n; if ( offset_x < -scale_x_n || offset_x >= 16 * scale_x_n ) throw std::invalid_argument(constraint); } @@ -1237,8 +1235,13 @@ void ErrorIfCheck_12uj5fltk5rbo(const regor::Operation *op, [[maybe_unused]] con // Operators: RESIZE, static constexpr char constraint[] = "ERROR_IF(border_y < -16 * scale_y_n || border_y >= scale_y_n)"; const auto *attr = op->Attribute(); - auto border_y = attr->border.y; - auto scale_y_n = attr->scaleY.n; + const auto scaleConn = op->Input(TensorUsage::Params0); + const auto borderConn = op->Input(TensorUsage::Params2); + Shape scale = scaleConn ? GetShapeFromValues(scaleConn->tensor.get()) : Shape{}; + Shape border = borderConn ? GetShapeFromValues(borderConn->tensor.get()) : Shape{}; + // Compile time constant tensor attributes takes precedence over operator attributes + auto border_y = border ? border[0] : attr->border.y; + auto scale_y_n = scale ? scale[0] : attr->scaleY.n; if ( border_y < -16 * scale_y_n || border_y >= scale_y_n ) throw std::invalid_argument(constraint); } @@ -1247,8 +1250,13 @@ void ErrorIfCheck_1py9f91imwjxe(const regor::Operation *op, [[maybe_unused]] con // Operators: RESIZE, static constexpr char constraint[] = "ERROR_IF(border_x < -16 * scale_x_n || border_x >= scale_x_n)"; const auto *attr = op->Attribute(); - auto border_x = attr->border.x; - auto scale_x_n = attr->scaleX.n; + const auto scaleConn = op->Input(TensorUsage::Params0); + const auto borderConn = op->Input(TensorUsage::Params2); + Shape scale = scaleConn ? GetShapeFromValues(scaleConn->tensor.get()) : Shape{}; + Shape border = borderConn ? GetShapeFromValues(borderConn->tensor.get()) : Shape{}; + // Compile time constant tensor attributes takes precedence over operator attributes + auto border_x = border ? border[1] : attr->border.x; + auto scale_x_n = scale ? 
scale[2] : attr->scaleX.n; if ( border_x < -16 * scale_x_n || border_x >= scale_x_n ) throw std::invalid_argument(constraint); } @@ -1259,17 +1267,24 @@ void ErrorIfCheck_fn614zzdrdfd(const regor::Operation *op, [[maybe_unused]] cons auto IH = op->Input(TensorUsage::IFM)->shape.Height(); auto OH = op->Output(TensorUsage::OFM)->shape.Height(); const auto *attr = op->Attribute(); - auto scale_y_n = attr->scaleY.n; - auto scale_y_d = attr->scaleY.d; + const auto scaleConn = op->Input(TensorUsage::Params0); + const auto offsetConn = op->Input(TensorUsage::Params1); + const auto borderConn = op->Input(TensorUsage::Params2); + Shape scale = scaleConn ? GetShapeFromValues(scaleConn->tensor.get()) : Shape{}; + Shape offset = offsetConn ? GetShapeFromValues(offsetConn->tensor.get()) : Shape{}; + Shape border = borderConn ? GetShapeFromValues(borderConn->tensor.get()) : Shape{}; + // Compile time constant tensor attributes takes precedence over operator attributes + auto scale_y_n = scale ? scale[0] : attr->scaleY.n; + auto scale_y_d = scale ? scale[1] : attr->scaleY.d; if ( scale_y_n > (1 << 11) || scale_y_d >= 16 * scale_y_n ) throw std::invalid_argument(constraint); - auto offset_y = attr->offset.y; + auto offset_y = offset ? offset[0] : attr->offset.y; if ( offset_y < -scale_y_n || offset_y >= 16 * scale_y_n ) throw std::invalid_argument(constraint); if ( IH < 1 || IH >= std::numeric_limits::max() || scale_y_n <= 0 || scale_y_d <= 0 ) throw std::invalid_argument(constraint); int64_t term1 = (IH - 1LL) * scale_y_n; if ( term1 >= std::numeric_limits::max() - 2LL * std::numeric_limits::max() - 1 ) throw std::invalid_argument(constraint); - int64_t numerator = term1 - offset_y + attr->border.y; + int64_t numerator = term1 - offset_y + (border ? 
border[0] : attr->border.y); if ( numerator % scale_y_d != 0 ) throw std::invalid_argument(constraint); if ( OH != numerator / scale_y_d + 1 ) throw std::invalid_argument(constraint); } @@ -1281,81 +1296,164 @@ void ErrorIfCheck_338aejy0aeqeg(const regor::Operation *op, [[maybe_unused]] con auto IW = op->Input(TensorUsage::IFM)->shape.Width(); auto OW = op->Output(TensorUsage::OFM)->shape.Width(); const auto *attr = op->Attribute(); - auto scale_x_n = attr->scaleX.n; - auto scale_x_d = attr->scaleX.d; + const auto scaleConn = op->Input(TensorUsage::Params0); + const auto offsetConn = op->Input(TensorUsage::Params1); + const auto borderConn = op->Input(TensorUsage::Params2); + Shape scale = scaleConn ? GetShapeFromValues(scaleConn->tensor.get()) : Shape{}; + Shape offset = offsetConn ? GetShapeFromValues(offsetConn->tensor.get()) : Shape{}; + Shape border = borderConn ? GetShapeFromValues(borderConn->tensor.get()) : Shape{}; + // Compile time constant tensor attributes takes precedence over operator attributes + auto scale_x_n = scale ? scale[2] : attr->scaleX.n; + auto scale_x_d = scale ? scale[3] : attr->scaleX.d; if ( scale_x_n > (1 << 11) || scale_x_d >= 16 * scale_x_n ) throw std::invalid_argument(constraint); - auto offset_x = attr->offset.x; + auto offset_x = offset ? offset[1] : attr->offset.x; if ( offset_x < -scale_x_n || offset_x >= 16 * scale_x_n ) throw std::invalid_argument(constraint); if ( IW < 1 || IW >= std::numeric_limits::max() || scale_x_n <= 0 || scale_x_d <= 0 ) throw std::invalid_argument(constraint); int64_t term1 = (IW - 1LL) * scale_x_n; if ( term1 >= std::numeric_limits::max() - 2LL * std::numeric_limits::max() ) throw std::invalid_argument(constraint); - int64_t numerator = term1 - offset_x + attr->border.x; + int64_t numerator = term1 - offset_x + (border ? 
border[1] : attr->border.x); if ( numerator % scale_x_d != 0 ) throw std::invalid_argument(constraint); if ( OW != numerator / scale_x_d + 1 ) throw std::invalid_argument(constraint); } -void ErrorIfCheck_7p5naeft5ga8(const regor::Operation *op, [[maybe_unused]] const Context &context) +void ErrorIfCheck_2a4sjfbd544h5(const regor::Operation *op, [[maybe_unused]] const Context &context) { // Operators: RESCALE, - static constexpr char constraint[] = "ERROR_IF(in_t != int8_t && in_t != uint8_t && in_t != uint16_t && input_zp != 0)"; - auto in_t = op->IFM(0)->Type(); - auto &zp = op->Input(TensorUsage::IFM)->quantization.zeroPoints; - auto input_zp = zp.empty() ? 0 : zp[0]; - if ( in_t != DataType::Int8 && in_t != DataType::UInt8 && in_t != DataType::UInt16 && input_zp != 0 ) - throw std::invalid_argument(constraint); + static constexpr char constraint[] = "ERROR_IF(!is_same() && (!is_same() || input_unsigned == false) && input_zp != 0)"; + const auto attr = op->Attribute(); + const auto in_t = op->IFM(0)->Type(); + const auto input_zp = Scalar(*op->Input(TensorUsage::Params2)->tensor); + bool error = in_t != DataType::Int8 && (in_t != DataType::Int16 || !attr->input_unsigned) && input_zp != 0; + if ( error ) throw std::invalid_argument(constraint); } -void ErrorIfCheck_2hqaqrremyime(const regor::Operation *op, [[maybe_unused]] const Context &context) +void ErrorIfCheck_32ylwe00j5q2l(const regor::Operation *op, [[maybe_unused]] const Context &context) { // Operators: RESCALE, - static constexpr char constraint[] = "ERROR_IF(out_t != int8_t && out_t != uint8_t && out_t != uint16_t && output_zp != 0)"; - auto out_t = op->OFM()->Type(); - auto &zp = op->Output(TensorUsage::OFM)->quantization.zeroPoints; - auto output_zp = zp.empty() ? 
0 : zp[0]; - if ( out_t != DataType::Int8 && out_t != DataType::UInt8 && out_t != DataType::UInt16 && output_zp != 0 ) - throw std::invalid_argument(constraint); + static constexpr char constraint[] = "ERROR_IF(!is_same() && (!is_same() || output_unsigned == false) && output_zp != 0)"; + const auto attr = op->Attribute(); + const auto out_t = op->OFM()->Type(); + const auto output_zp = Scalar(*op->Input(TensorUsage::Params3)->tensor); + bool error = out_t != DataType::Int8 && (out_t != DataType::Int16 || !attr->output_unsigned) && output_zp != 0; + if ( error ) throw std::invalid_argument(constraint); } -void ErrorIfCheck_1wo90hck51cpk(const regor::Operation *op, [[maybe_unused]] const Context &context) +void ErrorIfCheck_3uwlzew8kfq5w(const regor::Operation *op, [[maybe_unused]] const Context &context) { // Operators: RESCALE, - static constexpr char constraint[] = "ERROR_IF(in_t == uint16_t && (input_zp != 0 || input_zp != 32768))"; - auto in_t = op->IFM(0)->Type(); - auto &zp = op->Input(TensorUsage::IFM)->quantization.zeroPoints; - auto input_zp = zp.empty() ? 
0 : zp[0]; - if ( in_t == DataType::UInt16 && (input_zp != 0 && input_zp != 32768) ) throw std::invalid_argument(constraint); + static constexpr char constraint[] = "ERROR_IF(is_same() && input_unsigned == true && input_zp != 0 && input_zp != 32768)"; + const auto attr = op->Attribute(); + const auto in_t = op->IFM(0)->Type(); + const auto input_zp = Scalar(*op->Input(TensorUsage::Params2)->tensor); + // The scalar zero point tensor will be converted as int16_t, + // change the check below since int16_t(-32768) == uint16_t(32768) + bool error = in_t == DataType::Int16 && attr->input_unsigned && input_zp != 0 && input_zp != -32768; + if ( error ) throw std::invalid_argument(constraint); } -void ErrorIfCheck_v4b9g32rnf6p(const regor::Operation *op, [[maybe_unused]] const Context &context) +void ErrorIfCheck_1sxf726x838dv(const regor::Operation *op, [[maybe_unused]] const Context &context) { // Operators: RESCALE, - static constexpr char constraint[] = "ERROR_IF(out_t == uint16_t && (output_zp != 0 || output_zp != 32768))"; - auto out_t = op->OFM()->Type(); - auto &zp = op->Output(TensorUsage::OFM)->quantization.zeroPoints; - auto output_zp = zp.empty() ? 
0 : zp[0]; - if ( out_t == DataType::UInt16 && (output_zp != 0 && output_zp != 32768) ) throw std::invalid_argument(constraint); + static constexpr char constraint[] = "ERROR_IF(is_same() && output_unsigned == true && output_zp != 0 && output_zp != 32768)"; + const auto attr = op->Attribute(); + const auto out_t = op->OFM()->Type(); + const auto output_zp = Scalar(*op->Input(TensorUsage::Params3)->tensor); + // The scalar zero point tensor will be converted as int16_t, + // change the check below since int16_t(-32768) == uint16_t(32768) + bool error = out_t == DataType::Int16 && attr->output_unsigned && output_zp != 0 && output_zp != -32768; + if ( error ) throw std::invalid_argument(constraint); } -void ErrorIfCheck_22dev8it3bz2g(const regor::Operation *op, [[maybe_unused]] const Context &context) +void ErrorIfCheck_2fl3he9sci345(const regor::Operation *op, [[maybe_unused]] const Context &context) { // Operators: RESCALE, - static constexpr char constraint[] = "ERROR_IF(scale32 && in_t == int48_t)"; - auto in_t = op->IFM(0)->Type(); - const auto *attr = op->Attribute(); + static constexpr char constraint[] = "ERROR_IF(scale32 && is_same())"; + const auto attr = op->Attribute(); + const auto in_t = op->IFM(0)->Type(); if ( attr->scale32 && in_t == DataType::Int48 ) throw std::invalid_argument(constraint); } -void ErrorIfCheck_3ms1pbkpa2td9(const regor::Operation *op, [[maybe_unused]] const Context &context) +void ErrorIfCheck_1acxf2776vdap(const regor::Operation *op, [[maybe_unused]] const Context &context) { // Operators: RESCALE, - static constexpr char constraint[] = "ERROR_IF(!scale32 && double_round)"; - const auto *attr = op->Attribute(); - + static constexpr char constraint[] = "ERROR_IF(!scale32 && (rounding_mode == DOUBLE_ROUND))"; + const auto attr = op->Attribute(); if ( !attr->scale32 && attr->double_round ) throw std::invalid_argument(constraint); } +void ErrorIfCheck_2ntycki2dof18(const regor::Operation *op, [[maybe_unused]] const Context &context) +{ 
+ // Operators: RESCALE, + static constexpr char constraint[] = "ERROR_IF(input_unsigned && output_unsigned)"; + const auto attr = op->Attribute(); + if ( attr->input_unsigned && attr->output_unsigned ) throw std::invalid_argument(constraint); +} + +void ErrorIfCheck_1yv98jo1xcmke(const regor::Operation *op, [[maybe_unused]] const Context &context) +{ + // Operators: RESCALE, + static constexpr char constraint[] = "ERROR_IF(is_same() && input_unsigned)"; + const auto attr = op->Attribute(); + const auto out_t = op->OFM()->Type(); + if ( out_t == DataType::Int32 && attr->input_unsigned ) throw std::invalid_argument(constraint); +} + +void ErrorIfCheck_bkdiivlz937z(const regor::Operation *op, [[maybe_unused]] const Context &context) +{ + // Operators: RESCALE, + static constexpr char constraint[] = "ERROR_IF(is_same() && output_unsigned)"; + const auto attr = op->Attribute(); + const auto in_t = op->IFM(0)->Type(); + if ( in_t == DataType::Int32 && attr->output_unsigned ) throw std::invalid_argument(constraint); +} + +void ErrorIfCheck_242iuwska81dr(const regor::Operation *op, [[maybe_unused]] const Context &context) +{ + // Operators: RESCALE, + static constexpr char constraint[] = "ERROR_IF(is_same() && output_unsigned)"; + const auto attr = op->Attribute(); + const auto in_t = op->IFM(0)->Type(); + if ( in_t == DataType::Int48 && attr->output_unsigned ) throw std::invalid_argument(constraint); +} + +void ErrorIfCheck_2vooovn86b8fd(const regor::Operation *op, [[maybe_unused]] const Context &context) +{ + // Operators: RESCALE, + static constexpr char constraint[] = "ERROR_IF(is_same && input_unsigned)"; + const auto attr = op->Attribute(); + const auto in_t = op->IFM(0)->Type(); + if ( in_t == DataType::Int48 && attr->input_unsigned ) throw std::invalid_argument(constraint); +} + +void ErrorIfCheck_107z2k4den74o(const regor::Operation *op, [[maybe_unused]] const Context &context) +{ + // Operators: RESCALE, + static constexpr char constraint[] = "ERROR_IF(is_same 
&& input_unsigned)"; + const auto attr = op->Attribute(); + const auto in_t = op->IFM(0)->Type(); + if ( in_t == DataType::Int32 && attr->input_unsigned ) throw std::invalid_argument(constraint); +} + +void ErrorIfCheck_38712gnuluf0u(const regor::Operation *op, [[maybe_unused]] const Context &context) +{ + // Operators: RESCALE, + static constexpr char constraint[] = "ERROR_IF(is_same && output_unsigned)"; + const auto attr = op->Attribute(); + const auto out_t = op->OFM()->Type(); + if ( out_t == DataType::Int32 && attr->output_unsigned ) throw std::invalid_argument(constraint); +} + +void ErrorIfCheck_4alci0dog4gp(const regor::Operation *op, [[maybe_unused]] const Context &context) +{ + // Operators: RESCALE, + static constexpr char constraint[] = "ERROR_IF(per_channel && rank(input) < 1)"; + const auto attr = op->Attribute(); + const auto rank_input = op->IFM(0)->StorageShape().Size(); + if ( attr->per_channel && rank_input < 1 ) throw std::invalid_argument(constraint); +} + void ErrorIfCheck_31ty7f0kcbfxg(const regor::Operation *op, [[maybe_unused]] const Context &context) { // Operators: RESCALE, @@ -1369,6 +1467,29 @@ void ErrorIfCheck_31ty7f0kcbfxg(const regor::Operation *op, [[maybe_unused]] con if ( shiftShape[0] != NC || multiplierShape[0] != NC ) throw std::invalid_argument(constraint); // NC } +void ErrorIfCheck_3oet4aggtv528(const regor::Operation *op, [[maybe_unused]] const Context &context) +{ + // Operators: CONST, + // Would ideally check that the shape of the attibute "values" matches output shape, but that has already been + // read in to the OFM buffer so no tensor exists. Instead, check that buffer has enough elements for the ofm. 
+ static constexpr char constraint[] = "ERROR_IF(rankCheck(output, values))"; + const auto &ofmConn = op->Output(TensorUsage::OFM); + const auto bufferSize = ofmConn->tensor->View().Buffer()->Size(); + const auto storageSize = DataTypeStorageSizeBytes(ofmConn->tensor->Type(), ofmConn->shape.Elements()); + // TOSA tensors align to 8 bytes so can't check exact size + // Instead, check that buffer is big enough to fill the OFM + if ( bufferSize < storageSize ) throw std::invalid_argument(constraint); +} + +void ErrorIfCheck_15kl5g5u1jrhq(const regor::Operation *op, [[maybe_unused]] const Context &context) +{ + // Operators: COND_IF, WHILE_LOOP, + static constexpr char constraint[] = "ERROR_IF(tosa_nesting_depth >= MAX_NESTING)"; + bool checkOk = true; + checkOk = (op != nullptr); // Can't implement this check with current validation code + if ( !checkOk ) throw std::invalid_argument(constraint); +} + static bool ShapeListsMatch(const ordered_map &A, const std::vector> &B, bool skipFirst = false) { @@ -1446,10 +1567,10 @@ void ErrorIfCheck_omgw2xdm6irr(const regor::Operation *op, [[maybe_unused]] cons if ( condSize != 1 ) throw std::invalid_argument(constraint); } -void ErrorIfCheck_18hgmc3pexnw4(const regor::Operation *op, [[maybe_unused]] const Context &context) +void ErrorIfCheck_2jyu87hs8upt4(const regor::Operation *op, [[maybe_unused]] const Context &context) { // Operators: WHILE_LOOP, - static constexpr char constraint[] = "ERROR_IF(tensor_list_shape(input_list) != tosa_list_shape(output_list))"; + static constexpr char constraint[] = "ERROR_IF(tensor_list_shape(input_list) != tensor_list_shape(output_list))"; if ( !ShapeListsMatch(op->Inputs(), op->Outputs()) ) throw std::invalid_argument(constraint); } @@ -1498,313 +1619,6 @@ void ErrorIfCheck_1fzl0zyxyd88z(const regor::Operation *op, [[maybe_unused]] con if ( type != DataType::Bool8 && type != DataType::Bool ) throw std::invalid_argument(constraint); } -void ErrorIfCheck_10u6py7exa66n(const regor::Operation 
*op, [[maybe_unused]] const Context &context) -{ - // Operators: CLAMP, SIGMOID, TANH, TABLE, REVERSE, CAST, RESCALE, - static constexpr char constraint[] = "ERROR_IF(rankCheck(output, input))"; - const auto &outputShape = op->Output(TensorUsage::OFM)->shape; - const auto &inputShape = op->Input(TensorUsage::IFM)->shape; - if ( outputShape != inputShape ) throw std::invalid_argument(constraint); -} - -void ErrorIfCheck_396rg8p65j58r(const regor::Operation *op, [[maybe_unused]] const Context &context) -{ - // Operators: ABS, BITWISE_NOT, CEIL, CLZ, EXP, FLOOR, LOG, LOGICAL_NOT, NEGATE, RECIPROCAL, RSQRT, IDENTITY, - static constexpr char constraint[] = "ERROR_IF(rankCheck(output, input1))"; - const auto &outputShape = op->Output(TensorUsage::OFM)->shape; - const auto &inputShape = op->Input(TensorUsage::IFM)->shape; - if ( outputShape != inputShape ) throw std::invalid_argument(constraint); -} - -void ErrorIfCheck_3oet4aggtv528(const regor::Operation *op, [[maybe_unused]] const Context &context) -{ - // Operators: CONST, - // Would ideally check that the shape of the attibute "values" matches output shape, but that has already been - // read in to the OFM buffer so no tensor exists. Instead, check that buffer has enough elements for the ofm. 
- static constexpr char constraint[] = "ERROR_IF(rankCheck(output, values))"; - const auto &ofmConn = op->Output(TensorUsage::OFM); - const auto bufferSize = ofmConn->tensor->View().Buffer()->Size(); - const auto storageSize = DataTypeStorageSizeBytes(ofmConn->tensor->Type(), ofmConn->shape.Elements()); - // TOSA tensors align to 8 bytes so can't check exact size - // Instead, check that buffer is big enough to fill the OFM - if ( bufferSize < storageSize ) throw std::invalid_argument(constraint); -} - -} // namespace checks -} // namespace validator -} // namespace tosa -namespace tosa -{ -namespace validator -{ -namespace checks -{ -// Checks for TOSA specification 0.80.0 -void ErrorIfCheck_4tfs5fdsigv(const regor::Operation *op, [[maybe_unused]] const Context &context) -{ - // Operators: AVG_POOL2D, - static constexpr char constraint[] = "ERROR_IF(in_out_t != i8_t && input_zp != 0)"; - const auto *input = op->Input(TensorUsage::IFM); - auto in_out_t = input->tensor->Type(); - auto &zp = input->quantization.zeroPoints; - auto input_zp = zp.empty() ? 0 : zp[0]; - if ( in_out_t != DataType::Int8 && input_zp != 0 ) throw std::invalid_argument(constraint); -} - -void ErrorIfCheck_3nav30dsmv6gd(const regor::Operation *op, [[maybe_unused]] const Context &context) -{ - // Operators: AVG_POOL2D, NEGATE, - static constexpr char constraint[] = "ERROR_IF(in_out_t != i8_t && output_zp != 0)"; - auto in_out_t = op->OFM()->Type(); - auto &zp = op->Output(TensorUsage::OFM)->quantization.zeroPoints; - auto output_zp = zp.empty() ? 
0 : zp[0]; - if ( in_out_t != DataType::Int8 && output_zp != 0 ) throw std::invalid_argument(constraint); -} - -void ErrorIfCheck_2p5uniza3kjyg(const regor::Operation *op, [[maybe_unused]] const Context &context) -{ - // Operators: CONV2D, CONV3D, DEPTHWISE_CONV2D, FULLY_CONNECTED, TRANSPOSE_CONV2D, - static constexpr char constraint[] = "ERROR_IF(in_t != i8_t && input_zp != 0)"; - auto in_t = op->IFM(0)->Type(); - auto &zp = op->Input(TensorUsage::IFM)->quantization.zeroPoints; - auto input_zp = zp.empty() ? 0 : zp[0]; - if ( in_t != DataType::Int8 && input_zp != 0 ) throw std::invalid_argument(constraint); -} - -void ErrorIfCheck_1gr4n0iszdlxr(const regor::Operation *op, [[maybe_unused]] const Context &context) -{ - // Operators: CONV2D, CONV3D, FULLY_CONNECTED, TRANSPOSE_CONV2D, - static constexpr char constraint[] = "ERROR_IF(BC != OC && BC != 1)"; - const auto *bias = op->Input(TensorUsage::Scales); - const auto *output = op->Output(TensorUsage::OFM); - if ( (bias->shape.Elements() != 1) && !shapeCheck(output, -1, bias, 0) ) - throw std::invalid_argument(constraint); // OC -} - -void ErrorIfCheck_318wf63fa7ql0(const regor::Operation *op, [[maybe_unused]] const Context &context) -{ - // Operators: CONV3D, DEPTHWISE_CONV2D, FULLY_CONNECTED, TRANSPOSE_CONV2D, - static constexpr char constraint[] = "ERROR_IF(weight_t != i8_t && weight_zp != 0)"; - auto weight_t = op->Input(TensorUsage::Weights)->tensor->Type(); - auto &zp = op->Input(TensorUsage::Weights)->quantization.zeroPoints; - auto weight_zp = zp.empty() ? 
0 : zp[0]; - if ( weight_t != DataType::Int8 && weight_zp != 0 ) throw std::invalid_argument(constraint); -} - -void ErrorIfCheck_2d0jmyhr9lscf(const regor::Operation *op, [[maybe_unused]] const Context &context) -{ - // Operators: DEPTHWISE_CONV2D, - static constexpr char constraint[] = "ERROR_IF(BC != C*M && BC != 1)"; - const auto *bias = op->Input(TensorUsage::Scales); - const auto *output = op->Output(TensorUsage::OFM); - if ( (bias->shape.Elements() != 1) && !shapeCheck(output, 3, bias, 0) ) - throw std::invalid_argument(constraint); // OC = C*M -} - -void ErrorIfCheck_28csiz8foar64(const regor::Operation *op, [[maybe_unused]] const Context &context) -{ - // Operators: MATMUL, - static constexpr char constraint[] = "ERROR_IF(in_t != i8_t && (A_zp != 0 || B_zp != 0))"; - auto in_t = op->IFM(0)->Type(); - auto &zpA = op->Input(TensorUsage::IFM)->quantization.zeroPoints; - auto A_zp = zpA.empty() ? 0 : zpA[0]; - auto &zpB = op->Input(TensorUsage::IFM1)->quantization.zeroPoints; - auto B_zp = zpB.empty() ? 
0 : zpB[0]; - if ( in_t != DataType::Int8 && (A_zp != 0 || B_zp != 0) ) throw std::invalid_argument(constraint); -} - -void ErrorIfCheck_3tu2mqt96ickt(const regor::Operation *op, [[maybe_unused]] const Context &context) -{ - // Operators: ADD, INTDIV, MUL, SUB, - static constexpr char constraint[] = "ERROR_IF(rank(shape) != 0 || rank(shape1) != 0 || rank(shape2) != 0)"; - bool checkOk = true; - checkOk = (op != nullptr); // Not implemented, this constraint for mode shape_t is gone in 0.90; - if ( !checkOk ) throw std::invalid_argument(constraint); -} - -void ErrorIfCheck_1hynqeiugz9lt(const regor::Operation *op, [[maybe_unused]] const Context &context) -{ - // Operators: ADD, ARITHMETIC_RIGHT_SHIFT, BITWISE_AND, BITWISE_OR, BITWISE_XOR, INTDIV, LOGICAL_AND, - // LOGICAL_LEFT_SHIFT, LOGICAL_RIGHT_SHIFT, LOGICAL_OR, LOGICAL_XOR, MAXIMUM, MINIMUM, MUL, POW, SUB, EQUAL, - // GREATER, GREATER_EQUAL, - static constexpr char constraint[] = "ERROR_IF(shape != broadcast_shape(shape1, shape2))"; - auto shape1 = op->Input(TensorUsage::IFM)->shape; - auto shape2 = op->Input(TensorUsage::IFM1)->shape; - auto shape = op->Output(TensorUsage::OFM)->shape; - if ( shape != broadcastShape(shape1, shape2) ) throw std::invalid_argument(constraint); -} - -void ErrorIfCheck_1advtk54oueo2(const regor::Operation *op, [[maybe_unused]] const Context &context) -{ - // Operators: NEGATE, - static constexpr char constraint[] = "ERROR_IF(in_out_t != i8_t && input1_zp != 0)"; - auto in_out_t = op->IFM(0)->Type(); - auto &zp = op->Input(TensorUsage::IFM)->quantization.zeroPoints; - auto input1_zp = zp.empty() ? 
0 : zp[0]; - if ( in_out_t != DataType::Int8 && input1_zp != 0 ) throw std::invalid_argument(constraint); -} - -void ErrorIfCheck_192e2vu3t5aqm(const regor::Operation *op, [[maybe_unused]] const Context &context) -{ - // Operators: SELECT, - static constexpr char constraint[] = "ERROR_IF(shape != broadcast_shape(broadcast_shape(shape1, shape2), shape3))"; - auto shape1 = op->Input(TensorUsage::IFM)->shape; - auto shape2 = op->Input(TensorUsage::IFM1)->shape; - auto shape3 = op->Input(TensorUsage::IFM2)->shape; - auto shape = op->Output(TensorUsage::OFM)->shape; - if ( shape != broadcastShape(broadcastShape(shape1, shape2), shape3) ) throw std::invalid_argument(constraint); -} - -void ErrorIfCheck_5y7ov1oeymoa(const regor::Operation *op, [[maybe_unused]] const Context &context) -{ - // Operators: CONCAT, - static constexpr char constraint[] = "ERROR_IF(axis < 0 || axis >= max(1,rank(shapes1[0])))"; - const auto rank = op->Input(TensorUsage::IFM)->shape.Size(); - auto *attr = op->Attribute(); - if ( attr->axis < 0 || attr->axis >= std::max(1, rank) ) throw std::invalid_argument(constraint); -} - -void ErrorIfCheck_oln8qpyh6lba(const regor::Operation *op, [[maybe_unused]] const Context &context) -{ - // Operators: CONCAT, - static constexpr char constraint[] = "ERROR_IF(shape[axis] != sum(shape_dim(shapes1[k], axis) for all k))"; - const auto &shape = op->Output(TensorUsage::OFM)->shape; - const auto &inputs = op->Inputs(); - auto axis = op->Attribute()->axis; - int64_t sum = 0; - for ( const auto &input : inputs ) - { - auto inputDim = axis >= input.shape.Size() ? 
1 : input.shape[axis]; - if ( inputDim < 0 || sum + inputDim > std::numeric_limits::max() ) - throw std::invalid_argument(constraint); - sum += inputDim; - } - if ( shape[axis] != sum ) throw std::invalid_argument(constraint); -} - -void ErrorIfCheck_3thipxl768n8b(const regor::Operation *op, [[maybe_unused]] const Context &context) -{ - // Operators: CONCAT, - static constexpr char constraint[] = "ERROR_IF(in_out_t == shape_t && rank(shape) > 1)"; - bool checkOk = true; - checkOk = (op != nullptr); // Not implemented, this constraint for mode shape_t is gone in 0.90; - if ( !checkOk ) throw std::invalid_argument(constraint); -} - -void ErrorIfCheck_3bzibvkt1zqng(const regor::Operation *op, [[maybe_unused]] const Context &context) -{ - // Operators: CONCAT, - static constexpr char constraint[] = "ERROR_IF(index != axis && input_shape[index] != shapes1[0][index])"; - auto axis = op->Attribute()->axis; - const auto &inputs = op->Inputs(); - const auto &shape0 = inputs.front().shape; - for ( const auto &input : inputs ) - { - if ( input.shape.Size() != shape0.Size() ) - throw std::invalid_argument("ERROR_IF(rank(input_shape) != rank(shapes1[0]))"); - for ( int index = 0; index < input.shape.Size(); index++ ) - { - if ( index != axis && input.shape[index] != shape0[index] ) throw std::invalid_argument(constraint); - } - } -} - -void ErrorIfCheck_171if2aq7ntnm(const regor::Operation *op, [[maybe_unused]] const Context &context) -{ - // Operators: DIM, - static constexpr char constraint[] = "ERROR_IF(axis >= rank(shape))"; - const auto rank = op->Input(TensorUsage::IFM)->shape.Size(); - auto axis = op->Attribute()->axis; - if ( axis >= rank ) throw std::invalid_argument(constraint); -} - -void ErrorIfCheck_1wbutqm1lq6qy(const regor::Operation *op, [[maybe_unused]] const Context &context) -{ - // Operators: RESCALE, - static constexpr char constraint[] = "ERROR_IF(in_t != i8_t && (in_t != i16_t || input_unsigned == False) && input_zp != 0)"; - auto bits = 
DataTypeSizeBits(op->IFM(0)->Type()); - auto &zp = op->Input(TensorUsage::IFM)->quantization.zeroPoints; - auto input_zp = zp.empty() ? 0 : zp[0]; - auto *attr = op->Attribute(); - if ( bits != 8 && (bits != 16 || !attr->input_unsigned) && input_zp != 0 ) throw std::invalid_argument(constraint); -} - -void ErrorIfCheck_2x883ovw61v55(const regor::Operation *op, [[maybe_unused]] const Context &context) -{ - // Operators: RESCALE, - static constexpr char constraint[] = "ERROR_IF(out_t != i8_t && (out_t != i16_t || output_unsigned == False) && output_zp != 0)"; - auto bits = DataTypeSizeBits(op->OFM()->Type()); - auto &zp = op->Output(TensorUsage::OFM)->quantization.zeroPoints; - auto output_zp = zp.empty() ? 0 : zp[0]; - auto *attr = op->Attribute(); - if ( bits != 8 && (bits != 16 || !attr->output_unsigned) && output_zp != 0 ) - throw std::invalid_argument(constraint); -} - -void ErrorIfCheck_7yfu5xo1ii36(const regor::Operation *op, [[maybe_unused]] const Context &context) -{ - // Operators: RESCALE, - static constexpr char constraint[] = "ERROR_IF(in_t == i16_t && input_unsigned == True && input_zp != 0 && input_zp != 32768)"; - auto bits = DataTypeSizeBits(op->IFM(0)->Type()); - auto &zp = op->Input(TensorUsage::IFM)->quantization.zeroPoints; - auto input_zp = zp.empty() ? 0 : zp[0]; - auto *attr = op->Attribute(); - if ( bits == 16 && attr->input_unsigned && input_zp != 0 && input_zp != 32768 ) - throw std::invalid_argument(constraint); -} - -void ErrorIfCheck_3kc0n1wjhehqz(const regor::Operation *op, [[maybe_unused]] const Context &context) -{ - // Operators: RESCALE, - static constexpr char constraint[] = "ERROR_IF(out_t == i16_t && output_unsigned == True && output_zp != 0 && output_zp != 32768)"; - auto bits = DataTypeSizeBits(op->OFM()->Type()); - auto &zp = op->Output(TensorUsage::OFM)->quantization.zeroPoints; - auto output_zp = zp.empty() ? 
0 : zp[0]; - auto *attr = op->Attribute(); - if ( bits == 16 && attr->output_unsigned && output_zp != 0 && output_zp != 32768 ) - throw std::invalid_argument(constraint); -} - -void ErrorIfCheck_3rzfyy6qi1bly(const regor::Operation *op, [[maybe_unused]] const Context &context) -{ - // Operators: RESCALE, - static constexpr char constraint[] = "ERROR_IF(scale32 && in_t == i48_t)"; - auto in_t = op->IFM(0)->Type(); - const auto *attr = op->Attribute(); - if ( attr->scale32 && DataTypeSizeBits(in_t) == 48 ) throw std::invalid_argument(constraint); -} - -void ErrorIfCheck_23cyq2l8quj8p(const regor::Operation *op, [[maybe_unused]] const Context &context) -{ - // Operators: RESCALE, - static constexpr char constraint[] = "ERROR_IF(in_t == i16_t && out_t == i32_t && input_unsigned)"; - auto in_t = op->IFM(0)->Type(); - auto out_t = op->OFM()->Type(); - auto *attr = op->Attribute(); - if ( DataTypeSizeBits(in_t) == 16 && DataTypeSizeBits(out_t) == 32 && attr->input_unsigned ) - throw std::invalid_argument(constraint); -} - -void ErrorIfCheck_13bcaagzywlqq(const regor::Operation *op, [[maybe_unused]] const Context &context) -{ - // Operators: RESCALE, - static constexpr char constraint[] = "ERROR_IF(in_t == i32_t && out_t == i16_t && output_unsigned)"; - auto in_t = op->IFM(0)->Type(); - auto out_t = op->OFM()->Type(); - auto *attr = op->Attribute(); - if ( DataTypeSizeBits(in_t) == 32 && DataTypeSizeBits(out_t) == 16 && attr->output_unsigned ) - throw std::invalid_argument(constraint); -} - -void ErrorIfCheck_15kl5g5u1jrhq(const regor::Operation *op, [[maybe_unused]] const Context &context) -{ - // Operators: COND_IF, WHILE_LOOP, - static constexpr char constraint[] = "ERROR_IF(tosa_nesting_depth >= MAX_NESTING)"; - bool checkOk = true; - checkOk = (op != nullptr); // Can't implement this check with current validation code - if ( !checkOk ) throw std::invalid_argument(constraint); -} - } // namespace checks } // namespace validator } // namespace tosa diff --git 
a/ethosu/regor/tosa/tosa_error_checks.hpp b/ethosu/regor/tosa/tosa_error_checks.hpp index e96f1b60..0dcf415d 100644 --- a/ethosu/regor/tosa/tosa_error_checks.hpp +++ b/ethosu/regor/tosa/tosa_error_checks.hpp @@ -1,5 +1,5 @@ // -// SPDX-FileCopyrightText: Copyright 2023-2024 Arm Limited and/or its affiliates +// SPDX-FileCopyrightText: Copyright 2023-2025 Arm Limited and/or its affiliates // // SPDX-License-Identifier: Apache-2.0 // @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. // -// Generated by tosaValidationGenerator for TOSA Specification 0.60.0 +// Automatically generated by tosaValidationGenerator for TOSA Specification 1.0.0draft // Do not edit. #pragma once @@ -28,11 +28,11 @@ namespace validator { namespace checks { -// Checks for TOSA Specification 0.60.0 -void ErrorIfCheck_ai0sdq9wgm72(const regor::Operation *op, const Context &context); +// Checks for TOSA Specification 1.0.0draft +void ErrorIfCheck_3tg4p2a5te0jy(const regor::Operation *op, const Context &context); void ErrorIfCheck_gpp861oen43y(const regor::Operation *op, const Context &context); -void ErrorIfCheck_1vu5c1tytwmhu(const regor::Operation *op, const Context &context); -void ErrorIfCheck_1n0denkrrrlr1(const regor::Operation *op, const Context &context); +void ErrorIfCheck_2nanft1ivm5fj(const regor::Operation *op, const Context &context); +void ErrorIfCheck_1ga3gcg4zkrkv(const regor::Operation *op, const Context &context); void ErrorIfCheck_36r4wpx3psd81(const regor::Operation *op, const Context &context); void ErrorIfCheck_1lrylbkd3w7ix(const regor::Operation *op, const Context &context); void ErrorIfCheck_ojmgqziimenu(const regor::Operation *op, const Context &context); @@ -41,28 +41,26 @@ void ErrorIfCheck_125xuezh1964i(const regor::Operation *op, const Context &conte void ErrorIfCheck_fqta626ku4qe(const regor::Operation *op, const Context &context); void ErrorIfCheck_ycjhrvf2yigr(const regor::Operation *op, 
const Context &context); void ErrorIfCheck_1c57olj698f3d(const regor::Operation *op, const Context &context); -void ErrorIfCheck_1hby1qurzja4f(const regor::Operation *op, const Context &context); -void ErrorIfCheck_1md8k265hfj92(const regor::Operation *op, const Context &context); +void ErrorIfCheck_1hrio849y2qnx(const regor::Operation *op, const Context &context); +void ErrorIfCheck_31vgfyg6fi9t6(const regor::Operation *op, const Context &context); void ErrorIfCheck_3fzsq78v5ypau(const regor::Operation *op, const Context &context); void ErrorIfCheck_2vhj6e48eyzlr(const regor::Operation *op, const Context &context); void ErrorIfCheck_147wc580l2tik(const regor::Operation *op, const Context &context); +void ErrorIfCheck_1gr4n0iszdlxr(const regor::Operation *op, const Context &context); void ErrorIfCheck_2rm8rnsdfn14h(const regor::Operation *op, const Context &context); void ErrorIfCheck_36emtx7zwkk96(const regor::Operation *op, const Context &context); -void ErrorIfCheck_2r9jencgka20o(const regor::Operation *op, const Context &context); -void ErrorIfCheck_207p0r46d35m0(const regor::Operation *op, const Context &context); void ErrorIfCheck_cr43yjpqkcpd(const regor::Operation *op, const Context &context); +void ErrorIfCheck_3m5ijs493bw6j(const regor::Operation *op, const Context &context); void ErrorIfCheck_341t6ysqc16b2(const regor::Operation *op, const Context &context); void ErrorIfCheck_uqm570jwaqb6(const regor::Operation *op, const Context &context); void ErrorIfCheck_34iiwt6o66qfa(const regor::Operation *op, const Context &context); void ErrorIfCheck_llbd3iugmek0(const regor::Operation *op, const Context &context); void ErrorIfCheck_1w510kxt5b2b2(const regor::Operation *op, const Context &context); void ErrorIfCheck_27g3t38z1of4h(const regor::Operation *op, const Context &context); -void ErrorIfCheck_95jvn4dzraol(const regor::Operation *op, const Context &context); -void ErrorIfCheck_21377cjnb1ox7(const regor::Operation *op, const Context &context); void 
ErrorIfCheck_2cpco8ykx99sa(const regor::Operation *op, const Context &context); -void ErrorIfCheck_10sexbqileii7(const regor::Operation *op, const Context &context); -void ErrorIfCheck_12rt0p658ac1(const regor::Operation *op, const Context &context); -void ErrorIfCheck_3cem64qtn6ajr(const regor::Operation *op, const Context &context); +void ErrorIfCheck_2d0jmyhr9lscf(const regor::Operation *op, const Context &context); +void ErrorIfCheck_10td4qt70dp3i(const regor::Operation *op, const Context &context); +void ErrorIfCheck_1qxtjwwlh068t(const regor::Operation *op, const Context &context); void ErrorIfCheck_1hp4djlq1mi8i(const regor::Operation *op, const Context &context); void ErrorIfCheck_20r08ymi6c43u(const regor::Operation *op, const Context &context); void ErrorIfCheck_1xwwkxeypcw3j(const regor::Operation *op, const Context &context); @@ -71,37 +69,36 @@ void ErrorIfCheck_1m8qk2pbuovev(const regor::Operation *op, const Context &conte void ErrorIfCheck_1iv4j2x95j8dk(const regor::Operation *op, const Context &context); void ErrorIfCheck_316kdwzc9jf5x(const regor::Operation *op, const Context &context); void ErrorIfCheck_tnr115b4spgw(const regor::Operation *op, const Context &context); -void ErrorIfCheck_3ufiqep5ipuco(const regor::Operation *op, const Context &context); -void ErrorIfCheck_3kcipzq18dxv9(const regor::Operation *op, const Context &context); -void ErrorIfCheck_jcjmr2nnatvv(const regor::Operation *op, const Context &context); -void ErrorIfCheck_qwmo2w7hxola(const regor::Operation *op, const Context &context); -void ErrorIfCheck_c9o11f07skde(const regor::Operation *op, const Context &context); -void ErrorIfCheck_1ellfcuw76b13(const regor::Operation *op, const Context &context); +void ErrorIfCheck_2autvayhidla8(const regor::Operation *op, const Context &context); void ErrorIfCheck_h1uadv5irsu6(const regor::Operation *op, const Context &context); void ErrorIfCheck_1kfh97qingywb(const regor::Operation *op, const Context &context); void 
ErrorIfCheck_1azcq4511qzyx(const regor::Operation *op, const Context &context); -void ErrorIfCheck_15o9wo9pu7mrg(const regor::Operation *op, const Context &context); +void ErrorIfCheck_2befn2dfjcm62(const regor::Operation *op, const Context &context); void ErrorIfCheck_13tqdu59nyxyh(const regor::Operation *op, const Context &context); -void ErrorIfCheck_2kgf2jejxlrr6(const regor::Operation *op, const Context &context); +void ErrorIfCheck_khc2s3en2uxi(const regor::Operation *op, const Context &context); void ErrorIfCheck_q9dl3x81rc4o(const regor::Operation *op, const Context &context); void ErrorIfCheck_2rfkujt9lg7eq(const regor::Operation *op, const Context &context); void ErrorIfCheck_3nelbnmxyemot(const regor::Operation *op, const Context &context); void ErrorIfCheck_24conlof4w8eh(const regor::Operation *op, const Context &context); void ErrorIfCheck_xod9coigx1x2(const regor::Operation *op, const Context &context); +void ErrorIfCheck_15y4an3ceern5(const regor::Operation *op, const Context &context); +void ErrorIfCheck_10u6py7exa66n(const regor::Operation *op, const Context &context); +void ErrorIfCheck_1hynqeiugz9lt(const regor::Operation *op, const Context &context); void ErrorIfCheck_1yism57if6v2z(const regor::Operation *op, const Context &context); void ErrorIfCheck_3k5ug2w7gxc7r(const regor::Operation *op, const Context &context); -void ErrorIfCheck_2gdayq6ofi7wx(const regor::Operation *op, const Context &context); -void ErrorIfCheck_38qvty7pudfz2(const regor::Operation *op, const Context &context); +void ErrorIfCheck_396rg8p65j58r(const regor::Operation *op, const Context &context); +void ErrorIfCheck_3l2ksvk26m07h(const regor::Operation *op, const Context &context); +void ErrorIfCheck_192e2vu3t5aqm(const regor::Operation *op, const Context &context); void ErrorIfCheck_3tccsjner0km9(const regor::Operation *op, const Context &context); -void ErrorIfCheck_3tg4p2a5te0jy(const regor::Operation *op, const Context &context); void ErrorIfCheck_33exz9gn2i1wy(const 
regor::Operation *op, const Context &context); -void ErrorIfCheck_14slfd7r77hgh(const regor::Operation *op, const Context &context); -void ErrorIfCheck_1fzhf02pkiw9z(const regor::Operation *op, const Context &context); -void ErrorIfCheck_16s99hvsej4fo(const regor::Operation *op, const Context &context); -void ErrorIfCheck_dctmd6sgn5n0(const regor::Operation *op, const Context &context); +void ErrorIfCheck_2d3qdl1f70i6y(const regor::Operation *op, const Context &context); +void ErrorIfCheck_5y7ov1oeymoa(const regor::Operation *op, const Context &context); +void ErrorIfCheck_1aloht2b77zby(const regor::Operation *op, const Context &context); +void ErrorIfCheck_f1kt9a6h7s2p(const regor::Operation *op, const Context &context); +void ErrorIfCheck_302z1f8mq8lg7(const regor::Operation *op, const Context &context); void ErrorIfCheck_14z7y0qe9lwps(const regor::Operation *op, const Context &context); -void ErrorIfCheck_2rfef32dgp3be(const regor::Operation *op, const Context &context); -void ErrorIfCheck_2sfcgak3rj1vs(const regor::Operation *op, const Context &context); +void ErrorIfCheck_3dvn5k3273lwz(const regor::Operation *op, const Context &context); +void ErrorIfCheck_34zvbtwx1r18j(const regor::Operation *op, const Context &context); void ErrorIfCheck_2a1jpygblc07i(const regor::Operation *op, const Context &context); void ErrorIfCheck_3hthyoock2ew5(const regor::Operation *op, const Context &context); void ErrorIfCheck_1nifeiq9rvmb8(const regor::Operation *op, const Context &context); @@ -135,61 +132,34 @@ void ErrorIfCheck_12uj5fltk5rbo(const regor::Operation *op, const Context &conte void ErrorIfCheck_1py9f91imwjxe(const regor::Operation *op, const Context &context); void ErrorIfCheck_fn614zzdrdfd(const regor::Operation *op, const Context &context); void ErrorIfCheck_338aejy0aeqeg(const regor::Operation *op, const Context &context); -void ErrorIfCheck_7p5naeft5ga8(const regor::Operation *op, const Context &context); -void ErrorIfCheck_2hqaqrremyime(const regor::Operation 
*op, const Context &context); -void ErrorIfCheck_1wo90hck51cpk(const regor::Operation *op, const Context &context); -void ErrorIfCheck_v4b9g32rnf6p(const regor::Operation *op, const Context &context); -void ErrorIfCheck_22dev8it3bz2g(const regor::Operation *op, const Context &context); -void ErrorIfCheck_3ms1pbkpa2td9(const regor::Operation *op, const Context &context); +void ErrorIfCheck_2a4sjfbd544h5(const regor::Operation *op, const Context &context); +void ErrorIfCheck_32ylwe00j5q2l(const regor::Operation *op, const Context &context); +void ErrorIfCheck_3uwlzew8kfq5w(const regor::Operation *op, const Context &context); +void ErrorIfCheck_1sxf726x838dv(const regor::Operation *op, const Context &context); +void ErrorIfCheck_2fl3he9sci345(const regor::Operation *op, const Context &context); +void ErrorIfCheck_1acxf2776vdap(const regor::Operation *op, const Context &context); +void ErrorIfCheck_2ntycki2dof18(const regor::Operation *op, const Context &context); +void ErrorIfCheck_1yv98jo1xcmke(const regor::Operation *op, const Context &context); +void ErrorIfCheck_bkdiivlz937z(const regor::Operation *op, const Context &context); +void ErrorIfCheck_242iuwska81dr(const regor::Operation *op, const Context &context); +void ErrorIfCheck_2vooovn86b8fd(const regor::Operation *op, const Context &context); +void ErrorIfCheck_107z2k4den74o(const regor::Operation *op, const Context &context); +void ErrorIfCheck_38712gnuluf0u(const regor::Operation *op, const Context &context); +void ErrorIfCheck_4alci0dog4gp(const regor::Operation *op, const Context &context); void ErrorIfCheck_31ty7f0kcbfxg(const regor::Operation *op, const Context &context); +void ErrorIfCheck_3oet4aggtv528(const regor::Operation *op, const Context &context); +void ErrorIfCheck_15kl5g5u1jrhq(const regor::Operation *op, const Context &context); void ErrorIfCheck_1bm39avugkqqd(const regor::Operation *op, const Context &context); void ErrorIfCheck_3tv3oatlz37e2(const regor::Operation *op, const Context 
&context); void ErrorIfCheck_n7biu53x2n6k(const regor::Operation *op, const Context &context); void ErrorIfCheck_2fd4dk1zw032u(const regor::Operation *op, const Context &context); void ErrorIfCheck_omgw2xdm6irr(const regor::Operation *op, const Context &context); -void ErrorIfCheck_18hgmc3pexnw4(const regor::Operation *op, const Context &context); +void ErrorIfCheck_2jyu87hs8upt4(const regor::Operation *op, const Context &context); void ErrorIfCheck_12uu5ff3t3lv8(const regor::Operation *op, const Context &context); void ErrorIfCheck_3puzf7van5acf(const regor::Operation *op, const Context &context); void ErrorIfCheck_8tihij7a5ep0(const regor::Operation *op, const Context &context); void ErrorIfCheck_3lu68v2531bjz(const regor::Operation *op, const Context &context); void ErrorIfCheck_1fzl0zyxyd88z(const regor::Operation *op, const Context &context); -void ErrorIfCheck_10u6py7exa66n(const regor::Operation *op, const Context &context); -void ErrorIfCheck_396rg8p65j58r(const regor::Operation *op, const Context &context); -void ErrorIfCheck_3oet4aggtv528(const regor::Operation *op, const Context &context); -} // namespace checks -} // namespace validator -} // namespace tosa -namespace tosa -{ -namespace validator -{ -namespace checks -{ -// Checks for TOSA specification 0.80.0 -void ErrorIfCheck_4tfs5fdsigv(const regor::Operation *op, const Context &context); -void ErrorIfCheck_3nav30dsmv6gd(const regor::Operation *op, const Context &context); -void ErrorIfCheck_2p5uniza3kjyg(const regor::Operation *op, const Context &context); -void ErrorIfCheck_1gr4n0iszdlxr(const regor::Operation *op, const Context &context); -void ErrorIfCheck_318wf63fa7ql0(const regor::Operation *op, const Context &context); -void ErrorIfCheck_2d0jmyhr9lscf(const regor::Operation *op, const Context &context); -void ErrorIfCheck_28csiz8foar64(const regor::Operation *op, const Context &context); -void ErrorIfCheck_3tu2mqt96ickt(const regor::Operation *op, const Context &context); -void 
ErrorIfCheck_1hynqeiugz9lt(const regor::Operation *op, const Context &context); -void ErrorIfCheck_1advtk54oueo2(const regor::Operation *op, const Context &context); -void ErrorIfCheck_192e2vu3t5aqm(const regor::Operation *op, const Context &context); -void ErrorIfCheck_5y7ov1oeymoa(const regor::Operation *op, const Context &context); -void ErrorIfCheck_oln8qpyh6lba(const regor::Operation *op, const Context &context); -void ErrorIfCheck_3thipxl768n8b(const regor::Operation *op, const Context &context); -void ErrorIfCheck_3bzibvkt1zqng(const regor::Operation *op, const Context &context); -void ErrorIfCheck_171if2aq7ntnm(const regor::Operation *op, const Context &context); -void ErrorIfCheck_1wbutqm1lq6qy(const regor::Operation *op, const Context &context); -void ErrorIfCheck_2x883ovw61v55(const regor::Operation *op, const Context &context); -void ErrorIfCheck_7yfu5xo1ii36(const regor::Operation *op, const Context &context); -void ErrorIfCheck_3kc0n1wjhehqz(const regor::Operation *op, const Context &context); -void ErrorIfCheck_3rzfyy6qi1bly(const regor::Operation *op, const Context &context); -void ErrorIfCheck_23cyq2l8quj8p(const regor::Operation *op, const Context &context); -void ErrorIfCheck_13bcaagzywlqq(const regor::Operation *op, const Context &context); -void ErrorIfCheck_15kl5g5u1jrhq(const regor::Operation *op, const Context &context); } // namespace checks } // namespace validator } // namespace tosa diff --git a/ethosu/regor/tosa/tosa_level_checks.cpp b/ethosu/regor/tosa/tosa_level_checks.cpp index 9deeb585..898cfaff 100644 --- a/ethosu/regor/tosa/tosa_level_checks.cpp +++ b/ethosu/regor/tosa/tosa_level_checks.cpp @@ -1,5 +1,5 @@ // -// SPDX-FileCopyrightText: Copyright 2023-2024 Arm Limited and/or its affiliates +// SPDX-FileCopyrightText: Copyright 2023-2025 Arm Limited and/or its affiliates // // SPDX-License-Identifier: Apache-2.0 // @@ -15,8 +15,8 @@ // See the License for the specific language governing permissions and // limitations under the 
License. // -// Generated by tosaValidationGenerator for TOSA Specification 0.60.0 -// Modify by implementing the constraints. +// Partially generated by tosaValidationGenerator for TOSA Specification 1.0.0draft +// TODO: Implement the constraints. #include "tosa_level_checks.hpp" @@ -26,7 +26,7 @@ namespace validator { namespace checks { -// Checks for TOSA Specification 0.60.0 +// Checks for TOSA Specification 1.0.0draft void LevelCheck_1lz89reckvj8d(const regor::Operation *op, [[maybe_unused]] const Context &context) { // Operators: ARGMAX, RESHAPE, @@ -236,16 +236,26 @@ void LevelCheck_a0x2apl3zoz(const regor::Operation *op, [[maybe_unused]] const C void LevelCheck_1flzmpv6hubzc(const regor::Operation *op, [[maybe_unused]] const Context &context) { - // Operators: CLAMP, SIGMOID, TANH, ADD, ARITHMETIC_RIGHT_SHIFT, BITWISE_AND, BITWISE_OR, BITWISE_XOR, INTDIV, + // Operators: CLAMP, ERF, SIGMOID, TANH, ADD, ARITHMETIC_RIGHT_SHIFT, BITWISE_AND, BITWISE_OR, BITWISE_XOR, INTDIV, // LOGICAL_AND, LOGICAL_LEFT_SHIFT, LOGICAL_RIGHT_SHIFT, LOGICAL_OR, LOGICAL_XOR, MAXIMUM, MINIMUM, MUL, POW, SUB, - // TABLE, ABS, BITWISE_NOT, CEIL, CLZ, EXP, FLOOR, LOG, LOGICAL_NOT, NEGATE, RECIPROCAL, RSQRT, SELECT, EQUAL, - // GREATER, GREATER_EQUAL, CONCAT, PAD, RESHAPE, REVERSE, SLICE, TILE, TRANSPOSE, CAST, RESCALE, + // TABLE, ABS, BITWISE_NOT, CEIL, CLZ, COS, EXP, FLOOR, LOG, LOGICAL_NOT, NEGATE, RECIPROCAL, RSQRT, SIN, SELECT, + // EQUAL, GREATER, GREATER_EQUAL, CONCAT, PAD, RESHAPE, REVERSE, SLICE, TILE, TRANSPOSE, CAST, RESCALE, COND_IF, + // VARIABLE, VARIABLE_WRITE, VARIABLE_READ, static constexpr char constraint[] = "LEVEL_CHECK(rank(shape) <= MAX_RANK)"; bool checkOk = true; checkOk = (op != nullptr); // TODO: Implement check if ( !checkOk ) throw std::invalid_argument(constraint); } +void LevelCheck_3tcyujqdy8gol(const regor::Operation *op, [[maybe_unused]] const Context &context) +{ + // Operators: CONCAT, + static constexpr char constraint[] = 
"LEVEL_CHECK(tensor_list_shape(input1) <= MAX_TENSOR_LIST_SIZE)"; + bool checkOk = true; + checkOk = (op != nullptr); // TODO: Implement check + if ( !checkOk ) throw std::invalid_argument(constraint); +} + void LevelCheck_1r40jc4ashh6o(const regor::Operation *op, [[maybe_unused]] const Context &context) { // Operators: RESIZE, @@ -264,6 +274,24 @@ void LevelCheck_1u7rtl141felu(const regor::Operation *op, [[maybe_unused]] const if ( !checkOk ) throw std::invalid_argument(constraint); } +void LevelCheck_3ufj7d9b3dpok(const regor::Operation *op, [[maybe_unused]] const Context &context) +{ + // Operators: CUSTOM, COND_IF, WHILE_LOOP, + static constexpr char constraint[] = "LEVEL_CHECK(tensor_list_shape(input_list) <= MAX_TENSOR_LIST_SIZE)"; + bool checkOk = true; + checkOk = (op != nullptr); // TODO: Implement check + if ( !checkOk ) throw std::invalid_argument(constraint); +} + +void LevelCheck_2b1mift7kqw7v(const regor::Operation *op, [[maybe_unused]] const Context &context) +{ + // Operators: CUSTOM, COND_IF, WHILE_LOOP, + static constexpr char constraint[] = "LEVEL_CHECK(tensor_list_shape(output_list) <= MAX_TENSOR_LIST_SIZE)"; + bool checkOk = true; + checkOk = (op != nullptr); // TODO: Implement check + if ( !checkOk ) throw std::invalid_argument(constraint); +} + } // namespace checks } // namespace validator } // namespace tosa diff --git a/ethosu/regor/tosa/tosa_level_checks.hpp b/ethosu/regor/tosa/tosa_level_checks.hpp index 2e39dea7..d561463d 100644 --- a/ethosu/regor/tosa/tosa_level_checks.hpp +++ b/ethosu/regor/tosa/tosa_level_checks.hpp @@ -1,5 +1,5 @@ // -// SPDX-FileCopyrightText: Copyright 2023-2024 Arm Limited and/or its affiliates +// SPDX-FileCopyrightText: Copyright 2023-2025 Arm Limited and/or its affiliates // // SPDX-License-Identifier: Apache-2.0 // @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
// -// Generated by tosaValidationGenerator for TOSA Specification 0.60.0 +// Automatically generated by tosaValidationGenerator for TOSA Specification 1.0.0draft // Do not edit. #pragma once @@ -28,7 +28,7 @@ namespace validator { namespace checks { -// Checks for TOSA Specification 0.60.0 +// Checks for TOSA Specification 1.0.0draft void LevelCheck_1lz89reckvj8d(const regor::Operation *op, const Context &context); void LevelCheck_2i1ithnrq06wi(const regor::Operation *op, const Context &context); void LevelCheck_1wobi8axf7z2y(const regor::Operation *op, const Context &context); @@ -53,8 +53,11 @@ void LevelCheck_me421i5r5j13(const regor::Operation *op, const Context &context) void LevelCheck_2ffhdgbz1kvxc(const regor::Operation *op, const Context &context); void LevelCheck_a0x2apl3zoz(const regor::Operation *op, const Context &context); void LevelCheck_1flzmpv6hubzc(const regor::Operation *op, const Context &context); +void LevelCheck_3tcyujqdy8gol(const regor::Operation *op, const Context &context); void LevelCheck_1r40jc4ashh6o(const regor::Operation *op, const Context &context); void LevelCheck_1u7rtl141felu(const regor::Operation *op, const Context &context); +void LevelCheck_3ufj7d9b3dpok(const regor::Operation *op, const Context &context); +void LevelCheck_2b1mift7kqw7v(const regor::Operation *op, const Context &context); } // namespace checks } // namespace validator } // namespace tosa diff --git a/ethosu/regor/tosa/tosa_mapping.cpp b/ethosu/regor/tosa/tosa_mapping.cpp index df27755e..ed69800a 100644 --- a/ethosu/regor/tosa/tosa_mapping.cpp +++ b/ethosu/regor/tosa/tosa_mapping.cpp @@ -1,5 +1,5 @@ // -// SPDX-FileCopyrightText: Copyright 2024 Arm Limited and/or its affiliates +// SPDX-FileCopyrightText: Copyright 2024-2025 Arm Limited and/or its affiliates // // SPDX-License-Identifier: Apache-2.0 // @@ -31,16 +31,17 @@ namespace regor static constexpr std::pair s_tensorTypeToDataType[] = { // clang-format off {tosaFb::DType::BOOL, 
GraphApi::GraphDataType::Bool8}, - {tosaFb::DType::UINT8, GraphApi::GraphDataType::UInt8}, {tosaFb::DType::INT4, GraphApi::GraphDataType::Int4Packed8}, {tosaFb::DType::INT8, GraphApi::GraphDataType::Int8}, {tosaFb::DType::INT16, GraphApi::GraphDataType::Int16}, {tosaFb::DType::INT32, GraphApi::GraphDataType::Int32}, {tosaFb::DType::INT48, GraphApi::GraphDataType::Int48}, {tosaFb::DType::FP32, GraphApi::GraphDataType::Float32}, - {tosaFb::DType::UINT16, GraphApi::GraphDataType::UInt16}, {tosaFb::DType::FP16, GraphApi::GraphDataType::Float16}, {tosaFb::DType::BF16, GraphApi::GraphDataType::BFloat16}, + {tosaFb::DType::SHAPE, GraphApi::GraphDataType::Int64}, + {tosaFb::DType::FP8E4M3, GraphApi::GraphDataType::Float8e4m3}, + {tosaFb::DType::FP8E5M2, GraphApi::GraphDataType::Float8e5m2}, // clang-format on }; @@ -52,12 +53,13 @@ static constexpr std::pair s_FBOpToOp[] = { {tosaFb::Op::CONV2D, tosa::Op::CONV2D}, {tosaFb::Op::CONV3D, tosa::Op::CONV3D}, {tosaFb::Op::DEPTHWISE_CONV2D, tosa::Op::DEPTHWISE_CONV2D}, - {tosaFb::Op::FULLY_CONNECTED, tosa::Op::FULLY_CONNECTED}, + {tosaFb::Op::FFT2D, tosa::Op::FFT2D}, {tosaFb::Op::MATMUL, tosa::Op::MATMUL}, {tosaFb::Op::MAX_POOL2D, tosa::Op::MAX_POOL2D}, + {tosaFb::Op::RFFT2D, tosa::Op::RFFT2D}, {tosaFb::Op::TRANSPOSE_CONV2D, tosa::Op::TRANSPOSE_CONV2D}, {tosaFb::Op::CLAMP, tosa::Op::CLAMP}, - {tosaFb::Op::RESERVED, tosa::Op::RESERVED}, + {tosaFb::Op::ERF, tosa::Op::ERF}, {tosaFb::Op::SIGMOID, tosa::Op::SIGMOID}, {tosaFb::Op::TANH, tosa::Op::TANH}, {tosaFb::Op::ADD, tosa::Op::ADD}, @@ -92,8 +94,8 @@ static constexpr std::pair s_FBOpToOp[] = { {tosaFb::Op::EQUAL, tosa::Op::EQUAL}, {tosaFb::Op::GREATER, tosa::Op::GREATER}, {tosaFb::Op::GREATER_EQUAL, tosa::Op::GREATER_EQUAL}, - {tosaFb::Op::REDUCE_ANY, tosa::Op::REDUCE_ANY}, {tosaFb::Op::REDUCE_ALL, tosa::Op::REDUCE_ALL}, + {tosaFb::Op::REDUCE_ANY, tosa::Op::REDUCE_ANY}, {tosaFb::Op::REDUCE_MAX, tosa::Op::REDUCE_MAX}, {tosaFb::Op::REDUCE_MIN, tosa::Op::REDUCE_MIN}, 
{tosaFb::Op::REDUCE_PRODUCT, tosa::Op::REDUCE_PRODUCT}, @@ -115,10 +117,10 @@ static constexpr std::pair s_FBOpToOp[] = { {tosaFb::Op::CUSTOM, tosa::Op::CUSTOM}, {tosaFb::Op::COND_IF, tosa::Op::COND_IF}, {tosaFb::Op::WHILE_LOOP, tosa::Op::WHILE_LOOP}, - {tosaFb::Op::FFT2D, tosa::Op::FFT2D}, - {tosaFb::Op::RFFT2D, tosa::Op::RFFT2D}, - {tosaFb::Op::ERF, tosa::Op::ERF}, - {tosaFb::Op::DIM, tosa::Op::DIM}, + {tosaFb::Op::VARIABLE, tosa::Op::VARIABLE}, + {tosaFb::Op::VARIABLE_WRITE, tosa::Op::VARIABLE_WRITE}, + {tosaFb::Op::VARIABLE_READ, tosa::Op::VARIABLE_READ}, + {tosaFb::Op::CONST_SHAPE, tosa::Op::CONST_SHAPE}, // clang-format on }; @@ -151,8 +153,7 @@ constexpr VALUE Lookup(const std::pair (&arr)[SZ], KEY type) { auto pos = std::equal_range(std::begin(arr), std::end(arr), std::pair(type, {}), [](const auto &a, const auto &b) { return a.first < b.first; }); - assert(pos.first != std::end(arr)); - return pos.first->second; + return pos.first != std::end(arr) ? pos.first->second : VALUE(0); } GraphApi::GraphDataType TosaMapping::TensorTypeToDataType(tosaFb::DType type) diff --git a/ethosu/regor/tosa/tosa_reader.cpp b/ethosu/regor/tosa/tosa_reader.cpp index 1f79b4a7..328e698f 100644 --- a/ethosu/regor/tosa/tosa_reader.cpp +++ b/ethosu/regor/tosa/tosa_reader.cpp @@ -18,6 +18,8 @@ #include "tosa_reader.hpp" +#include "common/common.hpp" + #include "common/shape.hpp" #include "compiler/attributes.hpp" #include "compiler/graph_builder.hpp" @@ -60,7 +62,7 @@ inline void tosa_assert(bool cond, const char *msg = nullptr) { if ( !cond ) { - throw std::runtime_error("TOSA FB Reader error : " + std::string(msg ? msg : "Failed to load TOSA model. Buffer contents inconsistent with generated schema")); + throw std::runtime_error("TOSA FB Reader error: " + std::string(msg ? msg : "Failed to load TOSA model. 
Buffer contents inconsistent with generated schema")); } } @@ -68,7 +70,7 @@ inline void builder_assert(bool cond, const std::string &msg) { if ( !cond ) { - throw std::runtime_error("TOSA builder error : " + msg); + throw std::runtime_error("TOSA builder error: " + msg); } } @@ -90,11 +92,33 @@ double ToDouble(ARG v) return double(v); } +template<> +double ToDouble *>(const ::flatbuffers::Vector *v) +{ + const auto &buf = SafeDeref(v, "No Int8 buffer"); + tosa_assert(buf.size() >= 1, "Malformed constant buffer"); + int8_t r = buf[0]; + return double(r); +} + +template<> +double ToDouble *>(const ::flatbuffers::Vector *v) +{ + const auto &buf = SafeDeref(v, "No Int16 buffer"); + tosa_assert(buf.size() >= 2, "Malformed constant buffer"); + int16_t r = 0; + for ( int i = 0; i < 2; i++ ) + { + r |= uint16_t(buf[i]) << (i * 8); + } + return double(r); +} + template<> double ToDouble *>(const ::flatbuffers::Vector *v) { - const auto &buf = SafeDeref(v); - tosa_assert(buf.size() == 6, "Malformed constant buffer"); + const auto &buf = SafeDeref(v, "No Int48 buffer"); + tosa_assert(buf.size() >= 6, "Malformed constant buffer"); int64_t r = 0; for ( int i = 0; i < 6; i++ ) { @@ -106,8 +130,8 @@ double ToDouble double ToDouble *>(const ::flatbuffers::Vector *v) { - const auto &buf = SafeDeref(v); - tosa_assert(buf.size() == 4, "Malformed constant buffer"); + const auto &buf = SafeDeref(v, "No Float32 buffer"); + tosa_assert(buf.size() >= 4, "Malformed constant buffer"); uint32_t u = 0; for ( int i = 0; i < 4; i++ ) { @@ -119,8 +143,8 @@ double ToDouble double ToDouble *>(const ::flatbuffers::Vector *v) { - const auto &buf = SafeDeref(v); - tosa_assert(buf.size() == 2, "Malformed constant buffer"); + const auto &buf = SafeDeref(v, "No Float16 buffer"); + tosa_assert(buf.size() >= 2, "Malformed constant buffer"); uint32_t u = 0; for ( int i = 0; i < 2; i++ ) { @@ -140,8 +164,8 @@ double ToDouble double ToDouble *>(const ::flatbuffers::Vector *v) { - const auto &buf = 
SafeDeref(v); - tosa_assert(buf.size() == 2, "Malformed constant buffer"); + const auto &buf = SafeDeref(v, "No BFloat16 buffer"); + tosa_assert(buf.size() >= 2, "Malformed constant buffer"); uint32_t u = 0; for ( int i = 0; i < 2; i++ ) { @@ -157,7 +181,7 @@ GraphApi::GraphTensor *CreateParamTensor(const ::flatbuffers::Vector *a { using ACTUAL_ALLOC_TYPE = std::conditional_t, FB_TYPE, ALLOC_TYPE>; - const auto &buf = SafeDeref(attr); + const auto &buf = SafeDeref(attr, "No attribute buffer"); GraphApi::GraphBuffer *buffer; if constexpr ( std::is_same_v ) @@ -217,9 +241,9 @@ GraphApi::GraphTensorUsage GetTosaTensorUsage(const tosaFb::TosaOperator &op, in { \ static const tosaFb::ATTR_PREFIX &Get(const tosaFb::TosaOperator &op) \ { \ - tosa_assert(op.attribute_type() == tosaFb::Attribute::ATTR_PREFIX, "Malformed TOSA Flatbuffer attribute"); \ + tosa_assert(op.attribute_type() == tosaFb::Attribute::ATTR_PREFIX, "Malformed TOSA Flatbuffer " #ATTR_PREFIX); \ auto attr = op.attribute_as(); \ - return SafeDeref(attr, "Malformed TOSA Flatbuffer attribute"); \ + return SafeDeref(attr, "Malformed TOSA Flatbuffer " #ATTR_PREFIX); \ } \ \ TosaAttr() \ @@ -230,77 +254,92 @@ GraphApi::GraphTensorUsage GetTosaTensorUsage(const tosaFb::TosaOperator &op, in TosaAttr s_Init_##OP_ENUM // clang-format off -TOSA_REGISTER_OP(ARGMAX, AxisAttribute, GraphApi::GraphTensorUsage::IFM); -TOSA_REGISTER_OP(AVG_POOL2D, PoolAttribute, GraphApi::GraphTensorUsage::IFM); -TOSA_REGISTER_OP(CONV2D, ConvAttribute, GraphApi::GraphTensorUsage::IFM, GraphApi::GraphTensorUsage::Weights, GraphApi::GraphTensorUsage::Scales); -TOSA_REGISTER_OP(CONV3D, ConvAttribute, GraphApi::GraphTensorUsage::IFM, GraphApi::GraphTensorUsage::Weights, GraphApi::GraphTensorUsage::Scales); -TOSA_REGISTER_OP(DEPTHWISE_CONV2D, ConvAttribute, GraphApi::GraphTensorUsage::IFM, GraphApi::GraphTensorUsage::Weights, GraphApi::GraphTensorUsage::Scales); -TOSA_REGISTER_OP(FULLY_CONNECTED, FullyConnectedAttribute, 
GraphApi::GraphTensorUsage::IFM, GraphApi::GraphTensorUsage::Weights, GraphApi::GraphTensorUsage::Scales); -TOSA_REGISTER_OP(MATMUL, MatMulAttribute, GraphApi::GraphTensorUsage::IFM, GraphApi::GraphTensorUsage::IFM); -TOSA_REGISTER_OP(MAX_POOL2D, PoolAttribute, GraphApi::GraphTensorUsage::IFM); -TOSA_REGISTER_OP(TRANSPOSE_CONV2D, TransposeConvAttribute, GraphApi::GraphTensorUsage::IFM, GraphApi::GraphTensorUsage::Weights, GraphApi::GraphTensorUsage::Scales); +TOSA_REGISTER_OP(ARGMAX, ArgMaxAttribute, GraphApi::GraphTensorUsage::IFM); +TOSA_REGISTER_OP(AVG_POOL2D, AvgPool2dAttribute, GraphApi::GraphTensorUsage::IFM, GraphApi::GraphTensorUsage::Params0, GraphApi::GraphTensorUsage::Params1); +TOSA_REGISTER_OP(CONV2D, Conv2dAttribute, GraphApi::GraphTensorUsage::IFM, GraphApi::GraphTensorUsage::Weights, GraphApi::GraphTensorUsage::Scales, GraphApi::GraphTensorUsage::Params0, GraphApi::GraphTensorUsage::Params1); +TOSA_REGISTER_OP(CONV3D, Conv3dAttribute, GraphApi::GraphTensorUsage::IFM, GraphApi::GraphTensorUsage::Weights, GraphApi::GraphTensorUsage::Scales, GraphApi::GraphTensorUsage::Params0, GraphApi::GraphTensorUsage::Params1); +TOSA_REGISTER_OP(DEPTHWISE_CONV2D, DepthwiseConv2dAttribute, GraphApi::GraphTensorUsage::IFM, GraphApi::GraphTensorUsage::Weights, GraphApi::GraphTensorUsage::Scales, GraphApi::GraphTensorUsage::Params0, GraphApi::GraphTensorUsage::Params1); +TOSA_REGISTER_OP(FFT2D, FFT2dAttribute, GraphApi::GraphTensorUsage::IFM, GraphApi::GraphTensorUsage::IFM); +TOSA_REGISTER_OP(MATMUL, MatMulAttribute, GraphApi::GraphTensorUsage::IFM, GraphApi::GraphTensorUsage::IFM, GraphApi::GraphTensorUsage::Params0, GraphApi::GraphTensorUsage::Params1); +TOSA_REGISTER_OP(MAX_POOL2D, MaxPool2dAttribute, GraphApi::GraphTensorUsage::IFM); +TOSA_REGISTER_OP(RFFT2D, RFFT2dAttribute, GraphApi::GraphTensorUsage::IFM); +TOSA_REGISTER_OP(TRANSPOSE_CONV2D, TransposeConv2dAttribute, GraphApi::GraphTensorUsage::IFM, GraphApi::GraphTensorUsage::Weights, 
GraphApi::GraphTensorUsage::Scales, GraphApi::GraphTensorUsage::Params0, GraphApi::GraphTensorUsage::Params1); TOSA_REGISTER_OP(CLAMP, ClampAttribute, GraphApi::GraphTensorUsage::IFM); -TOSA_REGISTER_OP(SIGMOID, NONE, GraphApi::GraphTensorUsage::IFM); -TOSA_REGISTER_OP(TANH, NONE, GraphApi::GraphTensorUsage::IFM); -TOSA_REGISTER_OP(ADD, NONE, GraphApi::GraphTensorUsage::IFM, GraphApi::GraphTensorUsage::IFM); +TOSA_REGISTER_OP(ERF, ErfAttribute, GraphApi::GraphTensorUsage::IFM); +TOSA_REGISTER_OP(SIGMOID, SigmoidAttribute, GraphApi::GraphTensorUsage::IFM); +TOSA_REGISTER_OP(TANH, TanhAttribute, GraphApi::GraphTensorUsage::IFM); +TOSA_REGISTER_OP(ADD, AddAttribute, GraphApi::GraphTensorUsage::IFM, GraphApi::GraphTensorUsage::IFM); TOSA_REGISTER_OP(ARITHMETIC_RIGHT_SHIFT, ArithmeticRightShiftAttribute, GraphApi::GraphTensorUsage::IFM, GraphApi::GraphTensorUsage::IFM); -TOSA_REGISTER_OP(BITWISE_AND, NONE, GraphApi::GraphTensorUsage::IFM, GraphApi::GraphTensorUsage::IFM); -TOSA_REGISTER_OP(BITWISE_OR, NONE, GraphApi::GraphTensorUsage::IFM, GraphApi::GraphTensorUsage::IFM); -TOSA_REGISTER_OP(BITWISE_XOR, NONE, GraphApi::GraphTensorUsage::IFM, GraphApi::GraphTensorUsage::IFM); -TOSA_REGISTER_OP(INTDIV, NONE, GraphApi::GraphTensorUsage::IFM, GraphApi::GraphTensorUsage::IFM); -TOSA_REGISTER_OP(LOGICAL_AND, NONE, GraphApi::GraphTensorUsage::IFM, GraphApi::GraphTensorUsage::IFM); -TOSA_REGISTER_OP(LOGICAL_LEFT_SHIFT, NONE, GraphApi::GraphTensorUsage::IFM, GraphApi::GraphTensorUsage::IFM); -TOSA_REGISTER_OP(LOGICAL_RIGHT_SHIFT, NONE, GraphApi::GraphTensorUsage::IFM, GraphApi::GraphTensorUsage::IFM); -TOSA_REGISTER_OP(LOGICAL_OR, NONE, GraphApi::GraphTensorUsage::IFM, GraphApi::GraphTensorUsage::IFM); -TOSA_REGISTER_OP(LOGICAL_XOR, NONE, GraphApi::GraphTensorUsage::IFM, GraphApi::GraphTensorUsage::IFM); -TOSA_REGISTER_OP(MAXIMUM, NONE, GraphApi::GraphTensorUsage::IFM, GraphApi::GraphTensorUsage::IFM); -TOSA_REGISTER_OP(MINIMUM, NONE, GraphApi::GraphTensorUsage::IFM, 
GraphApi::GraphTensorUsage::IFM); -TOSA_REGISTER_OP(MUL, MulAttribute, GraphApi::GraphTensorUsage::IFM, GraphApi::GraphTensorUsage::IFM); -TOSA_REGISTER_OP(POW, NONE, GraphApi::GraphTensorUsage::IFM, GraphApi::GraphTensorUsage::IFM); -TOSA_REGISTER_OP(SUB, NONE, GraphApi::GraphTensorUsage::IFM, GraphApi::GraphTensorUsage::IFM); +TOSA_REGISTER_OP(BITWISE_AND, BitwiseAndAttribute, GraphApi::GraphTensorUsage::IFM, GraphApi::GraphTensorUsage::IFM); +TOSA_REGISTER_OP(BITWISE_OR, BitwiseOrAttribute, GraphApi::GraphTensorUsage::IFM, GraphApi::GraphTensorUsage::IFM); +TOSA_REGISTER_OP(BITWISE_XOR, BitwiseXorAttribute, GraphApi::GraphTensorUsage::IFM, GraphApi::GraphTensorUsage::IFM); +TOSA_REGISTER_OP(INTDIV, IntDivAttribute, GraphApi::GraphTensorUsage::IFM, GraphApi::GraphTensorUsage::IFM); +TOSA_REGISTER_OP(LOGICAL_AND, LogicalAndAttribute, GraphApi::GraphTensorUsage::IFM, GraphApi::GraphTensorUsage::IFM); +TOSA_REGISTER_OP(LOGICAL_LEFT_SHIFT, LogicalLeftShiftAttribute, GraphApi::GraphTensorUsage::IFM, GraphApi::GraphTensorUsage::IFM); +TOSA_REGISTER_OP(LOGICAL_RIGHT_SHIFT, LogicalRightShiftAttribute, GraphApi::GraphTensorUsage::IFM, GraphApi::GraphTensorUsage::IFM); +TOSA_REGISTER_OP(LOGICAL_OR, LogicalOrAttribute, GraphApi::GraphTensorUsage::IFM, GraphApi::GraphTensorUsage::IFM); +TOSA_REGISTER_OP(LOGICAL_XOR, LogicalXorAttribute, GraphApi::GraphTensorUsage::IFM, GraphApi::GraphTensorUsage::IFM); +TOSA_REGISTER_OP(MAXIMUM, MaximumAttribute, GraphApi::GraphTensorUsage::IFM, GraphApi::GraphTensorUsage::IFM); +TOSA_REGISTER_OP(MINIMUM, MinimumAttribute, GraphApi::GraphTensorUsage::IFM, GraphApi::GraphTensorUsage::IFM); +TOSA_REGISTER_OP(MUL, MulAttribute, GraphApi::GraphTensorUsage::IFM, GraphApi::GraphTensorUsage::IFM, GraphApi::GraphTensorUsage::Params); +TOSA_REGISTER_OP(POW, PowAttribute, GraphApi::GraphTensorUsage::IFM, GraphApi::GraphTensorUsage::IFM); +TOSA_REGISTER_OP(SUB, SubAttribute, GraphApi::GraphTensorUsage::IFM, GraphApi::GraphTensorUsage::IFM); 
TOSA_REGISTER_OP(TABLE, TableAttribute, GraphApi::GraphTensorUsage::IFM, GraphApi::GraphTensorUsage::Params); -TOSA_REGISTER_OP(ABS, NONE, GraphApi::GraphTensorUsage::IFM); -TOSA_REGISTER_OP(BITWISE_NOT, NONE, GraphApi::GraphTensorUsage::IFM); -TOSA_REGISTER_OP(CEIL, NONE, GraphApi::GraphTensorUsage::IFM); -TOSA_REGISTER_OP(CLZ, NONE, GraphApi::GraphTensorUsage::IFM); -TOSA_REGISTER_OP(EXP, NONE, GraphApi::GraphTensorUsage::IFM, GraphApi::GraphTensorUsage::IFM); -TOSA_REGISTER_OP(FLOOR, NONE, GraphApi::GraphTensorUsage::IFM); -TOSA_REGISTER_OP(LOG, NONE, GraphApi::GraphTensorUsage::IFM); -TOSA_REGISTER_OP(LOGICAL_NOT, NONE, GraphApi::GraphTensorUsage::IFM); -TOSA_REGISTER_OP(NEGATE, NegateAttribute, GraphApi::GraphTensorUsage::IFM); -TOSA_REGISTER_OP(RECIPROCAL, NONE, GraphApi::GraphTensorUsage::IFM); -TOSA_REGISTER_OP(RSQRT, NONE, GraphApi::GraphTensorUsage::IFM); -TOSA_REGISTER_OP(SELECT, NONE, GraphApi::GraphTensorUsage::IFM, GraphApi::GraphTensorUsage::IFM, GraphApi::GraphTensorUsage::IFM); -TOSA_REGISTER_OP(EQUAL, NONE, GraphApi::GraphTensorUsage::IFM, GraphApi::GraphTensorUsage::IFM); -TOSA_REGISTER_OP(GREATER, NONE, GraphApi::GraphTensorUsage::IFM, GraphApi::GraphTensorUsage::IFM); -TOSA_REGISTER_OP(GREATER_EQUAL, NONE, GraphApi::GraphTensorUsage::IFM, GraphApi::GraphTensorUsage::IFM); -TOSA_REGISTER_OP(REDUCE_ANY, AxisAttribute, GraphApi::GraphTensorUsage::IFM); -TOSA_REGISTER_OP(REDUCE_ALL, AxisAttribute, GraphApi::GraphTensorUsage::IFM); -TOSA_REGISTER_OP(REDUCE_MAX, AxisAttribute, GraphApi::GraphTensorUsage::IFM); -TOSA_REGISTER_OP(REDUCE_MIN, AxisAttribute, GraphApi::GraphTensorUsage::IFM); -TOSA_REGISTER_OP(REDUCE_PRODUCT, AxisAttribute, GraphApi::GraphTensorUsage::IFM); -TOSA_REGISTER_OP(REDUCE_SUM, AxisAttribute, GraphApi::GraphTensorUsage::IFM); -TOSA_REGISTER_OP(CONCAT, AxisAttribute, GraphApi::GraphTensorUsage::IFM); -TOSA_REGISTER_OP(PAD, PadAttribute, GraphApi::GraphTensorUsage::IFM, GraphApi::GraphTensorUsage::Params); +TOSA_REGISTER_OP(ABS, 
AbsAttribute, GraphApi::GraphTensorUsage::IFM); +TOSA_REGISTER_OP(BITWISE_NOT, BitwiseNotAttribute, GraphApi::GraphTensorUsage::IFM); +TOSA_REGISTER_OP(CEIL, CeilAttribute, GraphApi::GraphTensorUsage::IFM); +TOSA_REGISTER_OP(CLZ, ClzAttribute, GraphApi::GraphTensorUsage::IFM); +TOSA_REGISTER_OP(COS, CosAttribute, GraphApi::GraphTensorUsage::IFM, GraphApi::GraphTensorUsage::IFM); +TOSA_REGISTER_OP(EXP, ExpAttribute, GraphApi::GraphTensorUsage::IFM, GraphApi::GraphTensorUsage::IFM); +TOSA_REGISTER_OP(FLOOR, FloorAttribute, GraphApi::GraphTensorUsage::IFM); +TOSA_REGISTER_OP(LOG, LogAttribute, GraphApi::GraphTensorUsage::IFM); +TOSA_REGISTER_OP(LOGICAL_NOT, LogicalNotAttribute, GraphApi::GraphTensorUsage::IFM); +TOSA_REGISTER_OP(NEGATE, NegateAttribute, GraphApi::GraphTensorUsage::IFM, GraphApi::GraphTensorUsage::Params0, GraphApi::GraphTensorUsage::Params1); +TOSA_REGISTER_OP(RECIPROCAL, ReciprocalAttribute, GraphApi::GraphTensorUsage::IFM); +TOSA_REGISTER_OP(RSQRT, RsqrtAttribute, GraphApi::GraphTensorUsage::IFM); +TOSA_REGISTER_OP(SIN, SinAttribute, GraphApi::GraphTensorUsage::IFM, GraphApi::GraphTensorUsage::IFM); +TOSA_REGISTER_OP(SELECT, SelectAttribute, GraphApi::GraphTensorUsage::IFM, GraphApi::GraphTensorUsage::IFM, GraphApi::GraphTensorUsage::IFM); +TOSA_REGISTER_OP(EQUAL, EqualAttribute, GraphApi::GraphTensorUsage::IFM, GraphApi::GraphTensorUsage::IFM); +TOSA_REGISTER_OP(GREATER, GreaterAttribute, GraphApi::GraphTensorUsage::IFM, GraphApi::GraphTensorUsage::IFM); +TOSA_REGISTER_OP(GREATER_EQUAL, GreaterEqualAttribute, GraphApi::GraphTensorUsage::IFM, GraphApi::GraphTensorUsage::IFM); +TOSA_REGISTER_OP(REDUCE_ALL, ReduceAllAttribute, GraphApi::GraphTensorUsage::IFM); +TOSA_REGISTER_OP(REDUCE_ANY, ReduceAnyAttribute, GraphApi::GraphTensorUsage::IFM); +TOSA_REGISTER_OP(REDUCE_MAX, ReduceMaxAttribute, GraphApi::GraphTensorUsage::IFM); +TOSA_REGISTER_OP(REDUCE_MIN, ReduceMinAttribute, GraphApi::GraphTensorUsage::IFM); +TOSA_REGISTER_OP(REDUCE_PRODUCT, 
ReduceProductAttribute, GraphApi::GraphTensorUsage::IFM); +TOSA_REGISTER_OP(REDUCE_SUM, ReduceSumAttribute, GraphApi::GraphTensorUsage::IFM); +TOSA_REGISTER_OP(CONCAT, ConcatAttribute, GraphApi::GraphTensorUsage::IFM); +TOSA_REGISTER_OP(PAD, PadAttribute, GraphApi::GraphTensorUsage::IFM, GraphApi::GraphTensorUsage::Params0, GraphApi::GraphTensorUsage::Params1); TOSA_REGISTER_OP(RESHAPE, ReshapeAttribute, GraphApi::GraphTensorUsage::IFM, GraphApi::GraphTensorUsage::Params); -TOSA_REGISTER_OP(REVERSE, AxisAttribute, GraphApi::GraphTensorUsage::IFM); -TOSA_REGISTER_OP(SLICE, SliceAttribute, GraphApi::GraphTensorUsage::IFM); +TOSA_REGISTER_OP(REVERSE, ReverseAttribute, GraphApi::GraphTensorUsage::IFM); +TOSA_REGISTER_OP(SLICE, SliceAttribute, GraphApi::GraphTensorUsage::IFM, GraphApi::GraphTensorUsage::Params0, GraphApi::GraphTensorUsage::Params1); TOSA_REGISTER_OP(TILE, TileAttribute, GraphApi::GraphTensorUsage::IFM, GraphApi::GraphTensorUsage::Params); TOSA_REGISTER_OP(TRANSPOSE, TransposeAttribute, GraphApi::GraphTensorUsage::IFM); -TOSA_REGISTER_OP(GATHER, NONE, GraphApi::GraphTensorUsage::IFM); -TOSA_REGISTER_OP(SCATTER, NONE, GraphApi::GraphTensorUsage::IFM); +TOSA_REGISTER_OP(GATHER, GatherAttribute, GraphApi::GraphTensorUsage::IFM); +TOSA_REGISTER_OP(SCATTER, ScatterAttribute, GraphApi::GraphTensorUsage::IFM); TOSA_REGISTER_OP(RESIZE, ResizeAttribute, GraphApi::GraphTensorUsage::IFM, GraphApi::GraphTensorUsage::Params, GraphApi::GraphTensorUsage::Params1, GraphApi::GraphTensorUsage::Params2); -TOSA_REGISTER_OP(CAST, NONE, GraphApi::GraphTensorUsage::IFM); -TOSA_REGISTER_OP(RESCALE, RescaleAttribute, GraphApi::GraphTensorUsage::IFM, GraphApi::GraphTensorUsage::Params, GraphApi::GraphTensorUsage::Params1); -TOSA_REGISTER_OP(CONST, NONE, ); -TOSA_REGISTER_OP(IDENTITY, NONE, GraphApi::GraphTensorUsage::IFM); -TOSA_REGISTER_OP(CUSTOM, NONE, GraphApi::GraphTensorUsage::IFM); +TOSA_REGISTER_OP(CAST, CastAttribute, GraphApi::GraphTensorUsage::IFM); 
+TOSA_REGISTER_OP(RESCALE, RescaleAttribute, GraphApi::GraphTensorUsage::IFM, GraphApi::GraphTensorUsage::Params, GraphApi::GraphTensorUsage::Params1, GraphApi::GraphTensorUsage::Params2, GraphApi::GraphTensorUsage::Params3); +TOSA_REGISTER_OP(CONST, ConstAttribute, ); +TOSA_REGISTER_OP(IDENTITY, IdentityAttribute, GraphApi::GraphTensorUsage::IFM); +TOSA_REGISTER_OP(CUSTOM, CustomAttribute, GraphApi::GraphTensorUsage::IFM); TOSA_REGISTER_OP(COND_IF, CondIfAttribute, GraphApi::GraphTensorUsage::IFM); TOSA_REGISTER_OP(WHILE_LOOP, WhileLoopAttribute, GraphApi::GraphTensorUsage::IFM); -TOSA_REGISTER_OP(FFT2D, FFTAttribute, GraphApi::GraphTensorUsage::IFM, GraphApi::GraphTensorUsage::IFM); -TOSA_REGISTER_OP(RFFT2D, NONE, GraphApi::GraphTensorUsage::IFM); -TOSA_REGISTER_OP(ERF, NONE, GraphApi::GraphTensorUsage::IFM); -TOSA_REGISTER_OP(DIM, AxisAttribute, GraphApi::GraphTensorUsage::IFM); +TOSA_REGISTER_OP(VARIABLE, VariableAttribute, ); +TOSA_REGISTER_OP(VARIABLE_WRITE, VariableWriteAttribute, GraphApi::GraphTensorUsage::IFM); +TOSA_REGISTER_OP(VARIABLE_READ, VariableReadAttribute, ); +TOSA_REGISTER_OP(CONST_SHAPE, ConstShapeAttribute, ); + +#define FOR_ALL_AXIS_SELECT_TYPES(functor, sep) \ + functor(ARGMAX) sep \ + functor(REDUCE_ANY) sep \ + functor(REDUCE_ALL) sep \ + functor(REDUCE_MAX) sep \ + functor(REDUCE_MIN) sep \ + functor(REDUCE_PRODUCT) sep \ + functor(REDUCE_SUM) sep \ + functor(CONCAT) sep \ + functor(REVERSE) // clang-format on } // namespace @@ -309,15 +348,15 @@ void TosaReader::LoadGraphs(const tosaFb::TosaGraph *model, std::listversion()); + tosa_assert(model, "No model"); + const auto &version = SafeDeref(model->version(), "Could not find version"); const uint32_t ver_word = (uint32_t(version._major()) << 24) | (uint32_t(version._minor()) << 8) | uint32_t(version._patch()); - for ( const auto &tosa_region : SafeDeref(model->regions()) ) + for ( const auto &tosa_region : SafeDeref(model->regions(), "No regions") ) { - for ( const auto &tosa_basicblock 
: SafeDeref(tosa_region->blocks()) ) + for ( const auto &tosa_basicblock : SafeDeref(tosa_region->blocks(), "No blocks") ) { - const char *bbName = SafeDeref(tosa_basicblock->name()).c_str(); + const char *bbName = SafeDeref(tosa_basicblock->name(), "No basic block name").c_str(); tosa_assert(bbName, "Basic block needs a valid name"); builders.emplace_back(bbName); GraphApi::IGraphBuilder *builder = &builders.back(); @@ -327,26 +366,35 @@ void TosaReader::LoadGraphs(const tosaFb::TosaGraph *model, std::list tensors; std::unordered_map shapes; std::unordered_map types; - tensors.reserve(SafeDeref(tosa_basicblock->tensors()).size()); + tensors.reserve(SafeDeref(tosa_basicblock->tensors(), "No tensors").size()); + + // Vector to API shape + auto ToApiShape = [](const ::flatbuffers::Vector *in) -> GraphApi::GraphShape + { + GraphApi::GraphShape out; + const auto &buf = SafeDeref(in, "No shape vector"); + tosa_assert(buf.size() <= std::size(out.axisNHWC), "Shape rank exceeds maximum allowed"); + for ( int i = 0; i < int(buf.size()); i++ ) + { + out.axisNHWC[i] = buf[i]; + } + out.count = buf.size(); + return out; + }; for ( const auto &tosa_tensor : SafeDeref(tosa_basicblock->tensors()) ) { GraphApi::GraphBuffer *buffer = nullptr; - const char *name = SafeDeref(tosa_tensor->name()).c_str(); + const char *name = SafeDeref(tosa_tensor->name(), "No tensor name").c_str(); tosa_assert(name, "Tensor needs a valid name"); const auto type = TosaMapping::TensorTypeToDataType(tosa_tensor->type()); + tosa_assert(type != GraphApi::GraphDataType::Unknown, "Unknown data type"); const bool variable = tosa_tensor->variable(); const bool is_unranked = tosa_tensor->is_unranked(); tosa_assert(!is_unranked, "Unranked tensors not supported"); - Shape shape; // Defaults to shapeless - const auto &tensorShape = tosa_tensor->shape(); - if ( tensorShape && tensorShape->size() ) - { - shape = Shape(tensorShape->data(), tensorShape->size()); - } const auto &tensorData = tosa_tensor->data(); if ( 
tensorData && tensorData->size() ) { @@ -354,9 +402,7 @@ void TosaReader::LoadGraphs(const tosaFb::TosaGraph *model, std::listshape()); auto tensor = builder->CreateTensor(name, tosaShape, GraphApi::GraphTensorLayout::Linear, type, buffer); builder_assert(tensor, "Failed to create tensor"); @@ -368,20 +414,48 @@ void TosaReader::LoadGraphs(const tosaFb::TosaGraph *model, std::listAddPersistent(tensor); } - const auto &tosa_operators = SafeDeref(tosa_basicblock->operators()); + // Decode shape objects as tensors + // TODO: MLBEDSW-10904 Improve support for TosaShape + for ( const auto &tosa_shape : SafeDeref(tosa_basicblock->shapes()) ) + { + GraphApi::GraphBuffer *buffer = nullptr; + + const char *name = SafeDeref(tosa_shape->name(), "No shape name").c_str(); + tosa_assert(name, "Shape needs a valid name"); + const auto type = GraphApi::GraphDataType::Int64; + + const auto &shapeData = tosa_shape->data(); + if ( shapeData && shapeData->size() ) + { + buffer = builder->CreateBuffer(shapeData->size(), GraphApi::BufferMapping::Alias, shapeData->Data()); + builder_assert(buffer, "Failed to create buffer"); + } + + GraphApi::GraphShape tosaShape; + tosaShape.count = 1; + tosaShape.axisNHWC[0] = tosa_shape->rank(); + auto tensor = builder->CreateTensor(name, tosaShape, GraphApi::GraphTensorLayout::Linear, type, buffer); + builder_assert(tensor, "Failed to create tensor"); + + tosa_assert(tensors.count(name) == 0, "Shape and Tensor name collision"); + tensors[name] = tensor; + } + + const auto &tosa_operators = SafeDeref(tosa_basicblock->operators(), "No operators"); for ( int tosa_op_index = 0; tosa_op_index < int(tosa_operators.size()); tosa_op_index++ ) { - const auto &tosa_operator = SafeDeref(tosa_operators[tosa_op_index]); + const auto &tosa_operator = SafeDeref(tosa_operators[tosa_op_index], "Invalid operator"); + if ( tosa_operator.op() == tosaFb::Op::CONST_SHAPE ) continue; // Connect operation to its input tensors std::vector input_tensors; if ( 
tosa_operator.inputs() ) { - const auto &input_tensors_fb = SafeDeref(tosa_operator.inputs()); + const auto &input_tensors_fb = SafeDeref(tosa_operator.inputs(), "No inputs"); input_tensors.reserve(input_tensors_fb.size()); for ( const auto &ten : input_tensors_fb ) - input_tensors.push_back(SafeDeref(ten).str()); + input_tensors.push_back(SafeDeref(ten, "Invalid tensor name").str()); } - const auto &output_tensors = SafeDeref(tosa_operator.outputs()); + const auto &output_tensors = SafeDeref(tosa_operator.outputs(), "No outputs"); // Kernel GraphApi::GraphKernel kernel = {}; @@ -391,25 +465,25 @@ void TosaReader::LoadGraphs(const tosaFb::TosaGraph *model, std::list 1); + tosa_assert(input_tensors.size() > 1, "Missing DEPTHWISE_CONV2D input tensor"); const auto &shape = shapes.at(input_tensors[1]); kernel.sizeYXZ[0] = shape.axisNHWC[0]; kernel.sizeYXZ[1] = shape.axisNHWC[1]; kernel.sizeYXZ[2] = 1; const auto &attr = TosaAttr::Get(tosa_operator); - tosa_assert(attr.pad()); - tosa_assert(attr.pad()->size() == 4); + tosa_assert(attr.pad(), "Missing DEPTHWISE_CONV2D pad attribute"); + tosa_assert(attr.pad()->size() == 4, "Invalid DEPTHWISE_CONV2D pad attribute"); kernel.paddingTBLRNF[0] = (*attr.pad())[0]; kernel.paddingTBLRNF[1] = (*attr.pad())[1]; kernel.paddingTBLRNF[2] = (*attr.pad())[2]; kernel.paddingTBLRNF[3] = (*attr.pad())[3]; - tosa_assert(attr.stride()); - tosa_assert(attr.stride()->size() == 2); + tosa_assert(attr.stride(), "Missing DEPTHWISE_CONV2D stride attribute"); + tosa_assert(attr.stride()->size() == 2, "Invalid DEPTHWISE_CONV2D stride attribute"); kernel.strideYXZ[0] = (*attr.stride())[0]; kernel.strideYXZ[1] = (*attr.stride())[1]; kernel.strideYXZ[2] = 1; - tosa_assert(attr.dilation()); - tosa_assert(attr.dilation()->size() == 2); + tosa_assert(attr.dilation(), "Missing DEPTHWISE_CONV2D dilation attribute"); + tosa_assert(attr.dilation()->size() == 2, "Invalid DEPTHWISE_CONV2D dilation attribute"); kernel.dilationYXZ[0] = (*attr.dilation())[0]; 
kernel.dilationYXZ[1] = (*attr.dilation())[1]; kernel.dilationYXZ[2] = 1; @@ -418,25 +492,25 @@ void TosaReader::LoadGraphs(const tosaFb::TosaGraph *model, std::list 1); + tosa_assert(input_tensors.size() > 1, "Missing CONV2D input tensor"); const auto &shape = shapes.at(input_tensors[1]); kernel.sizeYXZ[0] = shape.axisNHWC[1]; kernel.sizeYXZ[1] = shape.axisNHWC[2]; kernel.sizeYXZ[2] = 1; const auto &attr = TosaAttr::Get(tosa_operator); - tosa_assert(attr.pad()); - tosa_assert(attr.pad()->size() == 4); + tosa_assert(attr.pad(), "Missing CONV2D pad attribute"); + tosa_assert(attr.pad()->size() == 4, "Invalid CONV2D pad attribute"); kernel.paddingTBLRNF[0] = (*attr.pad())[0]; kernel.paddingTBLRNF[1] = (*attr.pad())[1]; kernel.paddingTBLRNF[2] = (*attr.pad())[2]; kernel.paddingTBLRNF[3] = (*attr.pad())[3]; - tosa_assert(attr.stride()); - tosa_assert(attr.stride()->size() == 2); + tosa_assert(attr.stride(), "Missing CONV2D stride attribute"); + tosa_assert(attr.stride()->size() == 2, "Invalid CONV2D stride attribute"); kernel.strideYXZ[0] = (*attr.stride())[0]; kernel.strideYXZ[1] = (*attr.stride())[1]; kernel.strideYXZ[2] = 1; - tosa_assert(attr.dilation()); - tosa_assert(attr.dilation()->size() == 2); + tosa_assert(attr.dilation(), "Missing CONV2D dilation attribute"); + tosa_assert(attr.dilation()->size() == 2, "Invalid CONV2D dilation attribute"); kernel.dilationYXZ[0] = (*attr.dilation())[0]; kernel.dilationYXZ[1] = (*attr.dilation())[1]; kernel.dilationYXZ[2] = 1; @@ -445,28 +519,28 @@ void TosaReader::LoadGraphs(const tosaFb::TosaGraph *model, std::list 1); + tosa_assert(input_tensors.size() > 1, "Missing CONV3D input tensor"); const auto &shape = shapes.at(input_tensors[1]); - tosa_assert(shape.count == 5); + tosa_assert(shape.count == 5, "Invalid CONV3D input rank"); kernel.sizeYXZ[0] = shape.axisNHWC[2]; kernel.sizeYXZ[1] = shape.axisNHWC[3]; kernel.sizeYXZ[2] = shape.axisNHWC[1]; const auto &attr = TosaAttr::Get(tosa_operator); - tosa_assert(attr.pad()); - 
tosa_assert(attr.pad()->size() == 6); + tosa_assert(attr.pad(), "Missing CONV3D pad attribute"); + tosa_assert(attr.pad()->size() == 6, "Invalid CONV3D pad attribute"); kernel.paddingTBLRNF[0] = (*attr.pad())[2]; kernel.paddingTBLRNF[1] = (*attr.pad())[3]; kernel.paddingTBLRNF[2] = (*attr.pad())[4]; kernel.paddingTBLRNF[3] = (*attr.pad())[5]; kernel.paddingTBLRNF[4] = (*attr.pad())[0]; kernel.paddingTBLRNF[5] = (*attr.pad())[1]; - tosa_assert(attr.stride()); - tosa_assert(attr.stride()->size() == 3); + tosa_assert(attr.stride(), "Missing CONV3D stride attribute"); + tosa_assert(attr.stride()->size() == 3, "Invalid CONV3D stride attribute"); kernel.strideYXZ[0] = (*attr.stride())[1]; kernel.strideYXZ[1] = (*attr.stride())[2]; kernel.strideYXZ[2] = (*attr.stride())[0]; - tosa_assert(attr.dilation()); - tosa_assert(attr.dilation()->size() == 3); + tosa_assert(attr.dilation(), "Missing CONV3D dilation attribute"); + tosa_assert(attr.dilation()->size() == 3, "Invalid CONV3D dilation attribute"); kernel.dilationYXZ[0] = (*attr.dilation())[1]; kernel.dilationYXZ[1] = (*attr.dilation())[2]; kernel.dilationYXZ[2] = (*attr.dilation())[0]; @@ -475,7 +549,7 @@ void TosaReader::LoadGraphs(const tosaFb::TosaGraph *model, std::list 1); + tosa_assert(input_tensors.size() > 1, "Missing TRANSPOSE_CONV2D input tensor"); const auto &shape = shapes.at(input_tensors[1]); kernel.sizeYXZ[0] = shape.axisNHWC[1]; kernel.sizeYXZ[1] = shape.axisNHWC[2]; @@ -487,8 +561,8 @@ void TosaReader::LoadGraphs(const tosaFb::TosaGraph *model, std::listsize() == 2); + tosa_assert(attr.stride(), "Missing TRANSPOSE_CONV2D stride attribute"); + tosa_assert(attr.stride()->size() == 2, "Invalid TRANSPOSE_CONV2D stride attribute"); kernel.strideYXZ[0] = (*attr.stride())[0]; kernel.strideYXZ[1] = (*attr.stride())[1]; kernel.strideYXZ[2] = 1; @@ -498,24 +572,47 @@ void TosaReader::LoadGraphs(const tosaFb::TosaGraph *model, std::list::Get(tosa_operator); - tosa_assert(attr.kernel()); - 
tosa_assert(attr.kernel()->size() == 2); + tosa_assert(attr.kernel(), "Missing AVG_POOL2D kernel attribute"); + tosa_assert(attr.kernel()->size() == 2, "Invalid AVG_POOL2D kernel attribute"); kernel.sizeYXZ[0] = (*attr.kernel())[0]; kernel.sizeYXZ[1] = (*attr.kernel())[1]; kernel.sizeYXZ[2] = 1; - tosa_assert(attr.pad()); - tosa_assert(attr.pad()->size() == 4); + tosa_assert(attr.pad(), "Missing AVG_POOL2D pad attribute"); + tosa_assert(attr.pad()->size() == 4, "Invalid AVG_POOL2D pad attribute"); kernel.paddingTBLRNF[0] = (*attr.pad())[0]; kernel.paddingTBLRNF[1] = (*attr.pad())[1]; kernel.paddingTBLRNF[2] = (*attr.pad())[2]; kernel.paddingTBLRNF[3] = (*attr.pad())[3]; - tosa_assert(attr.stride()); - tosa_assert(attr.stride()->size() == 2); + tosa_assert(attr.stride(), "Missing AVG_POOL2D stride attribute"); + tosa_assert(attr.stride()->size() == 2, "Invalid AVG_POOL2D stride attribute"); + kernel.strideYXZ[0] = (*attr.stride())[0]; + kernel.strideYXZ[1] = (*attr.stride())[1]; + kernel.strideYXZ[2] = 1; + kernel.dilationYXZ[0] = 1; + kernel.dilationYXZ[1] = 1; + kernel.dilationYXZ[2] = 1; + } + break; + case tosaFb::Op::MAX_POOL2D: + { + kernelPtr = &kernel; + const auto &attr = TosaAttr::Get(tosa_operator); + tosa_assert(attr.kernel(), "Missing MAX_POOL2D kernel attribute"); + tosa_assert(attr.kernel()->size() == 2, "Invalid MAX_POOL2D kernel attribute"); + kernel.sizeYXZ[0] = (*attr.kernel())[0]; + kernel.sizeYXZ[1] = (*attr.kernel())[1]; + kernel.sizeYXZ[2] = 1; + tosa_assert(attr.pad(), "Missing MAX_POOL2D pad attribute"); + tosa_assert(attr.pad()->size() == 4, "Invalid MAX_POOL2D pad attribute"); + kernel.paddingTBLRNF[0] = (*attr.pad())[0]; + kernel.paddingTBLRNF[1] = (*attr.pad())[1]; + kernel.paddingTBLRNF[2] = (*attr.pad())[2]; + kernel.paddingTBLRNF[3] = (*attr.pad())[3]; + tosa_assert(attr.stride(), "Missing MAX_POOL2D stride attribute"); + tosa_assert(attr.stride()->size() == 2, "Invalid MAX_POOL2D stride attribute"); kernel.strideYXZ[0] = 
(*attr.stride())[0]; kernel.strideYXZ[1] = (*attr.stride())[1]; kernel.strideYXZ[2] = 1; @@ -527,24 +624,11 @@ void TosaReader::LoadGraphs(const tosaFb::TosaGraph *model, std::listCreateOp(TosaMapping::FBOpToOp(tosa_operator.op()), kernelPtr); + auto op_type = TosaMapping::FBOpToOp(tosa_operator.op()); + tosa_assert(op_type != tosa::Op::UNKNOWN, "Unknown data type"); + auto op = builder->CreateOp(op_type, kernelPtr); + builder_assert(op, fmt::format("Failed to create {} operation", tosaFb::EnumNameOp(tosa_operator.op()))); builder->SetExternalId(op, tosa_op_index); - builder_assert(op, "Failed to create operation"); - - // Fix op Attributes - auto ToApiShape = [](const ::flatbuffers::Vector *in) -> GraphApi::GraphShape - { - GraphApi::GraphShape out; - const auto &buf = SafeDeref(in); - tosa_assert(buf.size() <= std::size(out.axisNHWC), "Shape rank exceeds maximum allowed"); - for ( int i = 0; i < int(buf.size()); i++ ) - { - out.axisNHWC[i] = buf[i]; - } - out.count = buf.size(); - return out; - }; switch ( tosa_operator.op() ) { @@ -558,52 +642,39 @@ void TosaReader::LoadGraphs(const tosaFb::TosaGraph *model, std::list::Get(tosa_operator); - double clamp_min = tosa_attr.min_int(); - double clamp_max = tosa_attr.max_int(); - if ( tosa_attr.min_fp() != nullptr ) + double clamp_min = 0; + double clamp_max = 0; + tosa_assert(input_tensors.size() > 0, "Missing CLAMP input tensor"); + auto type = types.at(input_tensors[0]); + switch ( type ) { - tosa_assert(input_tensors.size() > 0); - auto type = types.at(input_tensors[0]); - switch ( type ) - { - case GraphApi::GraphDataType::Int48: - clamp_min = ToDouble(tosa_attr.min_fp()); - clamp_max = ToDouble(tosa_attr.max_fp()); - break; - case GraphApi::GraphDataType::Float32: - clamp_min = ToDouble(tosa_attr.min_fp()); - clamp_max = ToDouble(tosa_attr.max_fp()); - break; - case GraphApi::GraphDataType::Float16: - clamp_min = ToDouble(tosa_attr.min_fp()); - clamp_max = ToDouble(tosa_attr.max_fp()); - break; - case 
GraphApi::GraphDataType::BFloat16: - clamp_min = ToDouble(tosa_attr.min_fp()); - clamp_max = ToDouble(tosa_attr.max_fp()); - break; - default: // empty - break; - } + case GraphApi::GraphDataType::Int8: + clamp_min = ToDouble(tosa_attr.min_val()); + clamp_max = ToDouble(tosa_attr.max_val()); + break; + case GraphApi::GraphDataType::Int16: + clamp_min = ToDouble(tosa_attr.min_val()); + clamp_max = ToDouble(tosa_attr.max_val()); + break; + case GraphApi::GraphDataType::Float32: + clamp_min = ToDouble(tosa_attr.min_val()); + clamp_max = ToDouble(tosa_attr.max_val()); + break; + case GraphApi::GraphDataType::Float16: + clamp_min = ToDouble(tosa_attr.min_val()); + clamp_max = ToDouble(tosa_attr.max_val()); + break; + case GraphApi::GraphDataType::BFloat16: + clamp_min = ToDouble(tosa_attr.min_val()); + clamp_max = ToDouble(tosa_attr.max_val()); + break; + default: // empty + break; } builder_assert(builder->Set(op, OpAttr::CLAMP_MIN, clamp_min), "Failed to set CLAMP_MIN attribute on CLAMP"); builder_assert(builder->Set(op, OpAttr::CLAMP_MAX, clamp_max), "Failed to set CLAMP_MAX attribute on CLAMP"); } break; - case tosaFb::Op::SLICE: - { - const auto &tosa_attr = TosaAttr::Get(tosa_operator); - builder_assert(builder->Set(op, OpAttr::SLICE_BEGIN, ToApiShape(tosa_attr.start())), - "Failed to set SLICE_BEGIN attribute on SLICE"); - builder_assert(builder->Set(op, OpAttr::SLICE_SIZE, ToApiShape(tosa_attr.size())), "Failed to set SLICE_SIZE attribute on SLICE"); - } - break; - case tosaFb::Op::MUL: - { - const auto &tosa_attr = TosaAttr::Get(tosa_operator); - builder_assert(builder->Set(op, OpAttr::MUL_SHIFT, tosa_attr.shift()), "Failed to set MUL_SHIFT attribute on MUL"); - } - break; case tosaFb::Op::TRANSPOSE: { const auto &tosa_attr = TosaAttr::Get(tosa_operator); @@ -614,18 +685,26 @@ void TosaReader::LoadGraphs(const tosaFb::TosaGraph *model, std::list::Get(tosa_operator); - builder_assert(builder->Set(op, OpAttr::COND_IF, SafeDeref(tosa_attr.then_branch()).c_str()), 
+ builder_assert( + builder->Set(op, OpAttr::COND_IF, + SafeDeref(tosa_attr.then_graph(), "COND_IF: No then graph").c_str()), "Failed to set COND_IF attribute on COND_IF"); - builder_assert(builder->Set(op, OpAttr::COND_ELSE, SafeDeref(tosa_attr.else_branch()).c_str()), + builder_assert( + builder->Set(op, OpAttr::COND_ELSE, + SafeDeref(tosa_attr.else_graph(), "COND_IF: No else graph").c_str()), "Failed to set COND_ELSE attribute on COND_IF"); } break; case tosaFb::Op::WHILE_LOOP: { const auto &tosa_attr = TosaAttr::Get(tosa_operator); - builder_assert(builder->Set(op, OpAttr::WHILE_BODY, SafeDeref(tosa_attr.body_branch()).c_str()), + builder_assert( + builder->Set(op, OpAttr::WHILE_BODY, + SafeDeref(tosa_attr.body_graph(), "WHILE_LOOP: No body graph").c_str()), "Failed to set WHILE_BODY attribute on WHILE_LOOP"); - builder_assert(builder->Set(op, OpAttr::WHILE_COND, SafeDeref(tosa_attr.cond_branch()).c_str()), + builder_assert( + builder->Set(op, OpAttr::WHILE_COND, + SafeDeref(tosa_attr.cond_graph(), "WHILE_LOOP: No cond graph").c_str()), "Failed to set WHILE_COND attribute on WHILE_LOOP"); } break; @@ -634,7 +713,7 @@ void TosaReader::LoadGraphs(const tosaFb::TosaGraph *model, std::list::Get(tosa_operator); builder_assert(builder->Set(op, GraphApi::OpAttr::RESCALE_SCALE32, tosa_attr.scale32()), "Failed to set RESCALE_SCALE32 attribute on RESCALE"); - builder_assert(builder->Set(op, GraphApi::OpAttr::RESCALE_DOUBLE_ROUND, tosa_attr.double_round()), + builder_assert(builder->Set(op, GraphApi::OpAttr::RESCALE_DOUBLE_ROUND, tosa_attr.rounding_mode() == tosaFb::RoundingMode::DOUBLE_ROUND), "Failed to set RESCALE_DOUBLE_ROUND attribute on RESCALE"); builder_assert(builder->Set(op, GraphApi::OpAttr::RESCALE_PER_CHANNEL, tosa_attr.per_channel()), "Failed to set RESCALE_PER_CHANNEL attribute on RESCALE"); @@ -642,178 +721,31 @@ void TosaReader::LoadGraphs(const tosaFb::TosaGraph *model, std::listSet(op, GraphApi::OpAttr::RESCALE_OUTPUT_UNSIGNED, 
tosa_attr.output_unsigned()), "Failed to set RESCALE_OUTPUT_UNSIGNED attribute on RESCALE"); - - if ( input_tensors.size() == 1 ) - { - std::string name = "multiplier_param" + std::to_string(tosa_op_index); - if ( tosa_attr.scale32() ) - tensors[name] = CreateParamTensor(tosa_attr.multiplier(), builder, name); - else tensors[name] = CreateParamTensor(tosa_attr.multiplier(), builder, name); - input_tensors.push_back(name); - name = "shift_param" + std::to_string(tosa_op_index); - tensors[name] = CreateParamTensor(tosa_attr.shift(), builder, name); - input_tensors.push_back(std::move(name)); - } - } - break; - case tosaFb::Op::RESHAPE: - { - const auto &tosa_attr = TosaAttr::Get(tosa_operator); - builder_assert(builder->Set(op, OpAttr::RESHAPE_SHAPE, ToApiShape(tosa_attr.new_shape())), - "Failed to set RESHAPE_SHAPE attribute on RESHAPE"); } break; case tosaFb::Op::RESIZE: { const auto &tosa_attr = TosaAttr::Get(tosa_operator); - tosa_assert(tosa_attr.scale()); - tosa_assert(tosa_attr.scale()->size() == 4); - tosa_assert(tosa_attr.offset()); - tosa_assert(tosa_attr.offset()->size() == 2); - tosa_assert(tosa_attr.border()); - tosa_assert(tosa_attr.border()->size() == 2); - - builder_assert( - builder->Set(op, GraphApi::OpAttr::RESIZE_SCALEY, - GraphApi::FractionND{(*tosa_attr.scale())[0], (*tosa_attr.scale())[1]}), - "Failed to set RESIZE_SCALEY attribute on RESIZE"); - builder_assert( - builder->Set(op, GraphApi::OpAttr::RESIZE_SCALEX, - GraphApi::FractionND{(*tosa_attr.scale())[2], (*tosa_attr.scale())[3]}), - "Failed to set RESIZE_SCALEX attribute on RESIZE"); - builder_assert( - builder->Set(op, GraphApi::OpAttr::RESIZE_OFFSET, - GraphApi::Point2{(*tosa_attr.offset())[1], (*tosa_attr.offset())[0]}), - "Failed to set RESIZE_OFFSET attribute on RESIZE"); - builder_assert( - builder->Set(op, GraphApi::OpAttr::RESIZE_BORDER, - GraphApi::Point2{(*tosa_attr.border())[1], (*tosa_attr.border())[0]}), - "Failed to set RESIZE_BORDER attribute on RESIZE"); - builder_assert( 
- builder->Set(op, GraphApi::OpAttr::RESIZE_MODE, - int(TosaMapping::FBResizeModeToResizeMode(tosa_attr.mode()))), - "Failed to RESIZE_MODE attribute on RESIZE"); - - // If no input tensors for scale/offset/border, create them to be backwards compatible - if ( input_tensors.size() == 1 ) - { - std::string name = "scale_param" + std::to_string(tosa_op_index); - tensors[name] = CreateParamTensor(tosa_attr.scale(), builder, name); - input_tensors.push_back(name); - name = "offset_param" + std::to_string(tosa_op_index); - tensors[name] = CreateParamTensor(tosa_attr.offset(), builder, name); - input_tensors.push_back(std::move(name)); - name = "border_param" + std::to_string(tosa_op_index); - tensors[name] = CreateParamTensor(tosa_attr.border(), builder, name); - input_tensors.push_back(std::move(name)); - } - } - break; - case tosaFb::Op::ARGMAX: - [[fallthrough]]; - case tosaFb::Op::REDUCE_ANY: - [[fallthrough]]; - case tosaFb::Op::REDUCE_ALL: - [[fallthrough]]; - case tosaFb::Op::REDUCE_MAX: - [[fallthrough]]; - case tosaFb::Op::REDUCE_MIN: - [[fallthrough]]; - case tosaFb::Op::REDUCE_PRODUCT: - [[fallthrough]]; - case tosaFb::Op::REDUCE_SUM: - [[fallthrough]]; - case tosaFb::Op::CONCAT: - [[fallthrough]]; - case tosaFb::Op::REVERSE: - { - const auto &tosa_attr = TosaAttr::Get(tosa_operator); - builder_assert(builder->Set(op, GraphApi::OpAttr::AXIS_SELECT, tosa_attr.axis()), "Failed to set AXIS_SELECT attribute on REVERSE"); - break; - } - case tosaFb::Op::TILE: - { - // If no input tensors for multiples, convert multiples attribute to param tensor - if ( input_tensors.size() == 1 ) - { - const auto &tosa_attr = TosaAttr::Get(tosa_operator); - std::string name = "multiples" + std::to_string(tosa_op_index); - tensors[name] = CreateParamTensor(tosa_attr.multiples(), builder, name); - input_tensors.push_back(std::move(name)); - } - break; - } - break; - case tosaFb::Op::PAD: - { - const auto &tosa_attr = TosaAttr::Get(tosa_operator); - double pad_const = 
tosa_attr.pad_const_int(); - if ( tosa_attr.pad_const_fp() != nullptr ) - { - tosa_assert(input_tensors.size() > 0); - auto type = types.at(input_tensors[0]); - switch ( type ) - { - case GraphApi::GraphDataType::Int48: - pad_const = ToDouble(tosa_attr.pad_const_fp()); - break; - case GraphApi::GraphDataType::Float32: - pad_const = ToDouble(tosa_attr.pad_const_fp()); - break; - case GraphApi::GraphDataType::Float16: - pad_const = ToDouble(tosa_attr.pad_const_fp()); - break; - case GraphApi::GraphDataType::BFloat16: - pad_const = ToDouble(tosa_attr.pad_const_fp()); - break; - default: // empty - break; - } - } - builder_assert(builder->Set(op, OpAttr::PAD_PAD_CONST, pad_const), "Failed to set PAD_CONST attribute on PAD"); - - // If no input tensors for padding, convert padding attribute to param tensor - if ( input_tensors.size() == 1 ) - { - // Padding tensor has 2D shape, but padding attribute has 1D shape - Shape shape(SafeDeref(tosa_attr.padding()).size() / 2, 2); - GraphApi::GraphShape tosaShape; - tosaShape.count = shape.ToNHWC(tosaShape.axisNHWC, std::size(tosaShape.axisNHWC)); - std::string name = "padding_param" + std::to_string(tosa_op_index); - tensors[name] = CreateParamTensor(tosa_attr.padding(), builder, name, &tosaShape); - input_tensors.push_back(std::move(name)); - } - } - break; - case tosaFb::Op::TABLE: - { - const auto &tosa_attr = TosaAttr::Get(tosa_operator); - if ( input_tensors.size() == 1 ) - { - std::string name = "table_param" + std::to_string(tosa_op_index); - auto type = types.at(input_tensors[0]); - assert(type == GraphApi::GraphDataType::Int8 || type == GraphApi::GraphDataType::Int16); - if ( type == GraphApi::GraphDataType::Int8 ) - { - tensors[name] = CreateParamTensor(tosa_attr.table(), builder, name); - } - else - { - tensors[name] = CreateParamTensor(tosa_attr.table(), builder, name); - } - input_tensors.push_back(std::move(name)); - } + const auto mode = TosaMapping::FBResizeModeToResizeMode(tosa_attr.mode()); + tosa_assert(mode 
!= tosa::ResizeMode::UNKNOWN, "Unknown resize mode"); + builder_assert(builder->Set(op, GraphApi::OpAttr::RESIZE_MODE, int(mode)), "Failed to set RESIZE_MODE attribute on RESIZE"); } break; +#define TYPE_FUNC(op_type) \ + case tosaFb::Op::op_type: \ + { \ + const auto &tosa_attr = TosaAttr::Get(tosa_operator); \ + builder_assert(builder->Set(op, GraphApi::OpAttr::AXIS_SELECT, tosa_attr.axis()), "Failed to set AXIS_SELECT attribute on " #op_type); \ + } \ + break + FOR_ALL_AXIS_SELECT_TYPES(TYPE_FUNC, ;); +#undef TYPE_FUNC +#undef FOR_ALL_AXIS_SELECT_TYPES case tosaFb::Op::TRANSPOSE_CONV2D: { const auto &tosa_attr = TosaAttr::Get(tosa_operator); tosa_assert(tosa_attr.out_pad()); tosa_assert(tosa_attr.out_pad()->size() == 4); - tosa_assert(tosa_attr.output_shape()); - tosa_assert(tosa_attr.output_shape()->size() == 4); - builder_assert(builder->Set(op, OpAttr::TRANSPOSE_CONV2D_OUTSHAPE, ToApiShape(tosa_attr.output_shape())), - "Failed to set OUTSHAPE attribute on TRANSPOSE_CONV2D"); builder_assert(builder->Set(op, OpAttr::TRANSPOSE_CONV2D_OUTPAD, ToApiShape(tosa_attr.out_pad())), "Failed to set OUTPAD attribute on TRANSPOSE_CONV2D"); } @@ -841,8 +773,12 @@ void TosaReader::LoadGraphs(const tosaFb::TosaGraph *model, std::listSetAxisOrder(tensor, GraphApi::AxisOrder::OHWI); } - else if ( tosa_operator.op() == tosaFb::Op::FULLY_CONNECTED ) - { - builder->SetAxisOrder(tensor, GraphApi::AxisOrder::OI); - } } builder->AddInput(op, usage, tensor); - - // Zero point - switch ( tosa_operator.op() ) - { - case tosaFb::Op::AVG_POOL2D: - if ( usage == GraphApi::GraphTensorUsage::IFM ) - { - const auto &tosa_attr = TosaAttr::Get(tosa_operator); - builder->SetZeroPoint(op, usage, double(tosa_attr.input_zp())); - } - break; - case tosaFb::Op::CONV2D: - [[fallthrough]]; - case tosaFb::Op::CONV3D: - [[fallthrough]]; - case tosaFb::Op::DEPTHWISE_CONV2D: - if ( usage == GraphApi::GraphTensorUsage::IFM ) - { - const auto &tosa_attr = TosaAttr::Get(tosa_operator); - 
builder->SetZeroPoint(op, usage, double(tosa_attr.input_zp())); - } - if ( usage == GraphApi::GraphTensorUsage::Weights ) - { - const auto &tosa_attr = TosaAttr::Get(tosa_operator); - builder->SetZeroPoint(op, usage, double(tosa_attr.weight_zp())); - } - break; - case tosaFb::Op::FULLY_CONNECTED: - if ( usage == GraphApi::GraphTensorUsage::IFM ) - { - const auto &tosa_attr = TosaAttr::Get(tosa_operator); - builder->SetZeroPoint(op, usage, double(tosa_attr.input_zp())); - } - if ( usage == GraphApi::GraphTensorUsage::Weights ) - { - const auto &tosa_attr = TosaAttr::Get(tosa_operator); - builder->SetZeroPoint(op, usage, double(tosa_attr.weight_zp())); - } - break; - case tosaFb::Op::MATMUL: - if ( usage == GraphApi::GraphTensorUsage::IFM0 ) - { - const auto &tosa_attr = TosaAttr::Get(tosa_operator); - builder->SetZeroPoint(op, usage, double(tosa_attr.a_zp())); - } - if ( usage == GraphApi::GraphTensorUsage::IFM1 ) - { - const auto &tosa_attr = TosaAttr::Get(tosa_operator); - builder->SetZeroPoint(op, usage, double(tosa_attr.b_zp())); - } - break; - case tosaFb::Op::TRANSPOSE_CONV2D: - if ( usage == GraphApi::GraphTensorUsage::IFM ) - { - const auto &tosa_attr = TosaAttr::Get(tosa_operator); - builder->SetZeroPoint(op, usage, double(tosa_attr.input_zp())); - } - if ( usage == GraphApi::GraphTensorUsage::Weights ) - { - const auto &tosa_attr = TosaAttr::Get(tosa_operator); - builder->SetZeroPoint(op, usage, double(tosa_attr.weight_zp())); - } - break; - case tosaFb::Op::NEGATE: - if ( usage == GraphApi::GraphTensorUsage::IFM ) - { - const auto &tosa_attr = TosaAttr::Get(tosa_operator); - builder->SetZeroPoint(op, usage, double(tosa_attr.input1_zp())); - } - break; - case tosaFb::Op::RESCALE: - if ( usage == GraphApi::GraphTensorUsage::IFM ) - { - const auto &tosa_attr = TosaAttr::Get(tosa_operator); - builder->SetZeroPoint(op, usage, double(tosa_attr.input_zp())); - } - break; - default: - break; - } } // Add outputs for ( int i = 0; i < int(output_tensors.size()); 
i++ ) { - const auto &ten = SafeDeref(output_tensors[i]); + const auto &ten = SafeDeref(output_tensors[i], "Invalid output tensor name"); GraphApi::GraphTensorUsage usage = GraphApi::MakeTensorUsage(GraphApi::GraphTensorUsage::OFM, i); + tosa_assert(tensors.count(ten.str()), + fmt::format("{} operator output tensor '{}' not found", tosaFb::EnumNameOp(tosa_operator.op()), ten.str()) + .c_str()); builder->AddOutput(op, usage, tensors.at(ten.str())); - - // Zero point - switch ( tosa_operator.op() ) - { - case tosaFb::Op::AVG_POOL2D: - if ( usage == GraphApi::GraphTensorUsage::OFM ) - { - const auto &tosa_attr = TosaAttr::Get(tosa_operator); - builder->SetZeroPoint(op, usage, double(tosa_attr.output_zp())); - } - break; - case tosaFb::Op::NEGATE: - if ( usage == GraphApi::GraphTensorUsage::OFM ) - { - const auto &tosa_attr = TosaAttr::Get(tosa_operator); - builder->SetZeroPoint(op, usage, double(tosa_attr.output_zp())); - } - break; - case tosaFb::Op::RESCALE: - if ( usage == GraphApi::GraphTensorUsage::OFM ) - { - const auto &tosa_attr = TosaAttr::Get(tosa_operator); - builder->SetZeroPoint(op, usage, double(tosa_attr.output_zp())); - } - break; - default: - break; - } } } // Add graph inputs and outputs if ( tosa_basicblock->inputs() ) { - for ( auto ten : SafeDeref(tosa_basicblock->inputs()) ) + for ( auto ten : SafeDeref(tosa_basicblock->inputs(), "No BasicBlock inputs") ) { + tosa_assert(tensors.count(ten->str()), + fmt::format("BasicBlock input tensor '{}' not found", ten->str()).c_str()); builder->AddInput(tensors.at(ten->str())); } } - for ( auto ten : SafeDeref(tosa_basicblock->outputs()) ) + for ( auto ten : SafeDeref(tosa_basicblock->outputs(), "No BasicBlock outputs") ) { + tosa_assert(tensors.count(ten->str()), + fmt::format("BasicBlock output tensor '{}' not found", ten->str()).c_str()); builder->AddOutput(tensors.at(ten->str())); } } diff --git a/ethosu/regor/tosa/tosa_require_checks.cpp b/ethosu/regor/tosa/tosa_require_checks.cpp index 
dce9b3a2..804c6fc2 100644 --- a/ethosu/regor/tosa/tosa_require_checks.cpp +++ b/ethosu/regor/tosa/tosa_require_checks.cpp @@ -1,5 +1,5 @@ // -// SPDX-FileCopyrightText: Copyright 2023-2024 Arm Limited and/or its affiliates +// SPDX-FileCopyrightText: Copyright 2023-2025 Arm Limited and/or its affiliates // // SPDX-License-Identifier: Apache-2.0 // @@ -15,8 +15,8 @@ // See the License for the specific language governing permissions and // limitations under the License. // -// Generated by tosaValidationGenerator for TOSA Specification 0.60.0 -// Modify by implementing the constraints. +// Partially generated by tosaValidationGenerator for TOSA Specification 1.0.0draft +// TODO: Implement the constraints. #include "tosa_require_checks.hpp" @@ -26,11 +26,11 @@ namespace validator { namespace checks { -// Checks for TOSA Specification 0.60.0 -void RequireCheck_25jhgrylo2an5(const regor::Operation *op, [[maybe_unused]] const Context &context) +// Checks for TOSA Specification 1.0.0draft +void RequireCheck_3gogyrefl20gp(const regor::Operation *op, [[maybe_unused]] const Context &context) { - // Operators: ARITHMETIC_RIGHT_SHIFT, - static constexpr char constraint[] = "REQUIRE((in_out_t == int32_t && 0 <= value2 && value2 <= 31) || (in_out_t == int16_t && 0 <= value2 && value2 <= 15) || (in_out_t == int8_t && 0 <= value2 && value2 <= 7))"; + // Operators: ARITHMETIC_RIGHT_SHIFT, LOGICAL_LEFT_SHIFT, LOGICAL_RIGHT_SHIFT, + static constexpr char constraint[] = "REQUIRE((is_same() && 0 <= value2 && value2 <= 31) || (is_same() && 0 <= value2 && value2 <= 15) || (is_same() && 0 <= value2 && value2 <= 7))"; bool checkOk = true; checkOk = (op != nullptr); // TODO: Implement check if ( !checkOk ) throw std::invalid_argument(constraint); @@ -45,119 +45,91 @@ void RequireCheck_35z4hcgn21c8p(const regor::Operation *op, [[maybe_unused]] con if ( !checkOk ) throw std::invalid_argument(constraint); } -void RequireCheck_2v5c1x79g8j7o(const regor::Operation *op, [[maybe_unused]] const 
Context &context) -{ - // Operators: INTDIV, - static constexpr char constraint[] = "REQUIRE((int64_t)value1 / value2 <= maximum)"; - bool checkOk = true; - checkOk = (op != nullptr); // TODO: Implement check - if ( !checkOk ) throw std::invalid_argument(constraint); -} - -void RequireCheck_3k2pr9vozq62t(const regor::Operation *op, [[maybe_unused]] const Context &context) -{ - // Operators: LOGICAL_LEFT_SHIFT, LOGICAL_RIGHT_SHIFT, - static constexpr char constraint[] = "REQUIRE(0 <= value2 && value2 <= 31)"; - bool checkOk = true; - checkOk = (op != nullptr); // TODO: Implement check - if ( !checkOk ) throw std::invalid_argument(constraint); -} - -void RequireCheck_27adsuj7sthvo(const regor::Operation *op, [[maybe_unused]] const Context &context) +void RequireCheck_2f51h19mqfhr8(const regor::Operation *op, [[maybe_unused]] const Context &context) { // Operators: MUL, - static constexpr char constraint[] = "REQUIRE(product >= minimum && product <= maximum)"; + static constexpr char constraint[] = "REQUIRE(0 <= shift && shift <= 63)"; bool checkOk = true; checkOk = (op != nullptr); // TODO: Implement check if ( !checkOk ) throw std::invalid_argument(constraint); } -void RequireCheck_3o6eotvyt76cz(const regor::Operation *op, [[maybe_unused]] const Context &context) +void RequireCheck_3jqx5d6a2c85r(const regor::Operation *op, [[maybe_unused]] const Context &context) { - // Operators: TABLE, - static constexpr char constraint[] = "REQUIRE(length(table) == TABLE_SIZE)"; + // Operators: MUL, + static constexpr char constraint[] = "REQUIRE(is_same() || shift == 0)"; bool checkOk = true; checkOk = (op != nullptr); // TODO: Implement check if ( !checkOk ) throw std::invalid_argument(constraint); } -void RequireCheck_31n0oq4yculbk(const regor::Operation *op, [[maybe_unused]] const Context &context) +void RequireCheck_1b64l72fvni7o(const regor::Operation *op, [[maybe_unused]] const Context &context) { - // Operators: GATHER, SCATTER, - static constexpr char constraint[] = 
"REQUIRE(0 <= k && k < K)"; + // Operators: MUL, + static constexpr char constraint[] = "REQUIRE(product >= minimum_s() && product <= maximum_s())"; bool checkOk = true; checkOk = (op != nullptr); // TODO: Implement check if ( !checkOk ) throw std::invalid_argument(constraint); } -void RequireCheck_2apk8ly9uthz6(const regor::Operation *op, [[maybe_unused]] const Context &context) +void RequireCheck_3otz8rylb4eh1(const regor::Operation *op, [[maybe_unused]] const Context &context) { - // Operators: SCATTER, - static constexpr char constraint[] = "REQUIRE(output_modified[n,k,c] == false)"; + // Operators: POW, + static constexpr char constraint[] = "REQUIRE(value1 >= 0)"; bool checkOk = true; checkOk = (op != nullptr); // TODO: Implement check if ( !checkOk ) throw std::invalid_argument(constraint); } -} // namespace checks -} // namespace validator -} // namespace tosa -namespace tosa -{ -namespace validator +void RequireCheck_2p74g4god707n(const regor::Operation *op, [[maybe_unused]] const Context &context) { -namespace checks -{ -// Checks for TOSA specification 0.80.0 -void RequireCheck_7uc4ey0qoi0f(const regor::Operation *op, [[maybe_unused]] const Context &context) -{ - // Operators: ARITHMETIC_RIGHT_SHIFT, - static constexpr char constraint[] = "REQUIRE((in_out_t == i32_t && 0 <= value2 && value2 <= 31) || (in_out_t == i16_t && 0 <= value2 && value2 <= 15) || (in_out_t == i8_t && 0 <= value2 && value2 <= 7))"; + // Operators: POW, + static constexpr char constraint[] = "REQUIRE(value1 > 0 || value2 > 0)"; bool checkOk = true; checkOk = (op != nullptr); // TODO: Implement check if ( !checkOk ) throw std::invalid_argument(constraint); } -void RequireCheck_32ckjbsfiesgu(const regor::Operation *op, [[maybe_unused]] const Context &context) +void RequireCheck_61j2lms4vo0v(const regor::Operation *op, [[maybe_unused]] const Context &context) { - // Operators: INTDIV, - static constexpr char constraint[] = "REQUIRE(static_cast(value1) / static_cast(value2) <= 
maximum_s)"; + // Operators: POW, + static constexpr char constraint[] = "REQUIRE(!isNaN(value1) && !isNaN(value2))"; bool checkOk = true; checkOk = (op != nullptr); // TODO: Implement check if ( !checkOk ) throw std::invalid_argument(constraint); } -void RequireCheck_1h6xoevynk8a0(const regor::Operation *op, [[maybe_unused]] const Context &context) +void RequireCheck_3nkub9jwwaf4h(const regor::Operation *op, [[maybe_unused]] const Context &context) { - // Operators: LOGICAL_RIGHT_SHIFT, - static constexpr char constraint[] = "REQUIRE(0 <= static_cast(value2) && static_cast(value2) <= 31)"; + // Operators: POW, + static constexpr char constraint[] = "REQUIRE(is_finite(value1) && is_finite(value2))"; bool checkOk = true; checkOk = (op != nullptr); // TODO: Implement check if ( !checkOk ) throw std::invalid_argument(constraint); } -void RequireCheck_2f51h19mqfhr8(const regor::Operation *op, [[maybe_unused]] const Context &context) +void RequireCheck_3o6eotvyt76cz(const regor::Operation *op, [[maybe_unused]] const Context &context) { - // Operators: MUL, - static constexpr char constraint[] = "REQUIRE(0 <= shift && shift <= 63)"; + // Operators: TABLE, + static constexpr char constraint[] = "REQUIRE(length(table) == TABLE_SIZE)"; bool checkOk = true; checkOk = (op != nullptr); // TODO: Implement check if ( !checkOk ) throw std::invalid_argument(constraint); } -void RequireCheck_1oaur42wgph0t(const regor::Operation *op, [[maybe_unused]] const Context &context) +void RequireCheck_31n0oq4yculbk(const regor::Operation *op, [[maybe_unused]] const Context &context) { - // Operators: MUL, - static constexpr char constraint[] = "REQUIRE(in_t == int32_t || shift == 0)"; + // Operators: GATHER, SCATTER, + static constexpr char constraint[] = "REQUIRE(0 <= k && k < K)"; bool checkOk = true; checkOk = (op != nullptr); // TODO: Implement check if ( !checkOk ) throw std::invalid_argument(constraint); } -void RequireCheck_3dbpm758kyex1(const regor::Operation *op, [[maybe_unused]] 
const Context &context) +void RequireCheck_2apk8ly9uthz6(const regor::Operation *op, [[maybe_unused]] const Context &context) { - // Operators: MUL, - static constexpr char constraint[] = "REQUIRE(product >= minimum_s && product <= maximum_s)"; + // Operators: SCATTER, + static constexpr char constraint[] = "REQUIRE(output_modified[n,k,c] == false)"; bool checkOk = true; checkOk = (op != nullptr); // TODO: Implement check if ( !checkOk ) throw std::invalid_argument(constraint); @@ -217,10 +189,10 @@ void RequireCheck_2pd0619ns6vtd(const regor::Operation *op, [[maybe_unused]] con if ( !checkOk ) throw std::invalid_argument(constraint); } -void RequireCheck_54sthn768s68(const regor::Operation *op, [[maybe_unused]] const Context &context) +void RequireCheck_12cwj1gynxopa(const regor::Operation *op, [[maybe_unused]] const Context &context) { // Operators: VARIABLE_WRITE, - static constexpr char constraint[] = "REQUIRE(variable_tensor.type == in_t)"; + static constexpr char constraint[] = "REQUIRE(is_same())"; bool checkOk = true; checkOk = (op != nullptr); // TODO: Implement check if ( !checkOk ) throw std::invalid_argument(constraint); @@ -244,10 +216,10 @@ void RequireCheck_2wyo0jz6whe2p(const regor::Operation *op, [[maybe_unused]] con if ( !checkOk ) throw std::invalid_argument(constraint); } -void RequireCheck_8hliqs7zbosu(const regor::Operation *op, [[maybe_unused]] const Context &context) +void RequireCheck_2btzv7wkv70o0(const regor::Operation *op, [[maybe_unused]] const Context &context) { // Operators: VARIABLE_READ, - static constexpr char constraint[] = "REQUIRE(variable_tensor.type == out_t)"; + static constexpr char constraint[] = "REQUIRE(is_same())"; bool checkOk = true; checkOk = (op != nullptr); // TODO: Implement check if ( !checkOk ) throw std::invalid_argument(constraint); diff --git a/ethosu/regor/tosa/tosa_require_checks.hpp b/ethosu/regor/tosa/tosa_require_checks.hpp index 81c68563..b53f49f8 100644 --- a/ethosu/regor/tosa/tosa_require_checks.hpp 
+++ b/ethosu/regor/tosa/tosa_require_checks.hpp @@ -1,5 +1,5 @@ // -// SPDX-FileCopyrightText: Copyright 2023-2024 Arm Limited and/or its affiliates +// SPDX-FileCopyrightText: Copyright 2023-2025 Arm Limited and/or its affiliates // // SPDX-License-Identifier: Apache-2.0 // @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. // -// Generated by tosaValidationGenerator for TOSA Specification 0.60.0 +// Automatically generated by tosaValidationGenerator for TOSA Specification 1.0.0draft // Do not edit. #pragma once @@ -28,41 +28,29 @@ namespace validator { namespace checks { -// Checks for TOSA Specification 0.60.0 -void RequireCheck_25jhgrylo2an5(const regor::Operation *op, const Context &context); +// Checks for TOSA Specification 1.0.0draft +void RequireCheck_3gogyrefl20gp(const regor::Operation *op, const Context &context); void RequireCheck_35z4hcgn21c8p(const regor::Operation *op, const Context &context); -void RequireCheck_2v5c1x79g8j7o(const regor::Operation *op, const Context &context); -void RequireCheck_3k2pr9vozq62t(const regor::Operation *op, const Context &context); -void RequireCheck_27adsuj7sthvo(const regor::Operation *op, const Context &context); +void RequireCheck_2f51h19mqfhr8(const regor::Operation *op, const Context &context); +void RequireCheck_3jqx5d6a2c85r(const regor::Operation *op, const Context &context); +void RequireCheck_1b64l72fvni7o(const regor::Operation *op, const Context &context); +void RequireCheck_3otz8rylb4eh1(const regor::Operation *op, const Context &context); +void RequireCheck_2p74g4god707n(const regor::Operation *op, const Context &context); +void RequireCheck_61j2lms4vo0v(const regor::Operation *op, const Context &context); +void RequireCheck_3nkub9jwwaf4h(const regor::Operation *op, const Context &context); void RequireCheck_3o6eotvyt76cz(const regor::Operation *op, const Context &context); void RequireCheck_31n0oq4yculbk(const regor::Operation *op, 
const Context &context); void RequireCheck_2apk8ly9uthz6(const regor::Operation *op, const Context &context); -} // namespace checks -} // namespace validator -} // namespace tosa -namespace tosa -{ -namespace validator -{ -namespace checks -{ -// Checks for TOSA specification 0.80.0 -void RequireCheck_7uc4ey0qoi0f(const regor::Operation *op, const Context &context); -void RequireCheck_32ckjbsfiesgu(const regor::Operation *op, const Context &context); -void RequireCheck_1h6xoevynk8a0(const regor::Operation *op, const Context &context); -void RequireCheck_2f51h19mqfhr8(const regor::Operation *op, const Context &context); -void RequireCheck_1oaur42wgph0t(const regor::Operation *op, const Context &context); -void RequireCheck_3dbpm758kyex1(const regor::Operation *op, const Context &context); void RequireCheck_3ah9e1mnk126p(const regor::Operation *op, const Context &context); void RequireCheck_2e13btqfr98am(const regor::Operation *op, const Context &context); void RequireCheck_lt5trq6bbw9w(const regor::Operation *op, const Context &context); void RequireCheck_1nuiu459z8num(const regor::Operation *op, const Context &context); void RequireCheck_182ljbxwn59zs(const regor::Operation *op, const Context &context); void RequireCheck_2pd0619ns6vtd(const regor::Operation *op, const Context &context); -void RequireCheck_54sthn768s68(const regor::Operation *op, const Context &context); +void RequireCheck_12cwj1gynxopa(const regor::Operation *op, const Context &context); void RequireCheck_7uvvy4pqp2pj(const regor::Operation *op, const Context &context); void RequireCheck_2wyo0jz6whe2p(const regor::Operation *op, const Context &context); -void RequireCheck_8hliqs7zbosu(const regor::Operation *op, const Context &context); +void RequireCheck_2btzv7wkv70o0(const regor::Operation *op, const Context &context); } // namespace checks } // namespace validator } // namespace tosa diff --git a/ethosu/regor/tosa/tosa_schema_generated.hpp b/ethosu/regor/tosa/tosa_schema_generated.hpp index 
87aa208c..fb963033 100644 --- a/ethosu/regor/tosa/tosa_schema_generated.hpp +++ b/ethosu/regor/tosa/tosa_schema_generated.hpp @@ -2,7 +2,7 @@ // // To reproduce: // flatc version 24.12.23 -// schema.fbs v0.80.0 (2f3f1225db5280209cc42b8564b64c97) +// schema.fbs v1.0 (f1f9a7d91624ade57e005c8f0bdce2dd395363b1) // sed -i 's/namespace tosa/namespace tosaFb/g' schema.fbs // flatc --cpp --scoped-enums --reflect-names schema.fbs // clang-format -i tosa_generated.h @@ -21,44 +21,212 @@ static_assert(FLATBUFFERS_VERSION_MAJOR == 24 && FLATBUFFERS_VERSION_MINOR == 12 namespace tosaFb { -struct PoolAttribute; -struct PoolAttributeBuilder; +struct ArgMaxAttribute; +struct ArgMaxAttributeBuilder; -struct ConvAttribute; -struct ConvAttributeBuilder; +struct AvgPool2dAttribute; +struct AvgPool2dAttributeBuilder; -struct TransposeConvAttribute; -struct TransposeConvAttributeBuilder; +struct Conv2dAttribute; +struct Conv2dAttributeBuilder; + +struct Conv3dAttribute; +struct Conv3dAttributeBuilder; + +struct DepthwiseConv2dAttribute; +struct DepthwiseConv2dAttributeBuilder; + +struct FFT2dAttribute; +struct FFT2dAttributeBuilder; + +struct MatMulAttribute; +struct MatMulAttributeBuilder; + +struct MaxPool2dAttribute; +struct MaxPool2dAttributeBuilder; + +struct RFFT2dAttribute; +struct RFFT2dAttributeBuilder; + +struct TransposeConv2dAttribute; +struct TransposeConv2dAttributeBuilder; + +struct ClampAttribute; +struct ClampAttributeBuilder; + +struct ErfAttribute; +struct ErfAttributeBuilder; + +struct SigmoidAttribute; +struct SigmoidAttributeBuilder; + +struct TanhAttribute; +struct TanhAttributeBuilder; + +struct AddAttribute; +struct AddAttributeBuilder; + +struct ArithmeticRightShiftAttribute; +struct ArithmeticRightShiftAttributeBuilder; + +struct BitwiseAndAttribute; +struct BitwiseAndAttributeBuilder; + +struct BitwiseOrAttribute; +struct BitwiseOrAttributeBuilder; + +struct BitwiseXorAttribute; +struct BitwiseXorAttributeBuilder; + +struct IntDivAttribute; +struct 
IntDivAttributeBuilder; + +struct LogicalAndAttribute; +struct LogicalAndAttributeBuilder; + +struct LogicalLeftShiftAttribute; +struct LogicalLeftShiftAttributeBuilder; + +struct LogicalRightShiftAttribute; +struct LogicalRightShiftAttributeBuilder; + +struct LogicalOrAttribute; +struct LogicalOrAttributeBuilder; + +struct LogicalXorAttribute; +struct LogicalXorAttributeBuilder; + +struct MaximumAttribute; +struct MaximumAttributeBuilder; + +struct MinimumAttribute; +struct MinimumAttributeBuilder; + +struct MulAttribute; +struct MulAttributeBuilder; + +struct PowAttribute; +struct PowAttributeBuilder; + +struct SubAttribute; +struct SubAttributeBuilder; + +struct TableAttribute; +struct TableAttributeBuilder; + +struct AbsAttribute; +struct AbsAttributeBuilder; + +struct BitwiseNotAttribute; +struct BitwiseNotAttributeBuilder; + +struct CeilAttribute; +struct CeilAttributeBuilder; + +struct ClzAttribute; +struct ClzAttributeBuilder; + +struct CosAttribute; +struct CosAttributeBuilder; + +struct ExpAttribute; +struct ExpAttributeBuilder; + +struct FloorAttribute; +struct FloorAttributeBuilder; + +struct LogAttribute; +struct LogAttributeBuilder; + +struct LogicalNotAttribute; +struct LogicalNotAttributeBuilder; + +struct NegateAttribute; +struct NegateAttributeBuilder; + +struct ReciprocalAttribute; +struct ReciprocalAttributeBuilder; + +struct RsqrtAttribute; +struct RsqrtAttributeBuilder; + +struct SinAttribute; +struct SinAttributeBuilder; + +struct SelectAttribute; +struct SelectAttributeBuilder; + +struct EqualAttribute; +struct EqualAttributeBuilder; + +struct GreaterAttribute; +struct GreaterAttributeBuilder; + +struct GreaterEqualAttribute; +struct GreaterEqualAttributeBuilder; + +struct ReduceAllAttribute; +struct ReduceAllAttributeBuilder; + +struct ReduceAnyAttribute; +struct ReduceAnyAttributeBuilder; + +struct ReduceMaxAttribute; +struct ReduceMaxAttributeBuilder; + +struct ReduceMinAttribute; +struct ReduceMinAttributeBuilder; + +struct 
ReduceProductAttribute; +struct ReduceProductAttributeBuilder; + +struct ReduceSumAttribute; +struct ReduceSumAttributeBuilder; + +struct ConcatAttribute; +struct ConcatAttributeBuilder; struct PadAttribute; struct PadAttributeBuilder; -struct AxisAttribute; -struct AxisAttributeBuilder; - struct ReshapeAttribute; struct ReshapeAttributeBuilder; +struct ReverseAttribute; +struct ReverseAttributeBuilder; + struct SliceAttribute; struct SliceAttributeBuilder; struct TileAttribute; struct TileAttributeBuilder; +struct TransposeAttribute; +struct TransposeAttributeBuilder; + +struct GatherAttribute; +struct GatherAttributeBuilder; + +struct ScatterAttribute; +struct ScatterAttributeBuilder; + struct ResizeAttribute; struct ResizeAttributeBuilder; -struct ClampAttribute; -struct ClampAttributeBuilder; +struct CastAttribute; +struct CastAttributeBuilder; struct RescaleAttribute; struct RescaleAttributeBuilder; -struct MulAttribute; -struct MulAttributeBuilder; +struct ConstAttribute; +struct ConstAttributeBuilder; -struct ArithmeticRightShiftAttribute; -struct ArithmeticRightShiftAttributeBuilder; +struct IdentityAttribute; +struct IdentityAttributeBuilder; + +struct CustomAttribute; +struct CustomAttributeBuilder; struct CondIfAttribute; struct CondIfAttributeBuilder; @@ -66,26 +234,17 @@ struct CondIfAttributeBuilder; struct WhileLoopAttribute; struct WhileLoopAttributeBuilder; -struct TransposeAttribute; -struct TransposeAttributeBuilder; - -struct TableAttribute; -struct TableAttributeBuilder; - -struct MatMulAttribute; -struct MatMulAttributeBuilder; +struct VariableAttribute; +struct VariableAttributeBuilder; -struct FullyConnectedAttribute; -struct FullyConnectedAttributeBuilder; +struct VariableWriteAttribute; +struct VariableWriteAttributeBuilder; -struct NegateAttribute; -struct NegateAttributeBuilder; - -struct CustomAttribute; -struct CustomAttributeBuilder; +struct VariableReadAttribute; +struct VariableReadAttributeBuilder; -struct FFTAttribute; -struct 
FFTAttributeBuilder; +struct ConstShapeAttribute; +struct ConstShapeAttributeBuilder; struct Version; struct VersionBuilder; @@ -93,6 +252,12 @@ struct VersionBuilder; struct TosaTensor; struct TosaTensorBuilder; +struct TosaShape; +struct TosaShapeBuilder; + +struct OpLocation; +struct OpLocationBuilder; + struct TosaOperator; struct TosaOperatorBuilder; @@ -105,54 +270,164 @@ struct TosaRegionBuilder; struct TosaGraph; struct TosaGraphBuilder; -inline const ::flatbuffers::TypeTable *PoolAttributeTypeTable(); +inline const ::flatbuffers::TypeTable *ArgMaxAttributeTypeTable(); -inline const ::flatbuffers::TypeTable *ConvAttributeTypeTable(); +inline const ::flatbuffers::TypeTable *AvgPool2dAttributeTypeTable(); -inline const ::flatbuffers::TypeTable *TransposeConvAttributeTypeTable(); +inline const ::flatbuffers::TypeTable *Conv2dAttributeTypeTable(); -inline const ::flatbuffers::TypeTable *PadAttributeTypeTable(); +inline const ::flatbuffers::TypeTable *Conv3dAttributeTypeTable(); -inline const ::flatbuffers::TypeTable *AxisAttributeTypeTable(); +inline const ::flatbuffers::TypeTable *DepthwiseConv2dAttributeTypeTable(); -inline const ::flatbuffers::TypeTable *ReshapeAttributeTypeTable(); +inline const ::flatbuffers::TypeTable *FFT2dAttributeTypeTable(); -inline const ::flatbuffers::TypeTable *SliceAttributeTypeTable(); +inline const ::flatbuffers::TypeTable *MatMulAttributeTypeTable(); -inline const ::flatbuffers::TypeTable *TileAttributeTypeTable(); +inline const ::flatbuffers::TypeTable *MaxPool2dAttributeTypeTable(); -inline const ::flatbuffers::TypeTable *ResizeAttributeTypeTable(); +inline const ::flatbuffers::TypeTable *RFFT2dAttributeTypeTable(); + +inline const ::flatbuffers::TypeTable *TransposeConv2dAttributeTypeTable(); inline const ::flatbuffers::TypeTable *ClampAttributeTypeTable(); -inline const ::flatbuffers::TypeTable *RescaleAttributeTypeTable(); +inline const ::flatbuffers::TypeTable *ErfAttributeTypeTable(); -inline const 
::flatbuffers::TypeTable *MulAttributeTypeTable(); +inline const ::flatbuffers::TypeTable *SigmoidAttributeTypeTable(); + +inline const ::flatbuffers::TypeTable *TanhAttributeTypeTable(); + +inline const ::flatbuffers::TypeTable *AddAttributeTypeTable(); inline const ::flatbuffers::TypeTable *ArithmeticRightShiftAttributeTypeTable(); -inline const ::flatbuffers::TypeTable *CondIfAttributeTypeTable(); +inline const ::flatbuffers::TypeTable *BitwiseAndAttributeTypeTable(); -inline const ::flatbuffers::TypeTable *WhileLoopAttributeTypeTable(); +inline const ::flatbuffers::TypeTable *BitwiseOrAttributeTypeTable(); -inline const ::flatbuffers::TypeTable *TransposeAttributeTypeTable(); +inline const ::flatbuffers::TypeTable *BitwiseXorAttributeTypeTable(); + +inline const ::flatbuffers::TypeTable *IntDivAttributeTypeTable(); + +inline const ::flatbuffers::TypeTable *LogicalAndAttributeTypeTable(); + +inline const ::flatbuffers::TypeTable *LogicalLeftShiftAttributeTypeTable(); + +inline const ::flatbuffers::TypeTable *LogicalRightShiftAttributeTypeTable(); + +inline const ::flatbuffers::TypeTable *LogicalOrAttributeTypeTable(); + +inline const ::flatbuffers::TypeTable *LogicalXorAttributeTypeTable(); + +inline const ::flatbuffers::TypeTable *MaximumAttributeTypeTable(); + +inline const ::flatbuffers::TypeTable *MinimumAttributeTypeTable(); + +inline const ::flatbuffers::TypeTable *MulAttributeTypeTable(); + +inline const ::flatbuffers::TypeTable *PowAttributeTypeTable(); + +inline const ::flatbuffers::TypeTable *SubAttributeTypeTable(); inline const ::flatbuffers::TypeTable *TableAttributeTypeTable(); -inline const ::flatbuffers::TypeTable *MatMulAttributeTypeTable(); +inline const ::flatbuffers::TypeTable *AbsAttributeTypeTable(); + +inline const ::flatbuffers::TypeTable *BitwiseNotAttributeTypeTable(); + +inline const ::flatbuffers::TypeTable *CeilAttributeTypeTable(); + +inline const ::flatbuffers::TypeTable *ClzAttributeTypeTable(); + +inline const 
::flatbuffers::TypeTable *CosAttributeTypeTable(); + +inline const ::flatbuffers::TypeTable *ExpAttributeTypeTable(); + +inline const ::flatbuffers::TypeTable *FloorAttributeTypeTable(); -inline const ::flatbuffers::TypeTable *FullyConnectedAttributeTypeTable(); +inline const ::flatbuffers::TypeTable *LogAttributeTypeTable(); + +inline const ::flatbuffers::TypeTable *LogicalNotAttributeTypeTable(); inline const ::flatbuffers::TypeTable *NegateAttributeTypeTable(); +inline const ::flatbuffers::TypeTable *ReciprocalAttributeTypeTable(); + +inline const ::flatbuffers::TypeTable *RsqrtAttributeTypeTable(); + +inline const ::flatbuffers::TypeTable *SinAttributeTypeTable(); + +inline const ::flatbuffers::TypeTable *SelectAttributeTypeTable(); + +inline const ::flatbuffers::TypeTable *EqualAttributeTypeTable(); + +inline const ::flatbuffers::TypeTable *GreaterAttributeTypeTable(); + +inline const ::flatbuffers::TypeTable *GreaterEqualAttributeTypeTable(); + +inline const ::flatbuffers::TypeTable *ReduceAllAttributeTypeTable(); + +inline const ::flatbuffers::TypeTable *ReduceAnyAttributeTypeTable(); + +inline const ::flatbuffers::TypeTable *ReduceMaxAttributeTypeTable(); + +inline const ::flatbuffers::TypeTable *ReduceMinAttributeTypeTable(); + +inline const ::flatbuffers::TypeTable *ReduceProductAttributeTypeTable(); + +inline const ::flatbuffers::TypeTable *ReduceSumAttributeTypeTable(); + +inline const ::flatbuffers::TypeTable *ConcatAttributeTypeTable(); + +inline const ::flatbuffers::TypeTable *PadAttributeTypeTable(); + +inline const ::flatbuffers::TypeTable *ReshapeAttributeTypeTable(); + +inline const ::flatbuffers::TypeTable *ReverseAttributeTypeTable(); + +inline const ::flatbuffers::TypeTable *SliceAttributeTypeTable(); + +inline const ::flatbuffers::TypeTable *TileAttributeTypeTable(); + +inline const ::flatbuffers::TypeTable *TransposeAttributeTypeTable(); + +inline const ::flatbuffers::TypeTable *GatherAttributeTypeTable(); + +inline const 
::flatbuffers::TypeTable *ScatterAttributeTypeTable(); + +inline const ::flatbuffers::TypeTable *ResizeAttributeTypeTable(); + +inline const ::flatbuffers::TypeTable *CastAttributeTypeTable(); + +inline const ::flatbuffers::TypeTable *RescaleAttributeTypeTable(); + +inline const ::flatbuffers::TypeTable *ConstAttributeTypeTable(); + +inline const ::flatbuffers::TypeTable *IdentityAttributeTypeTable(); + inline const ::flatbuffers::TypeTable *CustomAttributeTypeTable(); -inline const ::flatbuffers::TypeTable *FFTAttributeTypeTable(); +inline const ::flatbuffers::TypeTable *CondIfAttributeTypeTable(); + +inline const ::flatbuffers::TypeTable *WhileLoopAttributeTypeTable(); + +inline const ::flatbuffers::TypeTable *VariableAttributeTypeTable(); + +inline const ::flatbuffers::TypeTable *VariableWriteAttributeTypeTable(); + +inline const ::flatbuffers::TypeTable *VariableReadAttributeTypeTable(); + +inline const ::flatbuffers::TypeTable *ConstShapeAttributeTypeTable(); inline const ::flatbuffers::TypeTable *VersionTypeTable(); inline const ::flatbuffers::TypeTable *TosaTensorTypeTable(); +inline const ::flatbuffers::TypeTable *TosaShapeTypeTable(); + +inline const ::flatbuffers::TypeTable *OpLocationTypeTable(); + inline const ::flatbuffers::TypeTable *TosaOperatorTypeTable(); inline const ::flatbuffers::TypeTable *TosaBasicBlockTypeTable(); @@ -165,38 +440,38 @@ enum class DType : uint32_t { UNKNOWN = 0, BOOL = 1, - UINT8 = 2, - INT4 = 3, - INT8 = 4, - INT16 = 5, - INT32 = 6, - INT48 = 7, - FP32 = 8, - UINT16 = 9, - FP16 = 10, - BF16 = 11, - SHAPE = 12, + INT4 = 2, + INT8 = 3, + INT16 = 4, + INT32 = 5, + INT48 = 6, + FP32 = 7, + FP16 = 8, + BF16 = 9, + SHAPE = 10, + FP8E4M3 = 11, + FP8E5M2 = 12, MIN = UNKNOWN, - MAX = SHAPE + MAX = FP8E5M2 }; inline const DType (&EnumValuesDType())[13] { - static const DType values[] = {DType::UNKNOWN, DType::BOOL, DType::UINT8, DType::INT4, DType::INT8, DType::INT16, - DType::INT32, DType::INT48, DType::FP32, DType::UINT16, 
DType::FP16, DType::BF16, DType::SHAPE}; + static const DType values[] = {DType::UNKNOWN, DType::BOOL, DType::INT4, DType::INT8, DType::INT16, DType::INT32, + DType::INT48, DType::FP32, DType::FP16, DType::BF16, DType::SHAPE, DType::FP8E4M3, DType::FP8E5M2}; return values; } inline const char *const *EnumNamesDType() { - static const char *const names[14] = {"UNKNOWN", "BOOL", "UINT8", "INT4", "INT8", "INT16", "INT32", "INT48", "FP32", - "UINT16", "FP16", "BF16", "SHAPE", nullptr}; + static const char *const names[14] = {"UNKNOWN", "BOOL", "INT4", "INT8", "INT16", "INT32", "INT48", "FP32", "FP16", + "BF16", "SHAPE", "FP8E4M3", "FP8E5M2", nullptr}; return names; } inline const char *EnumNameDType(DType e) { - if ( ::flatbuffers::IsOutRange(e, DType::UNKNOWN, DType::SHAPE) ) return ""; + if ( ::flatbuffers::IsOutRange(e, DType::UNKNOWN, DType::FP8E5M2) ) return ""; const size_t index = static_cast(e); return EnumNamesDType()[index]; } @@ -229,6 +504,64 @@ inline const char *EnumNameResizeMode(ResizeMode e) return EnumNamesResizeMode()[index]; } +enum class NanPropagationMode : uint32_t +{ + UNKNOWN = 0, + PROPAGATE = 1, + IGNORE = 2, + MIN = UNKNOWN, + MAX = IGNORE +}; + +inline const NanPropagationMode (&EnumValuesNanPropagationMode())[3] +{ + static const NanPropagationMode values[] = {NanPropagationMode::UNKNOWN, NanPropagationMode::PROPAGATE, NanPropagationMode::IGNORE}; + return values; +} + +inline const char *const *EnumNamesNanPropagationMode() +{ + static const char *const names[4] = {"UNKNOWN", "PROPAGATE", "IGNORE", nullptr}; + return names; +} + +inline const char *EnumNameNanPropagationMode(NanPropagationMode e) +{ + if ( ::flatbuffers::IsOutRange(e, NanPropagationMode::UNKNOWN, NanPropagationMode::IGNORE) ) return ""; + const size_t index = static_cast(e); + return EnumNamesNanPropagationMode()[index]; +} + +enum class RoundingMode : uint32_t +{ + UNKNOWN = 0, + SINGLE_ROUND = 1, + INEXACT_ROUND = 2, + DOUBLE_ROUND = 3, + MIN = UNKNOWN, + MAX = 
DOUBLE_ROUND +}; + +inline const RoundingMode (&EnumValuesRoundingMode())[4] +{ + static const RoundingMode values[] = { + RoundingMode::UNKNOWN, RoundingMode::SINGLE_ROUND, RoundingMode::INEXACT_ROUND, RoundingMode::DOUBLE_ROUND}; + return values; +} + +inline const char *const *EnumNamesRoundingMode() +{ + static const char *const names[5] = {"UNKNOWN", "SINGLE_ROUND", "INEXACT_ROUND", "DOUBLE_ROUND", nullptr}; + return names; +} + +inline const char *EnumNameRoundingMode(RoundingMode e) +{ + if ( ::flatbuffers::IsOutRange(e, RoundingMode::UNKNOWN, RoundingMode::DOUBLE_ROUND) ) return ""; + const size_t index = static_cast(e); + return EnumNamesRoundingMode()[index]; +} + enum class Op : uint32_t { UNKNOWN = 0, @@ -237,107 +570,111 @@ enum class Op : uint32_t CONV2D = 3, CONV3D = 4, DEPTHWISE_CONV2D = 5, - FULLY_CONNECTED = 6, + FFT2D = 6, MATMUL = 7, MAX_POOL2D = 8, - TRANSPOSE_CONV2D = 9, - CLAMP = 10, - RESERVED = 11, - SIGMOID = 12, - TANH = 13, - ADD = 14, - ARITHMETIC_RIGHT_SHIFT = 15, - BITWISE_AND = 16, - BITWISE_OR = 17, - BITWISE_XOR = 18, - INTDIV = 19, - LOGICAL_AND = 20, - LOGICAL_LEFT_SHIFT = 21, - LOGICAL_RIGHT_SHIFT = 22, - LOGICAL_OR = 23, - LOGICAL_XOR = 24, - MAXIMUM = 25, - MINIMUM = 26, - MUL = 27, - POW = 28, - SUB = 29, - TABLE = 30, - ABS = 31, - BITWISE_NOT = 32, - CEIL = 33, - CLZ = 34, - EXP = 35, - FLOOR = 36, - LOG = 37, - LOGICAL_NOT = 38, - NEGATE = 39, - RECIPROCAL = 40, - RSQRT = 41, - SELECT = 42, - EQUAL = 43, - GREATER = 44, - GREATER_EQUAL = 45, - REDUCE_ANY = 46, - REDUCE_ALL = 47, - REDUCE_MAX = 48, - REDUCE_MIN = 49, - REDUCE_PRODUCT = 50, - REDUCE_SUM = 51, - CONCAT = 52, - PAD = 53, - RESHAPE = 54, - REVERSE = 55, - SLICE = 56, - TILE = 57, - TRANSPOSE = 58, - GATHER = 59, - SCATTER = 60, - RESIZE = 61, - CAST = 62, - RESCALE = 63, - CONST = 64, - IDENTITY = 65, - CUSTOM = 66, - COND_IF = 67, - WHILE_LOOP = 68, - FFT2D = 69, - RFFT2D = 70, - ERF = 71, - DIM = 72, + RFFT2D = 9, + TRANSPOSE_CONV2D = 10, + CLAMP = 11, + ERF 
= 12, + SIGMOID = 13, + TANH = 14, + ADD = 15, + ARITHMETIC_RIGHT_SHIFT = 16, + BITWISE_AND = 17, + BITWISE_OR = 18, + BITWISE_XOR = 19, + INTDIV = 20, + LOGICAL_AND = 21, + LOGICAL_LEFT_SHIFT = 22, + LOGICAL_RIGHT_SHIFT = 23, + LOGICAL_OR = 24, + LOGICAL_XOR = 25, + MAXIMUM = 26, + MINIMUM = 27, + MUL = 28, + POW = 29, + SUB = 30, + TABLE = 31, + ABS = 32, + BITWISE_NOT = 33, + CEIL = 34, + CLZ = 35, + COS = 36, + EXP = 37, + FLOOR = 38, + LOG = 39, + LOGICAL_NOT = 40, + NEGATE = 41, + RECIPROCAL = 42, + RSQRT = 43, + SIN = 44, + SELECT = 45, + EQUAL = 46, + GREATER = 47, + GREATER_EQUAL = 48, + REDUCE_ALL = 49, + REDUCE_ANY = 50, + REDUCE_MAX = 51, + REDUCE_MIN = 52, + REDUCE_PRODUCT = 53, + REDUCE_SUM = 54, + CONCAT = 55, + PAD = 56, + RESHAPE = 57, + REVERSE = 58, + SLICE = 59, + TILE = 60, + TRANSPOSE = 61, + GATHER = 62, + SCATTER = 63, + RESIZE = 64, + CAST = 65, + RESCALE = 66, + CONST = 67, + IDENTITY = 68, + CUSTOM = 69, + COND_IF = 70, + WHILE_LOOP = 71, + VARIABLE = 72, + VARIABLE_WRITE = 73, + VARIABLE_READ = 74, + CONST_SHAPE = 75, MIN = UNKNOWN, - MAX = DIM + MAX = CONST_SHAPE }; -inline const Op (&EnumValuesOp())[73] +inline const Op (&EnumValuesOp())[76] { static const Op values[] = {Op::UNKNOWN, Op::ARGMAX, Op::AVG_POOL2D, Op::CONV2D, Op::CONV3D, Op::DEPTHWISE_CONV2D, - Op::FULLY_CONNECTED, Op::MATMUL, Op::MAX_POOL2D, Op::TRANSPOSE_CONV2D, Op::CLAMP, Op::RESERVED, Op::SIGMOID, Op::TANH, - Op::ADD, Op::ARITHMETIC_RIGHT_SHIFT, Op::BITWISE_AND, Op::BITWISE_OR, Op::BITWISE_XOR, Op::INTDIV, Op::LOGICAL_AND, - Op::LOGICAL_LEFT_SHIFT, Op::LOGICAL_RIGHT_SHIFT, Op::LOGICAL_OR, Op::LOGICAL_XOR, Op::MAXIMUM, Op::MINIMUM, Op::MUL, - Op::POW, Op::SUB, Op::TABLE, Op::ABS, Op::BITWISE_NOT, Op::CEIL, Op::CLZ, Op::EXP, Op::FLOOR, Op::LOG, Op::LOGICAL_NOT, - Op::NEGATE, Op::RECIPROCAL, Op::RSQRT, Op::SELECT, Op::EQUAL, Op::GREATER, Op::GREATER_EQUAL, Op::REDUCE_ANY, - Op::REDUCE_ALL, Op::REDUCE_MAX, Op::REDUCE_MIN, Op::REDUCE_PRODUCT, Op::REDUCE_SUM, Op::CONCAT, 
Op::PAD, Op::RESHAPE, - Op::REVERSE, Op::SLICE, Op::TILE, Op::TRANSPOSE, Op::GATHER, Op::SCATTER, Op::RESIZE, Op::CAST, Op::RESCALE, - Op::CONST, Op::IDENTITY, Op::CUSTOM, Op::COND_IF, Op::WHILE_LOOP, Op::FFT2D, Op::RFFT2D, Op::ERF, Op::DIM}; + Op::FFT2D, Op::MATMUL, Op::MAX_POOL2D, Op::RFFT2D, Op::TRANSPOSE_CONV2D, Op::CLAMP, Op::ERF, Op::SIGMOID, + Op::TANH, Op::ADD, Op::ARITHMETIC_RIGHT_SHIFT, Op::BITWISE_AND, Op::BITWISE_OR, Op::BITWISE_XOR, Op::INTDIV, + Op::LOGICAL_AND, Op::LOGICAL_LEFT_SHIFT, Op::LOGICAL_RIGHT_SHIFT, Op::LOGICAL_OR, Op::LOGICAL_XOR, Op::MAXIMUM, + Op::MINIMUM, Op::MUL, Op::POW, Op::SUB, Op::TABLE, Op::ABS, Op::BITWISE_NOT, Op::CEIL, Op::CLZ, Op::COS, + Op::EXP, Op::FLOOR, Op::LOG, Op::LOGICAL_NOT, Op::NEGATE, Op::RECIPROCAL, Op::RSQRT, Op::SIN, Op::SELECT, + Op::EQUAL, Op::GREATER, Op::GREATER_EQUAL, Op::REDUCE_ALL, Op::REDUCE_ANY, Op::REDUCE_MAX, Op::REDUCE_MIN, + Op::REDUCE_PRODUCT, Op::REDUCE_SUM, Op::CONCAT, Op::PAD, Op::RESHAPE, Op::REVERSE, Op::SLICE, Op::TILE, + Op::TRANSPOSE, Op::GATHER, Op::SCATTER, Op::RESIZE, Op::CAST, Op::RESCALE, Op::CONST, Op::IDENTITY, Op::CUSTOM, + Op::COND_IF, Op::WHILE_LOOP, Op::VARIABLE, Op::VARIABLE_WRITE, Op::VARIABLE_READ, Op::CONST_SHAPE}; return values; } inline const char *const *EnumNamesOp() { - static const char *const names[74] = {"UNKNOWN", "ARGMAX", "AVG_POOL2D", "CONV2D", "CONV3D", "DEPTHWISE_CONV2D", - "FULLY_CONNECTED", "MATMUL", "MAX_POOL2D", "TRANSPOSE_CONV2D", "CLAMP", "RESERVED", "SIGMOID", "TANH", "ADD", - "ARITHMETIC_RIGHT_SHIFT", "BITWISE_AND", "BITWISE_OR", "BITWISE_XOR", "INTDIV", "LOGICAL_AND", "LOGICAL_LEFT_SHIFT", - "LOGICAL_RIGHT_SHIFT", "LOGICAL_OR", "LOGICAL_XOR", "MAXIMUM", "MINIMUM", "MUL", "POW", "SUB", "TABLE", "ABS", - "BITWISE_NOT", "CEIL", "CLZ", "EXP", "FLOOR", "LOG", "LOGICAL_NOT", "NEGATE", "RECIPROCAL", "RSQRT", "SELECT", - "EQUAL", "GREATER", "GREATER_EQUAL", "REDUCE_ANY", "REDUCE_ALL", "REDUCE_MAX", "REDUCE_MIN", "REDUCE_PRODUCT", - "REDUCE_SUM", "CONCAT", "PAD", 
"RESHAPE", "REVERSE", "SLICE", "TILE", "TRANSPOSE", "GATHER", "SCATTER", "RESIZE", - "CAST", "RESCALE", "CONST", "IDENTITY", "CUSTOM", "COND_IF", "WHILE_LOOP", "FFT2D", "RFFT2D", "ERF", "DIM", nullptr}; + static const char *const names[77] = {"UNKNOWN", "ARGMAX", "AVG_POOL2D", "CONV2D", "CONV3D", "DEPTHWISE_CONV2D", "FFT2D", + "MATMUL", "MAX_POOL2D", "RFFT2D", "TRANSPOSE_CONV2D", "CLAMP", "ERF", "SIGMOID", "TANH", "ADD", "ARITHMETIC_RIGHT_SHIFT", + "BITWISE_AND", "BITWISE_OR", "BITWISE_XOR", "INTDIV", "LOGICAL_AND", "LOGICAL_LEFT_SHIFT", "LOGICAL_RIGHT_SHIFT", + "LOGICAL_OR", "LOGICAL_XOR", "MAXIMUM", "MINIMUM", "MUL", "POW", "SUB", "TABLE", "ABS", "BITWISE_NOT", "CEIL", "CLZ", + "COS", "EXP", "FLOOR", "LOG", "LOGICAL_NOT", "NEGATE", "RECIPROCAL", "RSQRT", "SIN", "SELECT", "EQUAL", "GREATER", + "GREATER_EQUAL", "REDUCE_ALL", "REDUCE_ANY", "REDUCE_MAX", "REDUCE_MIN", "REDUCE_PRODUCT", "REDUCE_SUM", "CONCAT", + "PAD", "RESHAPE", "REVERSE", "SLICE", "TILE", "TRANSPOSE", "GATHER", "SCATTER", "RESIZE", "CAST", "RESCALE", "CONST", + "IDENTITY", "CUSTOM", "COND_IF", "WHILE_LOOP", "VARIABLE", "VARIABLE_WRITE", "VARIABLE_READ", "CONST_SHAPE", nullptr}; return names; } inline const char *EnumNameOp(Op e) { - if ( ::flatbuffers::IsOutRange(e, Op::UNKNOWN, Op::DIM) ) return ""; + if ( ::flatbuffers::IsOutRange(e, Op::UNKNOWN, Op::CONST_SHAPE) ) return ""; const size_t index = static_cast(e); return EnumNamesOp()[index]; } @@ -345,55 +682,132 @@ inline const char *EnumNameOp(Op e) enum class Attribute : uint8_t { NONE = 0, - PoolAttribute = 1, - ConvAttribute = 2, - TransposeConvAttribute = 3, - PadAttribute = 4, - AxisAttribute = 5, - ReshapeAttribute = 6, - SliceAttribute = 7, - TileAttribute = 8, - ResizeAttribute = 9, - ClampAttribute = 10, - RescaleAttribute = 11, - MulAttribute = 12, - ArithmeticRightShiftAttribute = 13, - CondIfAttribute = 14, - WhileLoopAttribute = 15, - TransposeAttribute = 16, - TableAttribute = 17, - MatMulAttribute = 18, - FullyConnectedAttribute = 
19, - NegateAttribute = 20, - CustomAttribute = 21, - FFTAttribute = 22, + ArgMaxAttribute = 1, + AvgPool2dAttribute = 2, + Conv2dAttribute = 3, + Conv3dAttribute = 4, + DepthwiseConv2dAttribute = 5, + FFT2dAttribute = 6, + MatMulAttribute = 7, + MaxPool2dAttribute = 8, + RFFT2dAttribute = 9, + TransposeConv2dAttribute = 10, + ClampAttribute = 11, + ErfAttribute = 12, + SigmoidAttribute = 13, + TanhAttribute = 14, + AddAttribute = 15, + ArithmeticRightShiftAttribute = 16, + BitwiseAndAttribute = 17, + BitwiseOrAttribute = 18, + BitwiseXorAttribute = 19, + IntDivAttribute = 20, + LogicalAndAttribute = 21, + LogicalLeftShiftAttribute = 22, + LogicalRightShiftAttribute = 23, + LogicalOrAttribute = 24, + LogicalXorAttribute = 25, + MaximumAttribute = 26, + MinimumAttribute = 27, + MulAttribute = 28, + PowAttribute = 29, + SubAttribute = 30, + TableAttribute = 31, + AbsAttribute = 32, + BitwiseNotAttribute = 33, + CeilAttribute = 34, + ClzAttribute = 35, + CosAttribute = 36, + ExpAttribute = 37, + FloorAttribute = 38, + LogAttribute = 39, + LogicalNotAttribute = 40, + NegateAttribute = 41, + ReciprocalAttribute = 42, + RsqrtAttribute = 43, + SinAttribute = 44, + SelectAttribute = 45, + EqualAttribute = 46, + GreaterAttribute = 47, + GreaterEqualAttribute = 48, + ReduceAllAttribute = 49, + ReduceAnyAttribute = 50, + ReduceMaxAttribute = 51, + ReduceMinAttribute = 52, + ReduceProductAttribute = 53, + ReduceSumAttribute = 54, + ConcatAttribute = 55, + PadAttribute = 56, + ReshapeAttribute = 57, + ReverseAttribute = 58, + SliceAttribute = 59, + TileAttribute = 60, + TransposeAttribute = 61, + GatherAttribute = 62, + ScatterAttribute = 63, + ResizeAttribute = 64, + CastAttribute = 65, + RescaleAttribute = 66, + ConstAttribute = 67, + IdentityAttribute = 68, + CustomAttribute = 69, + CondIfAttribute = 70, + WhileLoopAttribute = 71, + VariableAttribute = 72, + VariableWriteAttribute = 73, + VariableReadAttribute = 74, + ConstShapeAttribute = 75, MIN = NONE, - MAX = 
FFTAttribute + MAX = ConstShapeAttribute }; -inline const Attribute (&EnumValuesAttribute())[23] -{ - static const Attribute values[] = {Attribute::NONE, Attribute::PoolAttribute, Attribute::ConvAttribute, - Attribute::TransposeConvAttribute, Attribute::PadAttribute, Attribute::AxisAttribute, Attribute::ReshapeAttribute, - Attribute::SliceAttribute, Attribute::TileAttribute, Attribute::ResizeAttribute, Attribute::ClampAttribute, - Attribute::RescaleAttribute, Attribute::MulAttribute, Attribute::ArithmeticRightShiftAttribute, Attribute::CondIfAttribute, - Attribute::WhileLoopAttribute, Attribute::TransposeAttribute, Attribute::TableAttribute, Attribute::MatMulAttribute, - Attribute::FullyConnectedAttribute, Attribute::NegateAttribute, Attribute::CustomAttribute, Attribute::FFTAttribute}; +inline const Attribute (&EnumValuesAttribute())[76] +{ + static const Attribute values[] = {Attribute::NONE, Attribute::ArgMaxAttribute, Attribute::AvgPool2dAttribute, + Attribute::Conv2dAttribute, Attribute::Conv3dAttribute, Attribute::DepthwiseConv2dAttribute, + Attribute::FFT2dAttribute, Attribute::MatMulAttribute, Attribute::MaxPool2dAttribute, Attribute::RFFT2dAttribute, + Attribute::TransposeConv2dAttribute, Attribute::ClampAttribute, Attribute::ErfAttribute, Attribute::SigmoidAttribute, + Attribute::TanhAttribute, Attribute::AddAttribute, Attribute::ArithmeticRightShiftAttribute, Attribute::BitwiseAndAttribute, + Attribute::BitwiseOrAttribute, Attribute::BitwiseXorAttribute, Attribute::IntDivAttribute, Attribute::LogicalAndAttribute, + Attribute::LogicalLeftShiftAttribute, Attribute::LogicalRightShiftAttribute, Attribute::LogicalOrAttribute, + Attribute::LogicalXorAttribute, Attribute::MaximumAttribute, Attribute::MinimumAttribute, Attribute::MulAttribute, + Attribute::PowAttribute, Attribute::SubAttribute, Attribute::TableAttribute, Attribute::AbsAttribute, + Attribute::BitwiseNotAttribute, Attribute::CeilAttribute, Attribute::ClzAttribute, Attribute::CosAttribute, + 
Attribute::ExpAttribute, Attribute::FloorAttribute, Attribute::LogAttribute, Attribute::LogicalNotAttribute, + Attribute::NegateAttribute, Attribute::ReciprocalAttribute, Attribute::RsqrtAttribute, Attribute::SinAttribute, + Attribute::SelectAttribute, Attribute::EqualAttribute, Attribute::GreaterAttribute, Attribute::GreaterEqualAttribute, + Attribute::ReduceAllAttribute, Attribute::ReduceAnyAttribute, Attribute::ReduceMaxAttribute, Attribute::ReduceMinAttribute, + Attribute::ReduceProductAttribute, Attribute::ReduceSumAttribute, Attribute::ConcatAttribute, Attribute::PadAttribute, + Attribute::ReshapeAttribute, Attribute::ReverseAttribute, Attribute::SliceAttribute, Attribute::TileAttribute, + Attribute::TransposeAttribute, Attribute::GatherAttribute, Attribute::ScatterAttribute, Attribute::ResizeAttribute, + Attribute::CastAttribute, Attribute::RescaleAttribute, Attribute::ConstAttribute, Attribute::IdentityAttribute, + Attribute::CustomAttribute, Attribute::CondIfAttribute, Attribute::WhileLoopAttribute, Attribute::VariableAttribute, + Attribute::VariableWriteAttribute, Attribute::VariableReadAttribute, Attribute::ConstShapeAttribute}; return values; } inline const char *const *EnumNamesAttribute() { - static const char *const names[24] = {"NONE", "PoolAttribute", "ConvAttribute", "TransposeConvAttribute", "PadAttribute", - "AxisAttribute", "ReshapeAttribute", "SliceAttribute", "TileAttribute", "ResizeAttribute", "ClampAttribute", "RescaleAttribute", - "MulAttribute", "ArithmeticRightShiftAttribute", "CondIfAttribute", "WhileLoopAttribute", "TransposeAttribute", "TableAttribute", - "MatMulAttribute", "FullyConnectedAttribute", "NegateAttribute", "CustomAttribute", "FFTAttribute", nullptr}; + static const char *const names[77] = {"NONE", "ArgMaxAttribute", "AvgPool2dAttribute", "Conv2dAttribute", "Conv3dAttribute", + "DepthwiseConv2dAttribute", "FFT2dAttribute", "MatMulAttribute", "MaxPool2dAttribute", "RFFT2dAttribute", + "TransposeConv2dAttribute", 
"ClampAttribute", "ErfAttribute", "SigmoidAttribute", "TanhAttribute", "AddAttribute", + "ArithmeticRightShiftAttribute", "BitwiseAndAttribute", "BitwiseOrAttribute", "BitwiseXorAttribute", "IntDivAttribute", + "LogicalAndAttribute", "LogicalLeftShiftAttribute", "LogicalRightShiftAttribute", "LogicalOrAttribute", + "LogicalXorAttribute", "MaximumAttribute", "MinimumAttribute", "MulAttribute", "PowAttribute", "SubAttribute", + "TableAttribute", "AbsAttribute", "BitwiseNotAttribute", "CeilAttribute", "ClzAttribute", "CosAttribute", + "ExpAttribute", "FloorAttribute", "LogAttribute", "LogicalNotAttribute", "NegateAttribute", "ReciprocalAttribute", + "RsqrtAttribute", "SinAttribute", "SelectAttribute", "EqualAttribute", "GreaterAttribute", "GreaterEqualAttribute", + "ReduceAllAttribute", "ReduceAnyAttribute", "ReduceMaxAttribute", "ReduceMinAttribute", "ReduceProductAttribute", + "ReduceSumAttribute", "ConcatAttribute", "PadAttribute", "ReshapeAttribute", "ReverseAttribute", "SliceAttribute", + "TileAttribute", "TransposeAttribute", "GatherAttribute", "ScatterAttribute", "ResizeAttribute", "CastAttribute", + "RescaleAttribute", "ConstAttribute", "IdentityAttribute", "CustomAttribute", "CondIfAttribute", "WhileLoopAttribute", + "VariableAttribute", "VariableWriteAttribute", "VariableReadAttribute", "ConstShapeAttribute", nullptr}; return names; } inline const char *EnumNameAttribute(Attribute e) { - if ( ::flatbuffers::IsOutRange(e, Attribute::NONE, Attribute::FFTAttribute) ) return ""; + if ( ::flatbuffers::IsOutRange(e, Attribute::NONE, Attribute::ConstShapeAttribute) ) return ""; const size_t index = static_cast(e); return EnumNamesAttribute()[index]; } @@ -405,57 +819,63 @@ struct AttributeTraits }; template<> -struct AttributeTraits +struct AttributeTraits { - static const Attribute enum_value = Attribute::PoolAttribute; + static const Attribute enum_value = Attribute::ArgMaxAttribute; }; template<> -struct AttributeTraits +struct AttributeTraits { - static const 
Attribute enum_value = Attribute::ConvAttribute; + static const Attribute enum_value = Attribute::AvgPool2dAttribute; }; template<> -struct AttributeTraits +struct AttributeTraits { - static const Attribute enum_value = Attribute::TransposeConvAttribute; + static const Attribute enum_value = Attribute::Conv2dAttribute; }; template<> -struct AttributeTraits +struct AttributeTraits { - static const Attribute enum_value = Attribute::PadAttribute; + static const Attribute enum_value = Attribute::Conv3dAttribute; }; template<> -struct AttributeTraits +struct AttributeTraits { - static const Attribute enum_value = Attribute::AxisAttribute; + static const Attribute enum_value = Attribute::DepthwiseConv2dAttribute; }; template<> -struct AttributeTraits +struct AttributeTraits { - static const Attribute enum_value = Attribute::ReshapeAttribute; + static const Attribute enum_value = Attribute::FFT2dAttribute; }; template<> -struct AttributeTraits +struct AttributeTraits { - static const Attribute enum_value = Attribute::SliceAttribute; + static const Attribute enum_value = Attribute::MatMulAttribute; }; template<> -struct AttributeTraits +struct AttributeTraits { - static const Attribute enum_value = Attribute::TileAttribute; + static const Attribute enum_value = Attribute::MaxPool2dAttribute; }; template<> -struct AttributeTraits +struct AttributeTraits { - static const Attribute enum_value = Attribute::ResizeAttribute; + static const Attribute enum_value = Attribute::RFFT2dAttribute; +}; + +template<> +struct AttributeTraits +{ + static const Attribute enum_value = Attribute::TransposeConv2dAttribute; }; template<> @@ -465,15 +885,27 @@ struct AttributeTraits }; template<> -struct AttributeTraits +struct AttributeTraits { - static const Attribute enum_value = Attribute::RescaleAttribute; + static const Attribute enum_value = Attribute::ErfAttribute; }; template<> -struct AttributeTraits +struct AttributeTraits { - static const Attribute enum_value = 
Attribute::MulAttribute; + static const Attribute enum_value = Attribute::SigmoidAttribute; +}; + +template<> +struct AttributeTraits +{ + static const Attribute enum_value = Attribute::TanhAttribute; +}; + +template<> +struct AttributeTraits +{ + static const Attribute enum_value = Attribute::AddAttribute; }; template<> @@ -483,415 +915,2519 @@ struct AttributeTraits }; template<> -struct AttributeTraits +struct AttributeTraits { - static const Attribute enum_value = Attribute::CondIfAttribute; + static const Attribute enum_value = Attribute::BitwiseAndAttribute; }; template<> -struct AttributeTraits +struct AttributeTraits { - static const Attribute enum_value = Attribute::WhileLoopAttribute; + static const Attribute enum_value = Attribute::BitwiseOrAttribute; }; template<> -struct AttributeTraits +struct AttributeTraits { - static const Attribute enum_value = Attribute::TransposeAttribute; + static const Attribute enum_value = Attribute::BitwiseXorAttribute; }; template<> -struct AttributeTraits +struct AttributeTraits { - static const Attribute enum_value = Attribute::TableAttribute; + static const Attribute enum_value = Attribute::IntDivAttribute; }; template<> -struct AttributeTraits +struct AttributeTraits { - static const Attribute enum_value = Attribute::MatMulAttribute; + static const Attribute enum_value = Attribute::LogicalAndAttribute; }; template<> -struct AttributeTraits +struct AttributeTraits { - static const Attribute enum_value = Attribute::FullyConnectedAttribute; + static const Attribute enum_value = Attribute::LogicalLeftShiftAttribute; }; template<> -struct AttributeTraits +struct AttributeTraits { - static const Attribute enum_value = Attribute::NegateAttribute; + static const Attribute enum_value = Attribute::LogicalRightShiftAttribute; }; template<> -struct AttributeTraits +struct AttributeTraits { - static const Attribute enum_value = Attribute::CustomAttribute; + static const Attribute enum_value = Attribute::LogicalOrAttribute; }; 
template<> -struct AttributeTraits +struct AttributeTraits { - static const Attribute enum_value = Attribute::FFTAttribute; + static const Attribute enum_value = Attribute::LogicalXorAttribute; }; -bool VerifyAttribute(::flatbuffers::Verifier &verifier, const void *obj, Attribute type); -bool VerifyAttributeVector(::flatbuffers::Verifier &verifier, - const ::flatbuffers::Vector<::flatbuffers::Offset> *values, const ::flatbuffers::Vector *types); +template<> +struct AttributeTraits +{ + static const Attribute enum_value = Attribute::MaximumAttribute; +}; -struct PoolAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table +template<> +struct AttributeTraits { - typedef PoolAttributeBuilder Builder; - static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return PoolAttributeTypeTable(); } + static const Attribute enum_value = Attribute::MinimumAttribute; +}; + +template<> +struct AttributeTraits +{ + static const Attribute enum_value = Attribute::MulAttribute; +}; + +template<> +struct AttributeTraits +{ + static const Attribute enum_value = Attribute::PowAttribute; +}; + +template<> +struct AttributeTraits +{ + static const Attribute enum_value = Attribute::SubAttribute; +}; + +template<> +struct AttributeTraits +{ + static const Attribute enum_value = Attribute::TableAttribute; +}; + +template<> +struct AttributeTraits +{ + static const Attribute enum_value = Attribute::AbsAttribute; +}; + +template<> +struct AttributeTraits +{ + static const Attribute enum_value = Attribute::BitwiseNotAttribute; +}; + +template<> +struct AttributeTraits +{ + static const Attribute enum_value = Attribute::CeilAttribute; +}; + +template<> +struct AttributeTraits +{ + static const Attribute enum_value = Attribute::ClzAttribute; +}; + +template<> +struct AttributeTraits +{ + static const Attribute enum_value = Attribute::CosAttribute; +}; + +template<> +struct AttributeTraits +{ + static const Attribute enum_value = Attribute::ExpAttribute; +}; + +template<> +struct 
AttributeTraits +{ + static const Attribute enum_value = Attribute::FloorAttribute; +}; + +template<> +struct AttributeTraits +{ + static const Attribute enum_value = Attribute::LogAttribute; +}; + +template<> +struct AttributeTraits +{ + static const Attribute enum_value = Attribute::LogicalNotAttribute; +}; + +template<> +struct AttributeTraits +{ + static const Attribute enum_value = Attribute::NegateAttribute; +}; + +template<> +struct AttributeTraits +{ + static const Attribute enum_value = Attribute::ReciprocalAttribute; +}; + +template<> +struct AttributeTraits +{ + static const Attribute enum_value = Attribute::RsqrtAttribute; +}; + +template<> +struct AttributeTraits +{ + static const Attribute enum_value = Attribute::SinAttribute; +}; + +template<> +struct AttributeTraits +{ + static const Attribute enum_value = Attribute::SelectAttribute; +}; + +template<> +struct AttributeTraits +{ + static const Attribute enum_value = Attribute::EqualAttribute; +}; + +template<> +struct AttributeTraits +{ + static const Attribute enum_value = Attribute::GreaterAttribute; +}; + +template<> +struct AttributeTraits +{ + static const Attribute enum_value = Attribute::GreaterEqualAttribute; +}; + +template<> +struct AttributeTraits +{ + static const Attribute enum_value = Attribute::ReduceAllAttribute; +}; + +template<> +struct AttributeTraits +{ + static const Attribute enum_value = Attribute::ReduceAnyAttribute; +}; + +template<> +struct AttributeTraits +{ + static const Attribute enum_value = Attribute::ReduceMaxAttribute; +}; + +template<> +struct AttributeTraits +{ + static const Attribute enum_value = Attribute::ReduceMinAttribute; +}; + +template<> +struct AttributeTraits +{ + static const Attribute enum_value = Attribute::ReduceProductAttribute; +}; + +template<> +struct AttributeTraits +{ + static const Attribute enum_value = Attribute::ReduceSumAttribute; +}; + +template<> +struct AttributeTraits +{ + static const Attribute enum_value = Attribute::ConcatAttribute; 
+}; + +template<> +struct AttributeTraits +{ + static const Attribute enum_value = Attribute::PadAttribute; +}; + +template<> +struct AttributeTraits +{ + static const Attribute enum_value = Attribute::ReshapeAttribute; +}; + +template<> +struct AttributeTraits +{ + static const Attribute enum_value = Attribute::ReverseAttribute; +}; + +template<> +struct AttributeTraits +{ + static const Attribute enum_value = Attribute::SliceAttribute; +}; + +template<> +struct AttributeTraits +{ + static const Attribute enum_value = Attribute::TileAttribute; +}; + +template<> +struct AttributeTraits +{ + static const Attribute enum_value = Attribute::TransposeAttribute; +}; + +template<> +struct AttributeTraits +{ + static const Attribute enum_value = Attribute::GatherAttribute; +}; + +template<> +struct AttributeTraits +{ + static const Attribute enum_value = Attribute::ScatterAttribute; +}; + +template<> +struct AttributeTraits +{ + static const Attribute enum_value = Attribute::ResizeAttribute; +}; + +template<> +struct AttributeTraits +{ + static const Attribute enum_value = Attribute::CastAttribute; +}; + +template<> +struct AttributeTraits +{ + static const Attribute enum_value = Attribute::RescaleAttribute; +}; + +template<> +struct AttributeTraits +{ + static const Attribute enum_value = Attribute::ConstAttribute; +}; + +template<> +struct AttributeTraits +{ + static const Attribute enum_value = Attribute::IdentityAttribute; +}; + +template<> +struct AttributeTraits +{ + static const Attribute enum_value = Attribute::CustomAttribute; +}; + +template<> +struct AttributeTraits +{ + static const Attribute enum_value = Attribute::CondIfAttribute; +}; + +template<> +struct AttributeTraits +{ + static const Attribute enum_value = Attribute::WhileLoopAttribute; +}; + +template<> +struct AttributeTraits +{ + static const Attribute enum_value = Attribute::VariableAttribute; +}; + +template<> +struct AttributeTraits +{ + static const Attribute enum_value = 
Attribute::VariableWriteAttribute; +}; + +template<> +struct AttributeTraits +{ + static const Attribute enum_value = Attribute::VariableReadAttribute; +}; + +template<> +struct AttributeTraits +{ + static const Attribute enum_value = Attribute::ConstShapeAttribute; +}; + +bool VerifyAttribute(::flatbuffers::Verifier &verifier, const void *obj, Attribute type); +bool VerifyAttributeVector(::flatbuffers::Verifier &verifier, + const ::flatbuffers::Vector<::flatbuffers::Offset> *values, const ::flatbuffers::Vector *types); + +struct ArgMaxAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table +{ + typedef ArgMaxAttributeBuilder Builder; + static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return ArgMaxAttributeTypeTable(); } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE + { + VT_AXIS = 4, + VT_NAN_MODE = 6 + }; + int32_t axis() const { return GetField(VT_AXIS, 0); } + tosaFb::NanPropagationMode nan_mode() const + { + return static_cast(GetField(VT_NAN_MODE, 0)); + } + bool Verify(::flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyField(verifier, VT_AXIS, 4) && + VerifyField(verifier, VT_NAN_MODE, 4) && verifier.EndTable(); + } +}; + +struct ArgMaxAttributeBuilder +{ + typedef ArgMaxAttribute Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_axis(int32_t axis) { fbb_.AddElement(ArgMaxAttribute::VT_AXIS, axis, 0); } + void add_nan_mode(tosaFb::NanPropagationMode nan_mode) + { + fbb_.AddElement(ArgMaxAttribute::VT_NAN_MODE, static_cast(nan_mode), 0); + } + explicit ArgMaxAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } + ::flatbuffers::Offset Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateArgMaxAttribute(::flatbuffers::FlatBufferBuilder &_fbb, + int32_t axis = 0, tosaFb::NanPropagationMode 
nan_mode = tosaFb::NanPropagationMode::UNKNOWN) +{ + ArgMaxAttributeBuilder builder_(_fbb); + builder_.add_nan_mode(nan_mode); + builder_.add_axis(axis); + return builder_.Finish(); +} + +struct AvgPool2dAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table +{ + typedef AvgPool2dAttributeBuilder Builder; + static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return AvgPool2dAttributeTypeTable(); } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE + { + VT_KERNEL = 4, + VT_STRIDE = 6, + VT_PAD = 8, + VT_ACC_TYPE = 10 + }; + const ::flatbuffers::Vector *kernel() const + { + return GetPointer *>(VT_KERNEL); + } + const ::flatbuffers::Vector *stride() const + { + return GetPointer *>(VT_STRIDE); + } + const ::flatbuffers::Vector *pad() const + { + return GetPointer *>(VT_PAD); + } + tosaFb::DType acc_type() const { return static_cast(GetField(VT_ACC_TYPE, 0)); } + bool Verify(::flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_KERNEL) && verifier.VerifyVector(kernel()) && + VerifyOffset(verifier, VT_STRIDE) && verifier.VerifyVector(stride()) && VerifyOffset(verifier, VT_PAD) && + verifier.VerifyVector(pad()) && VerifyField(verifier, VT_ACC_TYPE, 4) && verifier.EndTable(); + } +}; + +struct AvgPool2dAttributeBuilder +{ + typedef AvgPool2dAttribute Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_kernel(::flatbuffers::Offset<::flatbuffers::Vector> kernel) + { + fbb_.AddOffset(AvgPool2dAttribute::VT_KERNEL, kernel); + } + void add_stride(::flatbuffers::Offset<::flatbuffers::Vector> stride) + { + fbb_.AddOffset(AvgPool2dAttribute::VT_STRIDE, stride); + } + void add_pad(::flatbuffers::Offset<::flatbuffers::Vector> pad) + { + fbb_.AddOffset(AvgPool2dAttribute::VT_PAD, pad); + } + void add_acc_type(tosaFb::DType acc_type) + { + fbb_.AddElement(AvgPool2dAttribute::VT_ACC_TYPE, static_cast(acc_type), 0); + } + explicit 
AvgPool2dAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateAvgPool2dAttribute(::flatbuffers::FlatBufferBuilder &_fbb, + ::flatbuffers::Offset<::flatbuffers::Vector> kernel = 0, ::flatbuffers::Offset<::flatbuffers::Vector> stride = 0, + ::flatbuffers::Offset<::flatbuffers::Vector> pad = 0, tosaFb::DType acc_type = tosaFb::DType::UNKNOWN) +{ + AvgPool2dAttributeBuilder builder_(_fbb); + builder_.add_acc_type(acc_type); + builder_.add_pad(pad); + builder_.add_stride(stride); + builder_.add_kernel(kernel); + return builder_.Finish(); +} + +inline ::flatbuffers::Offset CreateAvgPool2dAttributeDirect(::flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *kernel = nullptr, const std::vector *stride = nullptr, + const std::vector *pad = nullptr, tosaFb::DType acc_type = tosaFb::DType::UNKNOWN) +{ + auto kernel__ = kernel ? _fbb.CreateVector(*kernel) : 0; + auto stride__ = stride ? _fbb.CreateVector(*stride) : 0; + auto pad__ = pad ? 
_fbb.CreateVector(*pad) : 0; + return tosaFb::CreateAvgPool2dAttribute(_fbb, kernel__, stride__, pad__, acc_type); +} + +struct Conv2dAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table +{ + typedef Conv2dAttributeBuilder Builder; + static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return Conv2dAttributeTypeTable(); } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE + { + VT_PAD = 4, + VT_STRIDE = 6, + VT_DILATION = 8, + VT_LOCAL_BOUND = 10, + VT_ACC_TYPE = 12 + }; + const ::flatbuffers::Vector *pad() const + { + return GetPointer *>(VT_PAD); + } + const ::flatbuffers::Vector *stride() const + { + return GetPointer *>(VT_STRIDE); + } + const ::flatbuffers::Vector *dilation() const + { + return GetPointer *>(VT_DILATION); + } + bool local_bound() const { return GetField(VT_LOCAL_BOUND, 0) != 0; } + tosaFb::DType acc_type() const { return static_cast(GetField(VT_ACC_TYPE, 0)); } + bool Verify(::flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_PAD) && verifier.VerifyVector(pad()) && + VerifyOffset(verifier, VT_STRIDE) && verifier.VerifyVector(stride()) && VerifyOffset(verifier, VT_DILATION) && + verifier.VerifyVector(dilation()) && VerifyField(verifier, VT_LOCAL_BOUND, 1) && + VerifyField(verifier, VT_ACC_TYPE, 4) && verifier.EndTable(); + } +}; + +struct Conv2dAttributeBuilder +{ + typedef Conv2dAttribute Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_pad(::flatbuffers::Offset<::flatbuffers::Vector> pad) + { + fbb_.AddOffset(Conv2dAttribute::VT_PAD, pad); + } + void add_stride(::flatbuffers::Offset<::flatbuffers::Vector> stride) + { + fbb_.AddOffset(Conv2dAttribute::VT_STRIDE, stride); + } + void add_dilation(::flatbuffers::Offset<::flatbuffers::Vector> dilation) + { + fbb_.AddOffset(Conv2dAttribute::VT_DILATION, dilation); + } + void add_local_bound(bool local_bound) + { + 
fbb_.AddElement(Conv2dAttribute::VT_LOCAL_BOUND, static_cast(local_bound), 0); + } + void add_acc_type(tosaFb::DType acc_type) + { + fbb_.AddElement(Conv2dAttribute::VT_ACC_TYPE, static_cast(acc_type), 0); + } + explicit Conv2dAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } + ::flatbuffers::Offset Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateConv2dAttribute(::flatbuffers::FlatBufferBuilder &_fbb, + ::flatbuffers::Offset<::flatbuffers::Vector> pad = 0, ::flatbuffers::Offset<::flatbuffers::Vector> stride = 0, + ::flatbuffers::Offset<::flatbuffers::Vector> dilation = 0, bool local_bound = false, + tosaFb::DType acc_type = tosaFb::DType::UNKNOWN) +{ + Conv2dAttributeBuilder builder_(_fbb); + builder_.add_acc_type(acc_type); + builder_.add_dilation(dilation); + builder_.add_stride(stride); + builder_.add_pad(pad); + builder_.add_local_bound(local_bound); + return builder_.Finish(); +} + +inline ::flatbuffers::Offset CreateConv2dAttributeDirect(::flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *pad = nullptr, const std::vector *stride = nullptr, + const std::vector *dilation = nullptr, bool local_bound = false, tosaFb::DType acc_type = tosaFb::DType::UNKNOWN) +{ + auto pad__ = pad ? _fbb.CreateVector(*pad) : 0; + auto stride__ = stride ? _fbb.CreateVector(*stride) : 0; + auto dilation__ = dilation ? 
_fbb.CreateVector(*dilation) : 0; + return tosaFb::CreateConv2dAttribute(_fbb, pad__, stride__, dilation__, local_bound, acc_type); +} + +struct Conv3dAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table +{ + typedef Conv3dAttributeBuilder Builder; + static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return Conv3dAttributeTypeTable(); } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE + { + VT_PAD = 4, + VT_STRIDE = 6, + VT_DILATION = 8, + VT_LOCAL_BOUND = 10, + VT_ACC_TYPE = 12 + }; + const ::flatbuffers::Vector *pad() const + { + return GetPointer *>(VT_PAD); + } + const ::flatbuffers::Vector *stride() const + { + return GetPointer *>(VT_STRIDE); + } + const ::flatbuffers::Vector *dilation() const + { + return GetPointer *>(VT_DILATION); + } + bool local_bound() const { return GetField(VT_LOCAL_BOUND, 0) != 0; } + tosaFb::DType acc_type() const { return static_cast(GetField(VT_ACC_TYPE, 0)); } + bool Verify(::flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_PAD) && verifier.VerifyVector(pad()) && + VerifyOffset(verifier, VT_STRIDE) && verifier.VerifyVector(stride()) && VerifyOffset(verifier, VT_DILATION) && + verifier.VerifyVector(dilation()) && VerifyField(verifier, VT_LOCAL_BOUND, 1) && + VerifyField(verifier, VT_ACC_TYPE, 4) && verifier.EndTable(); + } +}; + +struct Conv3dAttributeBuilder +{ + typedef Conv3dAttribute Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_pad(::flatbuffers::Offset<::flatbuffers::Vector> pad) + { + fbb_.AddOffset(Conv3dAttribute::VT_PAD, pad); + } + void add_stride(::flatbuffers::Offset<::flatbuffers::Vector> stride) + { + fbb_.AddOffset(Conv3dAttribute::VT_STRIDE, stride); + } + void add_dilation(::flatbuffers::Offset<::flatbuffers::Vector> dilation) + { + fbb_.AddOffset(Conv3dAttribute::VT_DILATION, dilation); + } + void add_local_bound(bool local_bound) + { + 
fbb_.AddElement(Conv3dAttribute::VT_LOCAL_BOUND, static_cast(local_bound), 0); + } + void add_acc_type(tosaFb::DType acc_type) + { + fbb_.AddElement(Conv3dAttribute::VT_ACC_TYPE, static_cast(acc_type), 0); + } + explicit Conv3dAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } + ::flatbuffers::Offset Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateConv3dAttribute(::flatbuffers::FlatBufferBuilder &_fbb, + ::flatbuffers::Offset<::flatbuffers::Vector> pad = 0, ::flatbuffers::Offset<::flatbuffers::Vector> stride = 0, + ::flatbuffers::Offset<::flatbuffers::Vector> dilation = 0, bool local_bound = false, + tosaFb::DType acc_type = tosaFb::DType::UNKNOWN) +{ + Conv3dAttributeBuilder builder_(_fbb); + builder_.add_acc_type(acc_type); + builder_.add_dilation(dilation); + builder_.add_stride(stride); + builder_.add_pad(pad); + builder_.add_local_bound(local_bound); + return builder_.Finish(); +} + +inline ::flatbuffers::Offset CreateConv3dAttributeDirect(::flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *pad = nullptr, const std::vector *stride = nullptr, + const std::vector *dilation = nullptr, bool local_bound = false, tosaFb::DType acc_type = tosaFb::DType::UNKNOWN) +{ + auto pad__ = pad ? _fbb.CreateVector(*pad) : 0; + auto stride__ = stride ? _fbb.CreateVector(*stride) : 0; + auto dilation__ = dilation ? 
_fbb.CreateVector(*dilation) : 0; + return tosaFb::CreateConv3dAttribute(_fbb, pad__, stride__, dilation__, local_bound, acc_type); +} + +struct DepthwiseConv2dAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table +{ + typedef DepthwiseConv2dAttributeBuilder Builder; + static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return DepthwiseConv2dAttributeTypeTable(); } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE + { + VT_PAD = 4, + VT_STRIDE = 6, + VT_DILATION = 8, + VT_LOCAL_BOUND = 10, + VT_ACC_TYPE = 12 + }; + const ::flatbuffers::Vector *pad() const + { + return GetPointer *>(VT_PAD); + } + const ::flatbuffers::Vector *stride() const + { + return GetPointer *>(VT_STRIDE); + } + const ::flatbuffers::Vector *dilation() const + { + return GetPointer *>(VT_DILATION); + } + bool local_bound() const { return GetField(VT_LOCAL_BOUND, 0) != 0; } + tosaFb::DType acc_type() const { return static_cast(GetField(VT_ACC_TYPE, 0)); } + bool Verify(::flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_PAD) && verifier.VerifyVector(pad()) && + VerifyOffset(verifier, VT_STRIDE) && verifier.VerifyVector(stride()) && VerifyOffset(verifier, VT_DILATION) && + verifier.VerifyVector(dilation()) && VerifyField(verifier, VT_LOCAL_BOUND, 1) && + VerifyField(verifier, VT_ACC_TYPE, 4) && verifier.EndTable(); + } +}; + +struct DepthwiseConv2dAttributeBuilder +{ + typedef DepthwiseConv2dAttribute Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_pad(::flatbuffers::Offset<::flatbuffers::Vector> pad) + { + fbb_.AddOffset(DepthwiseConv2dAttribute::VT_PAD, pad); + } + void add_stride(::flatbuffers::Offset<::flatbuffers::Vector> stride) + { + fbb_.AddOffset(DepthwiseConv2dAttribute::VT_STRIDE, stride); + } + void add_dilation(::flatbuffers::Offset<::flatbuffers::Vector> dilation) + { + fbb_.AddOffset(DepthwiseConv2dAttribute::VT_DILATION, dilation); + } + 
void add_local_bound(bool local_bound) + { + fbb_.AddElement(DepthwiseConv2dAttribute::VT_LOCAL_BOUND, static_cast(local_bound), 0); + } + void add_acc_type(tosaFb::DType acc_type) + { + fbb_.AddElement(DepthwiseConv2dAttribute::VT_ACC_TYPE, static_cast(acc_type), 0); + } + explicit DepthwiseConv2dAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateDepthwiseConv2dAttribute(::flatbuffers::FlatBufferBuilder &_fbb, + ::flatbuffers::Offset<::flatbuffers::Vector> pad = 0, ::flatbuffers::Offset<::flatbuffers::Vector> stride = 0, + ::flatbuffers::Offset<::flatbuffers::Vector> dilation = 0, bool local_bound = false, + tosaFb::DType acc_type = tosaFb::DType::UNKNOWN) +{ + DepthwiseConv2dAttributeBuilder builder_(_fbb); + builder_.add_acc_type(acc_type); + builder_.add_dilation(dilation); + builder_.add_stride(stride); + builder_.add_pad(pad); + builder_.add_local_bound(local_bound); + return builder_.Finish(); +} + +inline ::flatbuffers::Offset CreateDepthwiseConv2dAttributeDirect( + ::flatbuffers::FlatBufferBuilder &_fbb, const std::vector *pad = nullptr, const std::vector *stride = nullptr, + const std::vector *dilation = nullptr, bool local_bound = false, tosaFb::DType acc_type = tosaFb::DType::UNKNOWN) +{ + auto pad__ = pad ? _fbb.CreateVector(*pad) : 0; + auto stride__ = stride ? _fbb.CreateVector(*stride) : 0; + auto dilation__ = dilation ? 
_fbb.CreateVector(*dilation) : 0; + return tosaFb::CreateDepthwiseConv2dAttribute(_fbb, pad__, stride__, dilation__, local_bound, acc_type); +} + +struct FFT2dAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table +{ + typedef FFT2dAttributeBuilder Builder; + static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return FFT2dAttributeTypeTable(); } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE + { + VT_INVERSE = 4, + VT_LOCAL_BOUND = 6 + }; + bool inverse() const { return GetField(VT_INVERSE, 0) != 0; } + bool local_bound() const { return GetField(VT_LOCAL_BOUND, 0) != 0; } + bool Verify(::flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyField(verifier, VT_INVERSE, 1) && + VerifyField(verifier, VT_LOCAL_BOUND, 1) && verifier.EndTable(); + } +}; + +struct FFT2dAttributeBuilder +{ + typedef FFT2dAttribute Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_inverse(bool inverse) + { + fbb_.AddElement(FFT2dAttribute::VT_INVERSE, static_cast(inverse), 0); + } + void add_local_bound(bool local_bound) + { + fbb_.AddElement(FFT2dAttribute::VT_LOCAL_BOUND, static_cast(local_bound), 0); + } + explicit FFT2dAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } + ::flatbuffers::Offset Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset +CreateFFT2dAttribute(::flatbuffers::FlatBufferBuilder &_fbb, bool inverse = false, bool local_bound = false) +{ + FFT2dAttributeBuilder builder_(_fbb); + builder_.add_local_bound(local_bound); + builder_.add_inverse(inverse); + return builder_.Finish(); +} + +struct MatMulAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table +{ + typedef MatMulAttributeBuilder Builder; + static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return MatMulAttributeTypeTable(); } + 
bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); } +}; + +struct MatMulAttributeBuilder +{ + typedef MatMulAttribute Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit MatMulAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } + ::flatbuffers::Offset Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateMatMulAttribute(::flatbuffers::FlatBufferBuilder &_fbb) +{ + MatMulAttributeBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct MaxPool2dAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table +{ + typedef MaxPool2dAttributeBuilder Builder; + static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return MaxPool2dAttributeTypeTable(); } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE + { + VT_KERNEL = 4, + VT_STRIDE = 6, + VT_PAD = 8, + VT_NAN_MODE = 10 + }; + const ::flatbuffers::Vector *kernel() const + { + return GetPointer *>(VT_KERNEL); + } + const ::flatbuffers::Vector *stride() const + { + return GetPointer *>(VT_STRIDE); + } + const ::flatbuffers::Vector *pad() const + { + return GetPointer *>(VT_PAD); + } + tosaFb::NanPropagationMode nan_mode() const + { + return static_cast(GetField(VT_NAN_MODE, 0)); + } + bool Verify(::flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_KERNEL) && verifier.VerifyVector(kernel()) && + VerifyOffset(verifier, VT_STRIDE) && verifier.VerifyVector(stride()) && VerifyOffset(verifier, VT_PAD) && + verifier.VerifyVector(pad()) && VerifyField(verifier, VT_NAN_MODE, 4) && verifier.EndTable(); + } +}; + +struct MaxPool2dAttributeBuilder +{ + typedef MaxPool2dAttribute Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void 
add_kernel(::flatbuffers::Offset<::flatbuffers::Vector> kernel) + { + fbb_.AddOffset(MaxPool2dAttribute::VT_KERNEL, kernel); + } + void add_stride(::flatbuffers::Offset<::flatbuffers::Vector> stride) + { + fbb_.AddOffset(MaxPool2dAttribute::VT_STRIDE, stride); + } + void add_pad(::flatbuffers::Offset<::flatbuffers::Vector> pad) + { + fbb_.AddOffset(MaxPool2dAttribute::VT_PAD, pad); + } + void add_nan_mode(tosaFb::NanPropagationMode nan_mode) + { + fbb_.AddElement(MaxPool2dAttribute::VT_NAN_MODE, static_cast(nan_mode), 0); + } + explicit MaxPool2dAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateMaxPool2dAttribute(::flatbuffers::FlatBufferBuilder &_fbb, + ::flatbuffers::Offset<::flatbuffers::Vector> kernel = 0, ::flatbuffers::Offset<::flatbuffers::Vector> stride = 0, + ::flatbuffers::Offset<::flatbuffers::Vector> pad = 0, tosaFb::NanPropagationMode nan_mode = tosaFb::NanPropagationMode::UNKNOWN) +{ + MaxPool2dAttributeBuilder builder_(_fbb); + builder_.add_nan_mode(nan_mode); + builder_.add_pad(pad); + builder_.add_stride(stride); + builder_.add_kernel(kernel); + return builder_.Finish(); +} + +inline ::flatbuffers::Offset CreateMaxPool2dAttributeDirect(::flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *kernel = nullptr, const std::vector *stride = nullptr, + const std::vector *pad = nullptr, tosaFb::NanPropagationMode nan_mode = tosaFb::NanPropagationMode::UNKNOWN) +{ + auto kernel__ = kernel ? _fbb.CreateVector(*kernel) : 0; + auto stride__ = stride ? _fbb.CreateVector(*stride) : 0; + auto pad__ = pad ? 
_fbb.CreateVector(*pad) : 0; + return tosaFb::CreateMaxPool2dAttribute(_fbb, kernel__, stride__, pad__, nan_mode); +} + +struct RFFT2dAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table +{ + typedef RFFT2dAttributeBuilder Builder; + static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return RFFT2dAttributeTypeTable(); } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE + { + VT_LOCAL_BOUND = 4 + }; + bool local_bound() const { return GetField(VT_LOCAL_BOUND, 0) != 0; } + bool Verify(::flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyField(verifier, VT_LOCAL_BOUND, 1) && verifier.EndTable(); + } +}; + +struct RFFT2dAttributeBuilder +{ + typedef RFFT2dAttribute Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_local_bound(bool local_bound) + { + fbb_.AddElement(RFFT2dAttribute::VT_LOCAL_BOUND, static_cast(local_bound), 0); + } + explicit RFFT2dAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } + ::flatbuffers::Offset Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateRFFT2dAttribute(::flatbuffers::FlatBufferBuilder &_fbb, bool local_bound = false) +{ + RFFT2dAttributeBuilder builder_(_fbb); + builder_.add_local_bound(local_bound); + return builder_.Finish(); +} + +struct TransposeConv2dAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table +{ + typedef TransposeConv2dAttributeBuilder Builder; + static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return TransposeConv2dAttributeTypeTable(); } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE + { + VT_OUT_PAD = 4, + VT_STRIDE = 6, + VT_LOCAL_BOUND = 8, + VT_ACC_TYPE = 10 + }; + const ::flatbuffers::Vector *out_pad() const + { + return GetPointer *>(VT_OUT_PAD); + } + const ::flatbuffers::Vector *stride() const 
+ { + return GetPointer *>(VT_STRIDE); + } + bool local_bound() const { return GetField(VT_LOCAL_BOUND, 0) != 0; } + tosaFb::DType acc_type() const { return static_cast(GetField(VT_ACC_TYPE, 0)); } + bool Verify(::flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_OUT_PAD) && verifier.VerifyVector(out_pad()) && + VerifyOffset(verifier, VT_STRIDE) && verifier.VerifyVector(stride()) && VerifyField(verifier, VT_LOCAL_BOUND, 1) && + VerifyField(verifier, VT_ACC_TYPE, 4) && verifier.EndTable(); + } +}; + +struct TransposeConv2dAttributeBuilder +{ + typedef TransposeConv2dAttribute Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_out_pad(::flatbuffers::Offset<::flatbuffers::Vector> out_pad) + { + fbb_.AddOffset(TransposeConv2dAttribute::VT_OUT_PAD, out_pad); + } + void add_stride(::flatbuffers::Offset<::flatbuffers::Vector> stride) + { + fbb_.AddOffset(TransposeConv2dAttribute::VT_STRIDE, stride); + } + void add_local_bound(bool local_bound) + { + fbb_.AddElement(TransposeConv2dAttribute::VT_LOCAL_BOUND, static_cast(local_bound), 0); + } + void add_acc_type(tosaFb::DType acc_type) + { + fbb_.AddElement(TransposeConv2dAttribute::VT_ACC_TYPE, static_cast(acc_type), 0); + } + explicit TransposeConv2dAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateTransposeConv2dAttribute(::flatbuffers::FlatBufferBuilder &_fbb, + ::flatbuffers::Offset<::flatbuffers::Vector> out_pad = 0, ::flatbuffers::Offset<::flatbuffers::Vector> stride = 0, + bool local_bound = false, tosaFb::DType acc_type = tosaFb::DType::UNKNOWN) +{ + TransposeConv2dAttributeBuilder builder_(_fbb); + builder_.add_acc_type(acc_type); + builder_.add_stride(stride); + 
builder_.add_out_pad(out_pad); + builder_.add_local_bound(local_bound); + return builder_.Finish(); +} + +inline ::flatbuffers::Offset CreateTransposeConv2dAttributeDirect( + ::flatbuffers::FlatBufferBuilder &_fbb, const std::vector *out_pad = nullptr, + const std::vector *stride = nullptr, bool local_bound = false, tosaFb::DType acc_type = tosaFb::DType::UNKNOWN) +{ + auto out_pad__ = out_pad ? _fbb.CreateVector(*out_pad) : 0; + auto stride__ = stride ? _fbb.CreateVector(*stride) : 0; + return tosaFb::CreateTransposeConv2dAttribute(_fbb, out_pad__, stride__, local_bound, acc_type); +} + +struct ClampAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table +{ + typedef ClampAttributeBuilder Builder; + static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return ClampAttributeTypeTable(); } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE + { + VT_MIN_VAL = 4, + VT_MAX_VAL = 6, + VT_NAN_MODE = 8 + }; + const ::flatbuffers::Vector *min_val() const + { + return GetPointer *>(VT_MIN_VAL); + } + const ::flatbuffers::Vector *max_val() const + { + return GetPointer *>(VT_MAX_VAL); + } + tosaFb::NanPropagationMode nan_mode() const + { + return static_cast(GetField(VT_NAN_MODE, 0)); + } + bool Verify(::flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_MIN_VAL) && verifier.VerifyVector(min_val()) && VerifyOffset(verifier, VT_MAX_VAL) && + verifier.VerifyVector(max_val()) && VerifyField(verifier, VT_NAN_MODE, 4) && verifier.EndTable(); + } +}; + +struct ClampAttributeBuilder +{ + typedef ClampAttribute Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_min_val(::flatbuffers::Offset<::flatbuffers::Vector> min_val) + { + fbb_.AddOffset(ClampAttribute::VT_MIN_VAL, min_val); + } + void add_max_val(::flatbuffers::Offset<::flatbuffers::Vector> max_val) + { + fbb_.AddOffset(ClampAttribute::VT_MAX_VAL, max_val); + } + void 
add_nan_mode(tosaFb::NanPropagationMode nan_mode) + { + fbb_.AddElement(ClampAttribute::VT_NAN_MODE, static_cast(nan_mode), 0); + } + explicit ClampAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } + ::flatbuffers::Offset Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateClampAttribute(::flatbuffers::FlatBufferBuilder &_fbb, + ::flatbuffers::Offset<::flatbuffers::Vector> min_val = 0, ::flatbuffers::Offset<::flatbuffers::Vector> max_val = 0, + tosaFb::NanPropagationMode nan_mode = tosaFb::NanPropagationMode::UNKNOWN) +{ + ClampAttributeBuilder builder_(_fbb); + builder_.add_nan_mode(nan_mode); + builder_.add_max_val(max_val); + builder_.add_min_val(min_val); + return builder_.Finish(); +} + +inline ::flatbuffers::Offset CreateClampAttributeDirect(::flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *min_val = nullptr, const std::vector *max_val = nullptr, + tosaFb::NanPropagationMode nan_mode = tosaFb::NanPropagationMode::UNKNOWN) +{ + if ( min_val ) + { + _fbb.ForceVectorAlignment(min_val->size(), sizeof(uint8_t), 8); + } + auto min_val__ = min_val ? _fbb.CreateVector(*min_val) : 0; + if ( max_val ) + { + _fbb.ForceVectorAlignment(max_val->size(), sizeof(uint8_t), 8); + } + auto max_val__ = max_val ? 
_fbb.CreateVector(*max_val) : 0; + return tosaFb::CreateClampAttribute(_fbb, min_val__, max_val__, nan_mode); +} + +struct ErfAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table +{ + typedef ErfAttributeBuilder Builder; + static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return ErfAttributeTypeTable(); } + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); } +}; + +struct ErfAttributeBuilder +{ + typedef ErfAttribute Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit ErfAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } + ::flatbuffers::Offset Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateErfAttribute(::flatbuffers::FlatBufferBuilder &_fbb) +{ + ErfAttributeBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct SigmoidAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table +{ + typedef SigmoidAttributeBuilder Builder; + static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return SigmoidAttributeTypeTable(); } + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); } +}; + +struct SigmoidAttributeBuilder +{ + typedef SigmoidAttribute Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit SigmoidAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateSigmoidAttribute(::flatbuffers::FlatBufferBuilder &_fbb) +{ + SigmoidAttributeBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct TanhAttribute FLATBUFFERS_FINAL_CLASS : 
private ::flatbuffers::Table +{ + typedef TanhAttributeBuilder Builder; + static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return TanhAttributeTypeTable(); } + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); } +}; + +struct TanhAttributeBuilder +{ + typedef TanhAttribute Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit TanhAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } + ::flatbuffers::Offset Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateTanhAttribute(::flatbuffers::FlatBufferBuilder &_fbb) +{ + TanhAttributeBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct AddAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table +{ + typedef AddAttributeBuilder Builder; + static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return AddAttributeTypeTable(); } + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); } +}; + +struct AddAttributeBuilder +{ + typedef AddAttribute Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit AddAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } + ::flatbuffers::Offset Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateAddAttribute(::flatbuffers::FlatBufferBuilder &_fbb) +{ + AddAttributeBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct ArithmeticRightShiftAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table +{ + typedef ArithmeticRightShiftAttributeBuilder Builder; + static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return 
ArithmeticRightShiftAttributeTypeTable(); } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE + { + VT_ROUND = 4 + }; + bool round() const { return GetField(VT_ROUND, 0) != 0; } + bool Verify(::flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyField(verifier, VT_ROUND, 1) && verifier.EndTable(); + } +}; + +struct ArithmeticRightShiftAttributeBuilder +{ + typedef ArithmeticRightShiftAttribute Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_round(bool round) + { + fbb_.AddElement(ArithmeticRightShiftAttribute::VT_ROUND, static_cast(round), 0); + } + explicit ArithmeticRightShiftAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset +CreateArithmeticRightShiftAttribute(::flatbuffers::FlatBufferBuilder &_fbb, bool round = false) +{ + ArithmeticRightShiftAttributeBuilder builder_(_fbb); + builder_.add_round(round); + return builder_.Finish(); +} + +struct BitwiseAndAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table +{ + typedef BitwiseAndAttributeBuilder Builder; + static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return BitwiseAndAttributeTypeTable(); } + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); } +}; + +struct BitwiseAndAttributeBuilder +{ + typedef BitwiseAndAttribute Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit BitwiseAndAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset 
CreateBitwiseAndAttribute(::flatbuffers::FlatBufferBuilder &_fbb) +{ + BitwiseAndAttributeBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct BitwiseOrAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table +{ + typedef BitwiseOrAttributeBuilder Builder; + static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return BitwiseOrAttributeTypeTable(); } + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); } +}; + +struct BitwiseOrAttributeBuilder +{ + typedef BitwiseOrAttribute Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit BitwiseOrAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateBitwiseOrAttribute(::flatbuffers::FlatBufferBuilder &_fbb) +{ + BitwiseOrAttributeBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct BitwiseXorAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table +{ + typedef BitwiseXorAttributeBuilder Builder; + static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return BitwiseXorAttributeTypeTable(); } + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); } +}; + +struct BitwiseXorAttributeBuilder +{ + typedef BitwiseXorAttribute Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit BitwiseXorAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateBitwiseXorAttribute(::flatbuffers::FlatBufferBuilder &_fbb) +{ + 
BitwiseXorAttributeBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct IntDivAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table +{ + typedef IntDivAttributeBuilder Builder; + static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return IntDivAttributeTypeTable(); } + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); } +}; + +struct IntDivAttributeBuilder +{ + typedef IntDivAttribute Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit IntDivAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } + ::flatbuffers::Offset Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateIntDivAttribute(::flatbuffers::FlatBufferBuilder &_fbb) +{ + IntDivAttributeBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct LogicalAndAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table +{ + typedef LogicalAndAttributeBuilder Builder; + static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return LogicalAndAttributeTypeTable(); } + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); } +}; + +struct LogicalAndAttributeBuilder +{ + typedef LogicalAndAttribute Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit LogicalAndAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateLogicalAndAttribute(::flatbuffers::FlatBufferBuilder &_fbb) +{ + LogicalAndAttributeBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct LogicalLeftShiftAttribute 
FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table +{ + typedef LogicalLeftShiftAttributeBuilder Builder; + static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return LogicalLeftShiftAttributeTypeTable(); } + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); } +}; + +struct LogicalLeftShiftAttributeBuilder +{ + typedef LogicalLeftShiftAttribute Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit LogicalLeftShiftAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateLogicalLeftShiftAttribute(::flatbuffers::FlatBufferBuilder &_fbb) +{ + LogicalLeftShiftAttributeBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct LogicalRightShiftAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table +{ + typedef LogicalRightShiftAttributeBuilder Builder; + static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return LogicalRightShiftAttributeTypeTable(); } + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); } +}; + +struct LogicalRightShiftAttributeBuilder +{ + typedef LogicalRightShiftAttribute Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit LogicalRightShiftAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateLogicalRightShiftAttribute(::flatbuffers::FlatBufferBuilder &_fbb) +{ + LogicalRightShiftAttributeBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct 
LogicalOrAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table +{ + typedef LogicalOrAttributeBuilder Builder; + static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return LogicalOrAttributeTypeTable(); } + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); } +}; + +struct LogicalOrAttributeBuilder +{ + typedef LogicalOrAttribute Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit LogicalOrAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateLogicalOrAttribute(::flatbuffers::FlatBufferBuilder &_fbb) +{ + LogicalOrAttributeBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct LogicalXorAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table +{ + typedef LogicalXorAttributeBuilder Builder; + static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return LogicalXorAttributeTypeTable(); } + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); } +}; + +struct LogicalXorAttributeBuilder +{ + typedef LogicalXorAttribute Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit LogicalXorAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateLogicalXorAttribute(::flatbuffers::FlatBufferBuilder &_fbb) +{ + LogicalXorAttributeBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct MaximumAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table +{ + typedef 
MaximumAttributeBuilder Builder; + static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return MaximumAttributeTypeTable(); } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE + { + VT_NAN_MODE = 4 + }; + tosaFb::NanPropagationMode nan_mode() const + { + return static_cast(GetField(VT_NAN_MODE, 0)); + } + bool Verify(::flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyField(verifier, VT_NAN_MODE, 4) && verifier.EndTable(); + } +}; + +struct MaximumAttributeBuilder +{ + typedef MaximumAttribute Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_nan_mode(tosaFb::NanPropagationMode nan_mode) + { + fbb_.AddElement(MaximumAttribute::VT_NAN_MODE, static_cast(nan_mode), 0); + } + explicit MaximumAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateMaximumAttribute( + ::flatbuffers::FlatBufferBuilder &_fbb, tosaFb::NanPropagationMode nan_mode = tosaFb::NanPropagationMode::UNKNOWN) +{ + MaximumAttributeBuilder builder_(_fbb); + builder_.add_nan_mode(nan_mode); + return builder_.Finish(); +} + +struct MinimumAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table +{ + typedef MinimumAttributeBuilder Builder; + static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return MinimumAttributeTypeTable(); } enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_PAD = 4, - VT_KERNEL = 6, - VT_STRIDE = 8, - VT_INPUT_ZP = 10, - VT_OUTPUT_ZP = 12, - VT_ACCUM_DTYPE = 14 - }; - const ::flatbuffers::Vector *pad() const + VT_NAN_MODE = 4 + }; + tosaFb::NanPropagationMode nan_mode() const + { + return static_cast(GetField(VT_NAN_MODE, 0)); + } + bool Verify(::flatbuffers::Verifier &verifier) const + { + return 
VerifyTableStart(verifier) && VerifyField(verifier, VT_NAN_MODE, 4) && verifier.EndTable(); + } +}; + +struct MinimumAttributeBuilder +{ + typedef MinimumAttribute Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_nan_mode(tosaFb::NanPropagationMode nan_mode) + { + fbb_.AddElement(MinimumAttribute::VT_NAN_MODE, static_cast(nan_mode), 0); + } + explicit MinimumAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateMinimumAttribute( + ::flatbuffers::FlatBufferBuilder &_fbb, tosaFb::NanPropagationMode nan_mode = tosaFb::NanPropagationMode::UNKNOWN) +{ + MinimumAttributeBuilder builder_(_fbb); + builder_.add_nan_mode(nan_mode); + return builder_.Finish(); +} + +struct MulAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table +{ + typedef MulAttributeBuilder Builder; + static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return MulAttributeTypeTable(); } + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); } +}; + +struct MulAttributeBuilder +{ + typedef MulAttribute Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit MulAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } + ::flatbuffers::Offset Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateMulAttribute(::flatbuffers::FlatBufferBuilder &_fbb) +{ + MulAttributeBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct PowAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table +{ + typedef PowAttributeBuilder Builder; + static const ::flatbuffers::TypeTable 
*MiniReflectTypeTable() { return PowAttributeTypeTable(); } + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); } +}; + +struct PowAttributeBuilder +{ + typedef PowAttribute Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit PowAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } + ::flatbuffers::Offset Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreatePowAttribute(::flatbuffers::FlatBufferBuilder &_fbb) +{ + PowAttributeBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct SubAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table +{ + typedef SubAttributeBuilder Builder; + static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return SubAttributeTypeTable(); } + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); } +}; + +struct SubAttributeBuilder +{ + typedef SubAttribute Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit SubAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } + ::flatbuffers::Offset Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateSubAttribute(::flatbuffers::FlatBufferBuilder &_fbb) +{ + SubAttributeBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct TableAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table +{ + typedef TableAttributeBuilder Builder; + static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return TableAttributeTypeTable(); } + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); } +}; + +struct 
TableAttributeBuilder +{ + typedef TableAttribute Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit TableAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } + ::flatbuffers::Offset Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateTableAttribute(::flatbuffers::FlatBufferBuilder &_fbb) +{ + TableAttributeBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct AbsAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table +{ + typedef AbsAttributeBuilder Builder; + static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return AbsAttributeTypeTable(); } + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); } +}; + +struct AbsAttributeBuilder +{ + typedef AbsAttribute Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit AbsAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } + ::flatbuffers::Offset Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateAbsAttribute(::flatbuffers::FlatBufferBuilder &_fbb) +{ + AbsAttributeBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct BitwiseNotAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table +{ + typedef BitwiseNotAttributeBuilder Builder; + static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return BitwiseNotAttributeTypeTable(); } + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); } +}; + +struct BitwiseNotAttributeBuilder +{ + typedef BitwiseNotAttribute Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit 
BitwiseNotAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateBitwiseNotAttribute(::flatbuffers::FlatBufferBuilder &_fbb) +{ + BitwiseNotAttributeBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct CeilAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table +{ + typedef CeilAttributeBuilder Builder; + static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return CeilAttributeTypeTable(); } + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); } +}; + +struct CeilAttributeBuilder +{ + typedef CeilAttribute Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit CeilAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } + ::flatbuffers::Offset Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateCeilAttribute(::flatbuffers::FlatBufferBuilder &_fbb) +{ + CeilAttributeBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct ClzAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table +{ + typedef ClzAttributeBuilder Builder; + static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return ClzAttributeTypeTable(); } + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); } +}; + +struct ClzAttributeBuilder +{ + typedef ClzAttribute Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit ClzAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } + ::flatbuffers::Offset Finish() + { + const auto end = 
fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateClzAttribute(::flatbuffers::FlatBufferBuilder &_fbb) +{ + ClzAttributeBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct CosAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table +{ + typedef CosAttributeBuilder Builder; + static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return CosAttributeTypeTable(); } + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); } +}; + +struct CosAttributeBuilder +{ + typedef CosAttribute Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit CosAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } + ::flatbuffers::Offset Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateCosAttribute(::flatbuffers::FlatBufferBuilder &_fbb) +{ + CosAttributeBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct ExpAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table +{ + typedef ExpAttributeBuilder Builder; + static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return ExpAttributeTypeTable(); } + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); } +}; + +struct ExpAttributeBuilder +{ + typedef ExpAttribute Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit ExpAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } + ::flatbuffers::Offset Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateExpAttribute(::flatbuffers::FlatBufferBuilder &_fbb) +{ + ExpAttributeBuilder 
builder_(_fbb); + return builder_.Finish(); +} + +struct FloorAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table +{ + typedef FloorAttributeBuilder Builder; + static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return FloorAttributeTypeTable(); } + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); } +}; + +struct FloorAttributeBuilder +{ + typedef FloorAttribute Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit FloorAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } + ::flatbuffers::Offset Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateFloorAttribute(::flatbuffers::FlatBufferBuilder &_fbb) +{ + FloorAttributeBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct LogAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table +{ + typedef LogAttributeBuilder Builder; + static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return LogAttributeTypeTable(); } + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); } +}; + +struct LogAttributeBuilder +{ + typedef LogAttribute Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit LogAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } + ::flatbuffers::Offset Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateLogAttribute(::flatbuffers::FlatBufferBuilder &_fbb) +{ + LogAttributeBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct LogicalNotAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table +{ + typedef LogicalNotAttributeBuilder Builder; + 
static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return LogicalNotAttributeTypeTable(); } + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); } +}; + +struct LogicalNotAttributeBuilder +{ + typedef LogicalNotAttribute Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit LogicalNotAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { - return GetPointer *>(VT_PAD); + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; } - const ::flatbuffers::Vector *kernel() const +}; + +inline ::flatbuffers::Offset CreateLogicalNotAttribute(::flatbuffers::FlatBufferBuilder &_fbb) +{ + LogicalNotAttributeBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct NegateAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table +{ + typedef NegateAttributeBuilder Builder; + static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return NegateAttributeTypeTable(); } + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); } +}; + +struct NegateAttributeBuilder +{ + typedef NegateAttribute Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit NegateAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } + ::flatbuffers::Offset Finish() { - return GetPointer *>(VT_KERNEL); + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; } - const ::flatbuffers::Vector *stride() const +}; + +inline ::flatbuffers::Offset CreateNegateAttribute(::flatbuffers::FlatBufferBuilder &_fbb) +{ + NegateAttributeBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct ReciprocalAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table +{ + typedef 
ReciprocalAttributeBuilder Builder; + static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return ReciprocalAttributeTypeTable(); } + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); } +}; + +struct ReciprocalAttributeBuilder +{ + typedef ReciprocalAttribute Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit ReciprocalAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { - return GetPointer *>(VT_STRIDE); + start_ = fbb_.StartTable(); } - int32_t input_zp() const { return GetField(VT_INPUT_ZP, 0); } - int32_t output_zp() const { return GetField(VT_OUTPUT_ZP, 0); } - tosaFb::DType accum_dtype() const { return static_cast(GetField(VT_ACCUM_DTYPE, 0)); } - bool Verify(::flatbuffers::Verifier &verifier) const + ::flatbuffers::Offset Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateReciprocalAttribute(::flatbuffers::FlatBufferBuilder &_fbb) +{ + ReciprocalAttributeBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct RsqrtAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table +{ + typedef RsqrtAttributeBuilder Builder; + static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return RsqrtAttributeTypeTable(); } + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); } +}; + +struct RsqrtAttributeBuilder +{ + typedef RsqrtAttribute Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit RsqrtAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } + ::flatbuffers::Offset Finish() { - return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_PAD) && verifier.VerifyVector(pad()) && VerifyOffset(verifier, VT_KERNEL) && - verifier.VerifyVector(kernel()) && 
VerifyOffset(verifier, VT_STRIDE) && verifier.VerifyVector(stride()) && - VerifyField(verifier, VT_INPUT_ZP, 4) && VerifyField(verifier, VT_OUTPUT_ZP, 4) && - VerifyField(verifier, VT_ACCUM_DTYPE, 4) && verifier.EndTable(); + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; } }; -struct PoolAttributeBuilder +inline ::flatbuffers::Offset CreateRsqrtAttribute(::flatbuffers::FlatBufferBuilder &_fbb) +{ + RsqrtAttributeBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct SinAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table +{ + typedef SinAttributeBuilder Builder; + static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return SinAttributeTypeTable(); } + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); } +}; + +struct SinAttributeBuilder { - typedef PoolAttribute Table; + typedef SinAttribute Table; ::flatbuffers::FlatBufferBuilder &fbb_; ::flatbuffers::uoffset_t start_; - void add_pad(::flatbuffers::Offset<::flatbuffers::Vector> pad) + explicit SinAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } + ::flatbuffers::Offset Finish() { - fbb_.AddOffset(PoolAttribute::VT_PAD, pad); + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; } - void add_kernel(::flatbuffers::Offset<::flatbuffers::Vector> kernel) +}; + +inline ::flatbuffers::Offset CreateSinAttribute(::flatbuffers::FlatBufferBuilder &_fbb) +{ + SinAttributeBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct SelectAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table +{ + typedef SelectAttributeBuilder Builder; + static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return SelectAttributeTypeTable(); } + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); } +}; + +struct 
SelectAttributeBuilder +{ + typedef SelectAttribute Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit SelectAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } + ::flatbuffers::Offset Finish() { - fbb_.AddOffset(PoolAttribute::VT_KERNEL, kernel); + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; } - void add_stride(::flatbuffers::Offset<::flatbuffers::Vector> stride) +}; + +inline ::flatbuffers::Offset CreateSelectAttribute(::flatbuffers::FlatBufferBuilder &_fbb) +{ + SelectAttributeBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct EqualAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table +{ + typedef EqualAttributeBuilder Builder; + static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return EqualAttributeTypeTable(); } + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); } +}; + +struct EqualAttributeBuilder +{ + typedef EqualAttribute Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit EqualAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } + ::flatbuffers::Offset Finish() { - fbb_.AddOffset(PoolAttribute::VT_STRIDE, stride); + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; } - void add_input_zp(int32_t input_zp) { fbb_.AddElement(PoolAttribute::VT_INPUT_ZP, input_zp, 0); } - void add_output_zp(int32_t output_zp) { fbb_.AddElement(PoolAttribute::VT_OUTPUT_ZP, output_zp, 0); } - void add_accum_dtype(tosaFb::DType accum_dtype) +}; + +inline ::flatbuffers::Offset CreateEqualAttribute(::flatbuffers::FlatBufferBuilder &_fbb) +{ + EqualAttributeBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct GreaterAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table +{ + typedef 
GreaterAttributeBuilder Builder; + static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return GreaterAttributeTypeTable(); } + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); } +}; + +struct GreaterAttributeBuilder +{ + typedef GreaterAttribute Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit GreaterAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { - fbb_.AddElement(PoolAttribute::VT_ACCUM_DTYPE, static_cast(accum_dtype), 0); + start_ = fbb_.StartTable(); } - explicit PoolAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } - ::flatbuffers::Offset Finish() + ::flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); - auto o = ::flatbuffers::Offset(end); + auto o = ::flatbuffers::Offset(end); return o; } }; -inline ::flatbuffers::Offset CreatePoolAttribute(::flatbuffers::FlatBufferBuilder &_fbb, - ::flatbuffers::Offset<::flatbuffers::Vector> pad = 0, ::flatbuffers::Offset<::flatbuffers::Vector> kernel = 0, - ::flatbuffers::Offset<::flatbuffers::Vector> stride = 0, int32_t input_zp = 0, int32_t output_zp = 0, - tosaFb::DType accum_dtype = tosaFb::DType::UNKNOWN) +inline ::flatbuffers::Offset CreateGreaterAttribute(::flatbuffers::FlatBufferBuilder &_fbb) { - PoolAttributeBuilder builder_(_fbb); - builder_.add_accum_dtype(accum_dtype); - builder_.add_output_zp(output_zp); - builder_.add_input_zp(input_zp); - builder_.add_stride(stride); - builder_.add_kernel(kernel); - builder_.add_pad(pad); + GreaterAttributeBuilder builder_(_fbb); return builder_.Finish(); } -inline ::flatbuffers::Offset CreatePoolAttributeDirect(::flatbuffers::FlatBufferBuilder &_fbb, - const std::vector *pad = nullptr, const std::vector *kernel = nullptr, const std::vector *stride = nullptr, - int32_t input_zp = 0, int32_t output_zp = 0, tosaFb::DType accum_dtype = tosaFb::DType::UNKNOWN) 
+struct GreaterEqualAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { - auto pad__ = pad ? _fbb.CreateVector(*pad) : 0; - auto kernel__ = kernel ? _fbb.CreateVector(*kernel) : 0; - auto stride__ = stride ? _fbb.CreateVector(*stride) : 0; - return tosaFb::CreatePoolAttribute(_fbb, pad__, kernel__, stride__, input_zp, output_zp, accum_dtype); + typedef GreaterEqualAttributeBuilder Builder; + static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return GreaterEqualAttributeTypeTable(); } + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); } +}; + +struct GreaterEqualAttributeBuilder +{ + typedef GreaterEqualAttribute Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit GreaterEqualAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateGreaterEqualAttribute(::flatbuffers::FlatBufferBuilder &_fbb) +{ + GreaterEqualAttributeBuilder builder_(_fbb); + return builder_.Finish(); } -struct ConvAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table +struct ReduceAllAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { - typedef ConvAttributeBuilder Builder; - static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return ConvAttributeTypeTable(); } + typedef ReduceAllAttributeBuilder Builder; + static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return ReduceAllAttributeTypeTable(); } enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_PAD = 4, - VT_STRIDE = 6, - VT_DILATION = 8, - VT_INPUT_ZP = 10, - VT_WEIGHT_ZP = 12 + VT_AXIS = 4 }; - const ::flatbuffers::Vector *pad() const + int32_t axis() const { return GetField(VT_AXIS, 0); } + bool 
Verify(::flatbuffers::Verifier &verifier) const { - return GetPointer *>(VT_PAD); + return VerifyTableStart(verifier) && VerifyField(verifier, VT_AXIS, 4) && verifier.EndTable(); } - const ::flatbuffers::Vector *stride() const +}; + +struct ReduceAllAttributeBuilder +{ + typedef ReduceAllAttribute Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_axis(int32_t axis) { fbb_.AddElement(ReduceAllAttribute::VT_AXIS, axis, 0); } + explicit ReduceAllAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { - return GetPointer *>(VT_STRIDE); + start_ = fbb_.StartTable(); } - const ::flatbuffers::Vector *dilation() const + ::flatbuffers::Offset Finish() { - return GetPointer *>(VT_DILATION); + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; } - int32_t input_zp() const { return GetField(VT_INPUT_ZP, 0); } - int32_t weight_zp() const { return GetField(VT_WEIGHT_ZP, 0); } +}; + +inline ::flatbuffers::Offset CreateReduceAllAttribute(::flatbuffers::FlatBufferBuilder &_fbb, int32_t axis = 0) +{ + ReduceAllAttributeBuilder builder_(_fbb); + builder_.add_axis(axis); + return builder_.Finish(); +} + +struct ReduceAnyAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table +{ + typedef ReduceAnyAttributeBuilder Builder; + static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return ReduceAnyAttributeTypeTable(); } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE + { + VT_AXIS = 4 + }; + int32_t axis() const { return GetField(VT_AXIS, 0); } bool Verify(::flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_PAD) && verifier.VerifyVector(pad()) && - VerifyOffset(verifier, VT_STRIDE) && verifier.VerifyVector(stride()) && VerifyOffset(verifier, VT_DILATION) && - verifier.VerifyVector(dilation()) && VerifyField(verifier, VT_INPUT_ZP, 4) && - VerifyField(verifier, VT_WEIGHT_ZP, 4) && 
verifier.EndTable(); + return VerifyTableStart(verifier) && VerifyField(verifier, VT_AXIS, 4) && verifier.EndTable(); } }; -struct ConvAttributeBuilder +struct ReduceAnyAttributeBuilder { - typedef ConvAttribute Table; + typedef ReduceAnyAttribute Table; ::flatbuffers::FlatBufferBuilder &fbb_; ::flatbuffers::uoffset_t start_; - void add_pad(::flatbuffers::Offset<::flatbuffers::Vector> pad) - { - fbb_.AddOffset(ConvAttribute::VT_PAD, pad); - } - void add_stride(::flatbuffers::Offset<::flatbuffers::Vector> stride) - { - fbb_.AddOffset(ConvAttribute::VT_STRIDE, stride); - } - void add_dilation(::flatbuffers::Offset<::flatbuffers::Vector> dilation) + void add_axis(int32_t axis) { fbb_.AddElement(ReduceAnyAttribute::VT_AXIS, axis, 0); } + explicit ReduceAnyAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { - fbb_.AddOffset(ConvAttribute::VT_DILATION, dilation); + start_ = fbb_.StartTable(); } - void add_input_zp(int32_t input_zp) { fbb_.AddElement(ConvAttribute::VT_INPUT_ZP, input_zp, 0); } - void add_weight_zp(int32_t weight_zp) { fbb_.AddElement(ConvAttribute::VT_WEIGHT_ZP, weight_zp, 0); } - explicit ConvAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } - ::flatbuffers::Offset Finish() + ::flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); - auto o = ::flatbuffers::Offset(end); + auto o = ::flatbuffers::Offset(end); return o; } }; -inline ::flatbuffers::Offset CreateConvAttribute(::flatbuffers::FlatBufferBuilder &_fbb, - ::flatbuffers::Offset<::flatbuffers::Vector> pad = 0, ::flatbuffers::Offset<::flatbuffers::Vector> stride = 0, - ::flatbuffers::Offset<::flatbuffers::Vector> dilation = 0, int32_t input_zp = 0, int32_t weight_zp = 0) +inline ::flatbuffers::Offset CreateReduceAnyAttribute(::flatbuffers::FlatBufferBuilder &_fbb, int32_t axis = 0) { - ConvAttributeBuilder builder_(_fbb); - builder_.add_weight_zp(weight_zp); - builder_.add_input_zp(input_zp); - 
builder_.add_dilation(dilation); - builder_.add_stride(stride); - builder_.add_pad(pad); + ReduceAnyAttributeBuilder builder_(_fbb); + builder_.add_axis(axis); return builder_.Finish(); } -inline ::flatbuffers::Offset CreateConvAttributeDirect(::flatbuffers::FlatBufferBuilder &_fbb, - const std::vector *pad = nullptr, const std::vector *stride = nullptr, - const std::vector *dilation = nullptr, int32_t input_zp = 0, int32_t weight_zp = 0) -{ - auto pad__ = pad ? _fbb.CreateVector(*pad) : 0; - auto stride__ = stride ? _fbb.CreateVector(*stride) : 0; - auto dilation__ = dilation ? _fbb.CreateVector(*dilation) : 0; - return tosaFb::CreateConvAttribute(_fbb, pad__, stride__, dilation__, input_zp, weight_zp); -} - -struct TransposeConvAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table +struct ReduceMaxAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { - typedef TransposeConvAttributeBuilder Builder; - static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return TransposeConvAttributeTypeTable(); } + typedef ReduceMaxAttributeBuilder Builder; + static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return ReduceMaxAttributeTypeTable(); } enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_OUT_PAD = 4, - VT_STRIDE = 6, - VT_OUTPUT_SHAPE = 8, - VT_INPUT_ZP = 10, - VT_WEIGHT_ZP = 12 + VT_AXIS = 4, + VT_NAN_MODE = 6 }; - const ::flatbuffers::Vector *out_pad() const + int32_t axis() const { return GetField(VT_AXIS, 0); } + tosaFb::NanPropagationMode nan_mode() const { - return GetPointer *>(VT_OUT_PAD); + return static_cast(GetField(VT_NAN_MODE, 0)); } - const ::flatbuffers::Vector *stride() const + bool Verify(::flatbuffers::Verifier &verifier) const { - return GetPointer *>(VT_STRIDE); + return VerifyTableStart(verifier) && VerifyField(verifier, VT_AXIS, 4) && + VerifyField(verifier, VT_NAN_MODE, 4) && verifier.EndTable(); + } +}; + +struct ReduceMaxAttributeBuilder +{ + typedef ReduceMaxAttribute Table; + 
::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_axis(int32_t axis) { fbb_.AddElement(ReduceMaxAttribute::VT_AXIS, axis, 0); } + void add_nan_mode(tosaFb::NanPropagationMode nan_mode) + { + fbb_.AddElement(ReduceMaxAttribute::VT_NAN_MODE, static_cast(nan_mode), 0); + } + explicit ReduceMaxAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; } - const ::flatbuffers::Vector *output_shape() const +}; + +inline ::flatbuffers::Offset CreateReduceMaxAttribute(::flatbuffers::FlatBufferBuilder &_fbb, + int32_t axis = 0, tosaFb::NanPropagationMode nan_mode = tosaFb::NanPropagationMode::UNKNOWN) +{ + ReduceMaxAttributeBuilder builder_(_fbb); + builder_.add_nan_mode(nan_mode); + builder_.add_axis(axis); + return builder_.Finish(); +} + +struct ReduceMinAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table +{ + typedef ReduceMinAttributeBuilder Builder; + static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return ReduceMinAttributeTypeTable(); } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - return GetPointer *>(VT_OUTPUT_SHAPE); + VT_AXIS = 4, + VT_NAN_MODE = 6 + }; + int32_t axis() const { return GetField(VT_AXIS, 0); } + tosaFb::NanPropagationMode nan_mode() const + { + return static_cast(GetField(VT_NAN_MODE, 0)); } - int32_t input_zp() const { return GetField(VT_INPUT_ZP, 0); } - int32_t weight_zp() const { return GetField(VT_WEIGHT_ZP, 0); } bool Verify(::flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_OUT_PAD) && verifier.VerifyVector(out_pad()) && - VerifyOffset(verifier, VT_STRIDE) && verifier.VerifyVector(stride()) && VerifyOffset(verifier, VT_OUTPUT_SHAPE) && - verifier.VerifyVector(output_shape()) && VerifyField(verifier, VT_INPUT_ZP, 4) && - 
VerifyField(verifier, VT_WEIGHT_ZP, 4) && verifier.EndTable(); + return VerifyTableStart(verifier) && VerifyField(verifier, VT_AXIS, 4) && + VerifyField(verifier, VT_NAN_MODE, 4) && verifier.EndTable(); } }; -struct TransposeConvAttributeBuilder +struct ReduceMinAttributeBuilder { - typedef TransposeConvAttribute Table; + typedef ReduceMinAttribute Table; ::flatbuffers::FlatBufferBuilder &fbb_; ::flatbuffers::uoffset_t start_; - void add_out_pad(::flatbuffers::Offset<::flatbuffers::Vector> out_pad) + void add_axis(int32_t axis) { fbb_.AddElement(ReduceMinAttribute::VT_AXIS, axis, 0); } + void add_nan_mode(tosaFb::NanPropagationMode nan_mode) { - fbb_.AddOffset(TransposeConvAttribute::VT_OUT_PAD, out_pad); + fbb_.AddElement(ReduceMinAttribute::VT_NAN_MODE, static_cast(nan_mode), 0); } - void add_stride(::flatbuffers::Offset<::flatbuffers::Vector> stride) + explicit ReduceMinAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { - fbb_.AddOffset(TransposeConvAttribute::VT_STRIDE, stride); + start_ = fbb_.StartTable(); } - void add_output_shape(::flatbuffers::Offset<::flatbuffers::Vector> output_shape) + ::flatbuffers::Offset Finish() { - fbb_.AddOffset(TransposeConvAttribute::VT_OUTPUT_SHAPE, output_shape); + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; } - void add_input_zp(int32_t input_zp) { fbb_.AddElement(TransposeConvAttribute::VT_INPUT_ZP, input_zp, 0); } - void add_weight_zp(int32_t weight_zp) +}; + +inline ::flatbuffers::Offset CreateReduceMinAttribute(::flatbuffers::FlatBufferBuilder &_fbb, + int32_t axis = 0, tosaFb::NanPropagationMode nan_mode = tosaFb::NanPropagationMode::UNKNOWN) +{ + ReduceMinAttributeBuilder builder_(_fbb); + builder_.add_nan_mode(nan_mode); + builder_.add_axis(axis); + return builder_.Finish(); +} + +struct ReduceProductAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table +{ + typedef ReduceProductAttributeBuilder Builder; + static const 
::flatbuffers::TypeTable *MiniReflectTypeTable() { return ReduceProductAttributeTypeTable(); } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE + { + VT_AXIS = 4 + }; + int32_t axis() const { return GetField(VT_AXIS, 0); } + bool Verify(::flatbuffers::Verifier &verifier) const { - fbb_.AddElement(TransposeConvAttribute::VT_WEIGHT_ZP, weight_zp, 0); + return VerifyTableStart(verifier) && VerifyField(verifier, VT_AXIS, 4) && verifier.EndTable(); } - explicit TransposeConvAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) +}; + +struct ReduceProductAttributeBuilder +{ + typedef ReduceProductAttribute Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_axis(int32_t axis) { fbb_.AddElement(ReduceProductAttribute::VT_AXIS, axis, 0); } + explicit ReduceProductAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } - ::flatbuffers::Offset Finish() + ::flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); - auto o = ::flatbuffers::Offset(end); + auto o = ::flatbuffers::Offset(end); return o; } }; -inline ::flatbuffers::Offset CreateTransposeConvAttribute(::flatbuffers::FlatBufferBuilder &_fbb, - ::flatbuffers::Offset<::flatbuffers::Vector> out_pad = 0, ::flatbuffers::Offset<::flatbuffers::Vector> stride = 0, - ::flatbuffers::Offset<::flatbuffers::Vector> output_shape = 0, int32_t input_zp = 0, int32_t weight_zp = 0) +inline ::flatbuffers::Offset CreateReduceProductAttribute(::flatbuffers::FlatBufferBuilder &_fbb, int32_t axis = 0) { - TransposeConvAttributeBuilder builder_(_fbb); - builder_.add_weight_zp(weight_zp); - builder_.add_input_zp(input_zp); - builder_.add_output_shape(output_shape); - builder_.add_stride(stride); - builder_.add_out_pad(out_pad); + ReduceProductAttributeBuilder builder_(_fbb); + builder_.add_axis(axis); return builder_.Finish(); } -inline ::flatbuffers::Offset 
CreateTransposeConvAttributeDirect(::flatbuffers::FlatBufferBuilder &_fbb, - const std::vector *out_pad = nullptr, const std::vector *stride = nullptr, - const std::vector *output_shape = nullptr, int32_t input_zp = 0, int32_t weight_zp = 0) -{ - auto out_pad__ = out_pad ? _fbb.CreateVector(*out_pad) : 0; - auto stride__ = stride ? _fbb.CreateVector(*stride) : 0; - auto output_shape__ = output_shape ? _fbb.CreateVector(*output_shape) : 0; - return tosaFb::CreateTransposeConvAttribute(_fbb, out_pad__, stride__, output_shape__, input_zp, weight_zp); -} - -struct PadAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table +struct ReduceSumAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { - typedef PadAttributeBuilder Builder; - static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return PadAttributeTypeTable(); } + typedef ReduceSumAttributeBuilder Builder; + static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return ReduceSumAttributeTypeTable(); } enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_PADDING = 4, - VT_PAD_CONST_INT = 6, - VT_PAD_CONST_FP = 8 + VT_AXIS = 4 }; - const ::flatbuffers::Vector *padding() const - { - return GetPointer *>(VT_PADDING); - } - int32_t pad_const_int() const { return GetField(VT_PAD_CONST_INT, 0); } - const ::flatbuffers::Vector *pad_const_fp() const - { - return GetPointer *>(VT_PAD_CONST_FP); - } + int32_t axis() const { return GetField(VT_AXIS, 0); } bool Verify(::flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_PADDING) && verifier.VerifyVector(padding()) && - VerifyField(verifier, VT_PAD_CONST_INT, 4) && VerifyOffset(verifier, VT_PAD_CONST_FP) && - verifier.VerifyVector(pad_const_fp()) && verifier.EndTable(); + return VerifyTableStart(verifier) && VerifyField(verifier, VT_AXIS, 4) && verifier.EndTable(); } }; -struct PadAttributeBuilder +struct ReduceSumAttributeBuilder { - typedef PadAttribute Table; + 
typedef ReduceSumAttribute Table; ::flatbuffers::FlatBufferBuilder &fbb_; ::flatbuffers::uoffset_t start_; - void add_padding(::flatbuffers::Offset<::flatbuffers::Vector> padding) - { - fbb_.AddOffset(PadAttribute::VT_PADDING, padding); - } - void add_pad_const_int(int32_t pad_const_int) + void add_axis(int32_t axis) { fbb_.AddElement(ReduceSumAttribute::VT_AXIS, axis, 0); } + explicit ReduceSumAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { - fbb_.AddElement(PadAttribute::VT_PAD_CONST_INT, pad_const_int, 0); - } - void add_pad_const_fp(::flatbuffers::Offset<::flatbuffers::Vector> pad_const_fp) - { - fbb_.AddOffset(PadAttribute::VT_PAD_CONST_FP, pad_const_fp); + start_ = fbb_.StartTable(); } - explicit PadAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } - ::flatbuffers::Offset Finish() + ::flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); - auto o = ::flatbuffers::Offset(end); + auto o = ::flatbuffers::Offset(end); return o; } }; -inline ::flatbuffers::Offset CreatePadAttribute(::flatbuffers::FlatBufferBuilder &_fbb, - ::flatbuffers::Offset<::flatbuffers::Vector> padding = 0, int32_t pad_const_int = 0, - ::flatbuffers::Offset<::flatbuffers::Vector> pad_const_fp = 0) +inline ::flatbuffers::Offset CreateReduceSumAttribute(::flatbuffers::FlatBufferBuilder &_fbb, int32_t axis = 0) { - PadAttributeBuilder builder_(_fbb); - builder_.add_pad_const_fp(pad_const_fp); - builder_.add_pad_const_int(pad_const_int); - builder_.add_padding(padding); + ReduceSumAttributeBuilder builder_(_fbb); + builder_.add_axis(axis); return builder_.Finish(); } -inline ::flatbuffers::Offset CreatePadAttributeDirect(::flatbuffers::FlatBufferBuilder &_fbb, - const std::vector *padding = nullptr, int32_t pad_const_int = 0, const std::vector *pad_const_fp = nullptr) +struct ConcatAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { - auto padding__ = padding ? 
_fbb.CreateVector(*padding) : 0; - if ( pad_const_fp ) - { - _fbb.ForceVectorAlignment(pad_const_fp->size(), sizeof(uint8_t), 8); - } - auto pad_const_fp__ = pad_const_fp ? _fbb.CreateVector(*pad_const_fp) : 0; - return tosaFb::CreatePadAttribute(_fbb, padding__, pad_const_int, pad_const_fp__); -} - -struct AxisAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table -{ - typedef AxisAttributeBuilder Builder; - static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return AxisAttributeTypeTable(); } + typedef ConcatAttributeBuilder Builder; + static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return ConcatAttributeTypeTable(); } enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { VT_AXIS = 4 @@ -903,45 +3439,60 @@ struct AxisAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table } }; -struct AxisAttributeBuilder +struct ConcatAttributeBuilder { - typedef AxisAttribute Table; + typedef ConcatAttribute Table; ::flatbuffers::FlatBufferBuilder &fbb_; ::flatbuffers::uoffset_t start_; - void add_axis(int32_t axis) { fbb_.AddElement(AxisAttribute::VT_AXIS, axis, 0); } - explicit AxisAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } - ::flatbuffers::Offset Finish() + void add_axis(int32_t axis) { fbb_.AddElement(ConcatAttribute::VT_AXIS, axis, 0); } + explicit ConcatAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } + ::flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); - auto o = ::flatbuffers::Offset(end); + auto o = ::flatbuffers::Offset(end); return o; } }; -inline ::flatbuffers::Offset CreateAxisAttribute(::flatbuffers::FlatBufferBuilder &_fbb, int32_t axis = 0) +inline ::flatbuffers::Offset CreateConcatAttribute(::flatbuffers::FlatBufferBuilder &_fbb, int32_t axis = 0) { - AxisAttributeBuilder builder_(_fbb); + ConcatAttributeBuilder builder_(_fbb); builder_.add_axis(axis); return 
builder_.Finish(); } +struct PadAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table +{ + typedef PadAttributeBuilder Builder; + static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return PadAttributeTypeTable(); } + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); } +}; + +struct PadAttributeBuilder +{ + typedef PadAttribute Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit PadAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } + ::flatbuffers::Offset Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreatePadAttribute(::flatbuffers::FlatBufferBuilder &_fbb) +{ + PadAttributeBuilder builder_(_fbb); + return builder_.Finish(); +} + struct ReshapeAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { typedef ReshapeAttributeBuilder Builder; static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return ReshapeAttributeTypeTable(); } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE - { - VT_NEW_SHAPE = 4 - }; - const ::flatbuffers::Vector *new_shape() const - { - return GetPointer *>(VT_NEW_SHAPE); - } - bool Verify(::flatbuffers::Verifier &verifier) const - { - return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_NEW_SHAPE) && - verifier.VerifyVector(new_shape()) && verifier.EndTable(); - } + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); } }; struct ReshapeAttributeBuilder @@ -949,10 +3500,6 @@ struct ReshapeAttributeBuilder typedef ReshapeAttribute Table; ::flatbuffers::FlatBufferBuilder &fbb_; ::flatbuffers::uoffset_t start_; - void add_new_shape(::flatbuffers::Offset<::flatbuffers::Vector> new_shape) - { - fbb_.AddOffset(ReshapeAttribute::VT_NEW_SHAPE, new_shape); - } explicit 
ReshapeAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); @@ -965,58 +3512,64 @@ struct ReshapeAttributeBuilder } }; -inline ::flatbuffers::Offset CreateReshapeAttribute( - ::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset<::flatbuffers::Vector> new_shape = 0) +inline ::flatbuffers::Offset CreateReshapeAttribute(::flatbuffers::FlatBufferBuilder &_fbb) { ReshapeAttributeBuilder builder_(_fbb); - builder_.add_new_shape(new_shape); return builder_.Finish(); } -inline ::flatbuffers::Offset -CreateReshapeAttributeDirect(::flatbuffers::FlatBufferBuilder &_fbb, const std::vector *new_shape = nullptr) +struct ReverseAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { - auto new_shape__ = new_shape ? _fbb.CreateVector(*new_shape) : 0; - return tosaFb::CreateReshapeAttribute(_fbb, new_shape__); -} - -struct SliceAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table -{ - typedef SliceAttributeBuilder Builder; - static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return SliceAttributeTypeTable(); } + typedef ReverseAttributeBuilder Builder; + static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return ReverseAttributeTypeTable(); } enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_START = 4, - VT_SIZE = 6 + VT_AXIS = 4 }; - const ::flatbuffers::Vector *start() const + int32_t axis() const { return GetField(VT_AXIS, 0); } + bool Verify(::flatbuffers::Verifier &verifier) const { - return GetPointer *>(VT_START); + return VerifyTableStart(verifier) && VerifyField(verifier, VT_AXIS, 4) && verifier.EndTable(); } - const ::flatbuffers::Vector *size() const +}; + +struct ReverseAttributeBuilder +{ + typedef ReverseAttribute Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_axis(int32_t axis) { fbb_.AddElement(ReverseAttribute::VT_AXIS, axis, 0); } + explicit 
ReverseAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { - return GetPointer *>(VT_SIZE); + start_ = fbb_.StartTable(); } - bool Verify(::flatbuffers::Verifier &verifier) const + ::flatbuffers::Offset Finish() { - return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_START) && verifier.VerifyVector(start()) && - VerifyOffset(verifier, VT_SIZE) && verifier.VerifyVector(size()) && verifier.EndTable(); + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; } }; +inline ::flatbuffers::Offset CreateReverseAttribute(::flatbuffers::FlatBufferBuilder &_fbb, int32_t axis = 0) +{ + ReverseAttributeBuilder builder_(_fbb); + builder_.add_axis(axis); + return builder_.Finish(); +} + +struct SliceAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table +{ + typedef SliceAttributeBuilder Builder; + static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return SliceAttributeTypeTable(); } + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); } +}; + struct SliceAttributeBuilder { typedef SliceAttribute Table; ::flatbuffers::FlatBufferBuilder &fbb_; ::flatbuffers::uoffset_t start_; - void add_start(::flatbuffers::Offset<::flatbuffers::Vector> start) - { - fbb_.AddOffset(SliceAttribute::VT_START, start); - } - void add_size(::flatbuffers::Offset<::flatbuffers::Vector> size) - { - fbb_.AddOffset(SliceAttribute::VT_SIZE, size); - } explicit SliceAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } ::flatbuffers::Offset Finish() { @@ -1026,40 +3579,17 @@ struct SliceAttributeBuilder } }; -inline ::flatbuffers::Offset CreateSliceAttribute(::flatbuffers::FlatBufferBuilder &_fbb, - ::flatbuffers::Offset<::flatbuffers::Vector> start = 0, ::flatbuffers::Offset<::flatbuffers::Vector> size = 0) +inline ::flatbuffers::Offset CreateSliceAttribute(::flatbuffers::FlatBufferBuilder &_fbb) { 
SliceAttributeBuilder builder_(_fbb); - builder_.add_size(size); - builder_.add_start(start); return builder_.Finish(); } -inline ::flatbuffers::Offset CreateSliceAttributeDirect(::flatbuffers::FlatBufferBuilder &_fbb, - const std::vector *start = nullptr, const std::vector *size = nullptr) -{ - auto start__ = start ? _fbb.CreateVector(*start) : 0; - auto size__ = size ? _fbb.CreateVector(*size) : 0; - return tosaFb::CreateSliceAttribute(_fbb, start__, size__); -} - struct TileAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { typedef TileAttributeBuilder Builder; static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return TileAttributeTypeTable(); } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE - { - VT_MULTIPLES = 4 - }; - const ::flatbuffers::Vector *multiples() const - { - return GetPointer *>(VT_MULTIPLES); - } - bool Verify(::flatbuffers::Verifier &verifier) const - { - return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_MULTIPLES) && - verifier.VerifyVector(multiples()) && verifier.EndTable(); - } + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); } }; struct TileAttributeBuilder @@ -1067,10 +3597,6 @@ struct TileAttributeBuilder typedef TileAttribute Table; ::flatbuffers::FlatBufferBuilder &fbb_; ::flatbuffers::uoffset_t start_; - void add_multiples(::flatbuffers::Offset<::flatbuffers::Vector> multiples) - { - fbb_.AddOffset(TileAttribute::VT_MULTIPLES, multiples); - } explicit TileAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } ::flatbuffers::Offset Finish() { @@ -1080,184 +3606,190 @@ struct TileAttributeBuilder } }; -inline ::flatbuffers::Offset CreateTileAttribute( - ::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset<::flatbuffers::Vector> multiples = 0) +inline ::flatbuffers::Offset CreateTileAttribute(::flatbuffers::FlatBufferBuilder &_fbb) { TileAttributeBuilder 
builder_(_fbb); - builder_.add_multiples(multiples); return builder_.Finish(); } -inline ::flatbuffers::Offset -CreateTileAttributeDirect(::flatbuffers::FlatBufferBuilder &_fbb, const std::vector *multiples = nullptr) -{ - auto multiples__ = multiples ? _fbb.CreateVector(*multiples) : 0; - return tosaFb::CreateTileAttribute(_fbb, multiples__); -} - -struct ResizeAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table +struct TransposeAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { - typedef ResizeAttributeBuilder Builder; - static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return ResizeAttributeTypeTable(); } + typedef TransposeAttributeBuilder Builder; + static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return TransposeAttributeTypeTable(); } enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_SCALE = 4, - VT_OFFSET = 6, - VT_BORDER = 8, - VT_MODE = 10 + VT_PERMS = 4 }; - const ::flatbuffers::Vector *scale() const - { - return GetPointer *>(VT_SCALE); - } - const ::flatbuffers::Vector *offset() const - { - return GetPointer *>(VT_OFFSET); - } - const ::flatbuffers::Vector *border() const + const ::flatbuffers::Vector *perms() const { - return GetPointer *>(VT_BORDER); + return GetPointer *>(VT_PERMS); } - tosaFb::ResizeMode mode() const { return static_cast(GetField(VT_MODE, 0)); } bool Verify(::flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_SCALE) && verifier.VerifyVector(scale()) && - VerifyOffset(verifier, VT_OFFSET) && verifier.VerifyVector(offset()) && VerifyOffset(verifier, VT_BORDER) && - verifier.VerifyVector(border()) && VerifyField(verifier, VT_MODE, 4) && verifier.EndTable(); + return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_PERMS) && verifier.VerifyVector(perms()) && + verifier.EndTable(); } }; -struct ResizeAttributeBuilder +struct TransposeAttributeBuilder { - typedef ResizeAttribute Table; + typedef 
TransposeAttribute Table; ::flatbuffers::FlatBufferBuilder &fbb_; ::flatbuffers::uoffset_t start_; - void add_scale(::flatbuffers::Offset<::flatbuffers::Vector> scale) - { - fbb_.AddOffset(ResizeAttribute::VT_SCALE, scale); - } - void add_offset(::flatbuffers::Offset<::flatbuffers::Vector> offset) - { - fbb_.AddOffset(ResizeAttribute::VT_OFFSET, offset); - } - void add_border(::flatbuffers::Offset<::flatbuffers::Vector> border) + void add_perms(::flatbuffers::Offset<::flatbuffers::Vector> perms) { - fbb_.AddOffset(ResizeAttribute::VT_BORDER, border); + fbb_.AddOffset(TransposeAttribute::VT_PERMS, perms); } - void add_mode(tosaFb::ResizeMode mode) + explicit TransposeAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { - fbb_.AddElement(ResizeAttribute::VT_MODE, static_cast(mode), 0); + start_ = fbb_.StartTable(); } - explicit ResizeAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } - ::flatbuffers::Offset Finish() + ::flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); - auto o = ::flatbuffers::Offset(end); + auto o = ::flatbuffers::Offset(end); return o; } }; -inline ::flatbuffers::Offset CreateResizeAttribute(::flatbuffers::FlatBufferBuilder &_fbb, - ::flatbuffers::Offset<::flatbuffers::Vector> scale = 0, ::flatbuffers::Offset<::flatbuffers::Vector> offset = 0, - ::flatbuffers::Offset<::flatbuffers::Vector> border = 0, tosaFb::ResizeMode mode = tosaFb::ResizeMode::UNKNOWN) +inline ::flatbuffers::Offset CreateTransposeAttribute( + ::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset<::flatbuffers::Vector> perms = 0) { - ResizeAttributeBuilder builder_(_fbb); - builder_.add_mode(mode); - builder_.add_border(border); - builder_.add_offset(offset); - builder_.add_scale(scale); + TransposeAttributeBuilder builder_(_fbb); + builder_.add_perms(perms); return builder_.Finish(); } -inline ::flatbuffers::Offset CreateResizeAttributeDirect(::flatbuffers::FlatBufferBuilder 
&_fbb, - const std::vector *scale = nullptr, const std::vector *offset = nullptr, - const std::vector *border = nullptr, tosaFb::ResizeMode mode = tosaFb::ResizeMode::UNKNOWN) +inline ::flatbuffers::Offset +CreateTransposeAttributeDirect(::flatbuffers::FlatBufferBuilder &_fbb, const std::vector *perms = nullptr) { - auto scale__ = scale ? _fbb.CreateVector(*scale) : 0; - auto offset__ = offset ? _fbb.CreateVector(*offset) : 0; - auto border__ = border ? _fbb.CreateVector(*border) : 0; - return tosaFb::CreateResizeAttribute(_fbb, scale__, offset__, border__, mode); + auto perms__ = perms ? _fbb.CreateVector(*perms) : 0; + return tosaFb::CreateTransposeAttribute(_fbb, perms__); } -struct ClampAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table +struct GatherAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { - typedef ClampAttributeBuilder Builder; - static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return ClampAttributeTypeTable(); } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE + typedef GatherAttributeBuilder Builder; + static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return GatherAttributeTypeTable(); } + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); } +}; + +struct GatherAttributeBuilder +{ + typedef GatherAttribute Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit GatherAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } + ::flatbuffers::Offset Finish() { - VT_MIN_INT = 4, - VT_MAX_INT = 6, - VT_MIN_FP = 8, - VT_MAX_FP = 10 - }; - int32_t min_int() const { return GetField(VT_MIN_INT, 0); } - int32_t max_int() const { return GetField(VT_MAX_INT, 0); } - const ::flatbuffers::Vector *min_fp() const + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset 
CreateGatherAttribute(::flatbuffers::FlatBufferBuilder &_fbb) +{ + GatherAttributeBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct ScatterAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table +{ + typedef ScatterAttributeBuilder Builder; + static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return ScatterAttributeTypeTable(); } + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); } +}; + +struct ScatterAttributeBuilder +{ + typedef ScatterAttribute Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit ScatterAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { - return GetPointer *>(VT_MIN_FP); + start_ = fbb_.StartTable(); } - const ::flatbuffers::Vector *max_fp() const + ::flatbuffers::Offset Finish() { - return GetPointer *>(VT_MAX_FP); + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; } +}; + +inline ::flatbuffers::Offset CreateScatterAttribute(::flatbuffers::FlatBufferBuilder &_fbb) +{ + ScatterAttributeBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct ResizeAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table +{ + typedef ResizeAttributeBuilder Builder; + static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return ResizeAttributeTypeTable(); } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE + { + VT_MODE = 4 + }; + tosaFb::ResizeMode mode() const { return static_cast(GetField(VT_MODE, 0)); } bool Verify(::flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && VerifyField(verifier, VT_MIN_INT, 4) && - VerifyField(verifier, VT_MAX_INT, 4) && VerifyOffset(verifier, VT_MIN_FP) && verifier.VerifyVector(min_fp()) && - VerifyOffset(verifier, VT_MAX_FP) && verifier.VerifyVector(max_fp()) && verifier.EndTable(); + return VerifyTableStart(verifier) && VerifyField(verifier, VT_MODE, 4) && 
verifier.EndTable(); } }; -struct ClampAttributeBuilder +struct ResizeAttributeBuilder { - typedef ClampAttribute Table; + typedef ResizeAttribute Table; ::flatbuffers::FlatBufferBuilder &fbb_; ::flatbuffers::uoffset_t start_; - void add_min_int(int32_t min_int) { fbb_.AddElement(ClampAttribute::VT_MIN_INT, min_int, 0); } - void add_max_int(int32_t max_int) { fbb_.AddElement(ClampAttribute::VT_MAX_INT, max_int, 0); } - void add_min_fp(::flatbuffers::Offset<::flatbuffers::Vector> min_fp) - { - fbb_.AddOffset(ClampAttribute::VT_MIN_FP, min_fp); - } - void add_max_fp(::flatbuffers::Offset<::flatbuffers::Vector> max_fp) + void add_mode(tosaFb::ResizeMode mode) { - fbb_.AddOffset(ClampAttribute::VT_MAX_FP, max_fp); + fbb_.AddElement(ResizeAttribute::VT_MODE, static_cast(mode), 0); } - explicit ClampAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } - ::flatbuffers::Offset Finish() + explicit ResizeAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } + ::flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); - auto o = ::flatbuffers::Offset(end); + auto o = ::flatbuffers::Offset(end); return o; } }; -inline ::flatbuffers::Offset CreateClampAttribute(::flatbuffers::FlatBufferBuilder &_fbb, - int32_t min_int = 0, int32_t max_int = 0, ::flatbuffers::Offset<::flatbuffers::Vector> min_fp = 0, - ::flatbuffers::Offset<::flatbuffers::Vector> max_fp = 0) +inline ::flatbuffers::Offset +CreateResizeAttribute(::flatbuffers::FlatBufferBuilder &_fbb, tosaFb::ResizeMode mode = tosaFb::ResizeMode::UNKNOWN) { - ClampAttributeBuilder builder_(_fbb); - builder_.add_max_fp(max_fp); - builder_.add_min_fp(min_fp); - builder_.add_max_int(max_int); - builder_.add_min_int(min_int); + ResizeAttributeBuilder builder_(_fbb); + builder_.add_mode(mode); return builder_.Finish(); } -inline ::flatbuffers::Offset CreateClampAttributeDirect(::flatbuffers::FlatBufferBuilder &_fbb, int32_t 
min_int = 0, - int32_t max_int = 0, const std::vector *min_fp = nullptr, const std::vector *max_fp = nullptr) +struct CastAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { - if ( min_fp ) - { - _fbb.ForceVectorAlignment(min_fp->size(), sizeof(uint8_t), 8); - } - auto min_fp__ = min_fp ? _fbb.CreateVector(*min_fp) : 0; - if ( max_fp ) + typedef CastAttributeBuilder Builder; + static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return CastAttributeTypeTable(); } + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); } +}; + +struct CastAttributeBuilder +{ + typedef CastAttribute Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit CastAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } + ::flatbuffers::Offset Finish() { - _fbb.ForceVectorAlignment(max_fp->size(), sizeof(uint8_t), 8); + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; } - auto max_fp__ = max_fp ? 
_fbb.CreateVector(*max_fp) : 0; - return tosaFb::CreateClampAttribute(_fbb, min_int, max_int, min_fp__, max_fp__); +}; + +inline ::flatbuffers::Offset CreateCastAttribute(::flatbuffers::FlatBufferBuilder &_fbb) +{ + CastAttributeBuilder builder_(_fbb); + return builder_.Finish(); } struct RescaleAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table @@ -1266,37 +3798,23 @@ struct RescaleAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return RescaleAttributeTypeTable(); } enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_INPUT_ZP = 4, - VT_OUTPUT_ZP = 6, - VT_MULTIPLIER = 8, - VT_SHIFT = 10, - VT_SCALE32 = 12, - VT_DOUBLE_ROUND = 14, - VT_PER_CHANNEL = 16, - VT_INPUT_UNSIGNED = 18, - VT_OUTPUT_UNSIGNED = 20 + VT_SCALE32 = 4, + VT_ROUNDING_MODE = 6, + VT_PER_CHANNEL = 8, + VT_INPUT_UNSIGNED = 10, + VT_OUTPUT_UNSIGNED = 12 }; - int32_t input_zp() const { return GetField(VT_INPUT_ZP, 0); } - int32_t output_zp() const { return GetField(VT_OUTPUT_ZP, 0); } - const ::flatbuffers::Vector *multiplier() const - { - return GetPointer *>(VT_MULTIPLIER); - } - const ::flatbuffers::Vector *shift() const + bool scale32() const { return GetField(VT_SCALE32, 0) != 0; } + tosaFb::RoundingMode rounding_mode() const { - return GetPointer *>(VT_SHIFT); + return static_cast(GetField(VT_ROUNDING_MODE, 0)); } - bool scale32() const { return GetField(VT_SCALE32, 0) != 0; } - bool double_round() const { return GetField(VT_DOUBLE_ROUND, 0) != 0; } bool per_channel() const { return GetField(VT_PER_CHANNEL, 0) != 0; } bool input_unsigned() const { return GetField(VT_INPUT_UNSIGNED, 0) != 0; } bool output_unsigned() const { return GetField(VT_OUTPUT_UNSIGNED, 0) != 0; } bool Verify(::flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && VerifyField(verifier, VT_INPUT_ZP, 4) && - VerifyField(verifier, VT_OUTPUT_ZP, 4) && VerifyOffset(verifier, VT_MULTIPLIER) 
&& - verifier.VerifyVector(multiplier()) && VerifyOffset(verifier, VT_SHIFT) && verifier.VerifyVector(shift()) && - VerifyField(verifier, VT_SCALE32, 1) && VerifyField(verifier, VT_DOUBLE_ROUND, 1) && + return VerifyTableStart(verifier) && VerifyField(verifier, VT_SCALE32, 1) && VerifyField(verifier, VT_ROUNDING_MODE, 4) && VerifyField(verifier, VT_PER_CHANNEL, 1) && VerifyField(verifier, VT_INPUT_UNSIGNED, 1) && VerifyField(verifier, VT_OUTPUT_UNSIGNED, 1) && verifier.EndTable(); } @@ -1307,23 +3825,13 @@ struct RescaleAttributeBuilder typedef RescaleAttribute Table; ::flatbuffers::FlatBufferBuilder &fbb_; ::flatbuffers::uoffset_t start_; - void add_input_zp(int32_t input_zp) { fbb_.AddElement(RescaleAttribute::VT_INPUT_ZP, input_zp, 0); } - void add_output_zp(int32_t output_zp) { fbb_.AddElement(RescaleAttribute::VT_OUTPUT_ZP, output_zp, 0); } - void add_multiplier(::flatbuffers::Offset<::flatbuffers::Vector> multiplier) - { - fbb_.AddOffset(RescaleAttribute::VT_MULTIPLIER, multiplier); - } - void add_shift(::flatbuffers::Offset<::flatbuffers::Vector> shift) - { - fbb_.AddOffset(RescaleAttribute::VT_SHIFT, shift); - } void add_scale32(bool scale32) { fbb_.AddElement(RescaleAttribute::VT_SCALE32, static_cast(scale32), 0); } - void add_double_round(bool double_round) + void add_rounding_mode(tosaFb::RoundingMode rounding_mode) { - fbb_.AddElement(RescaleAttribute::VT_DOUBLE_ROUND, static_cast(double_round), 0); + fbb_.AddElement(RescaleAttribute::VT_ROUNDING_MODE, static_cast(rounding_mode), 0); } void add_per_channel(bool per_channel) { @@ -1350,136 +3858,166 @@ struct RescaleAttributeBuilder }; inline ::flatbuffers::Offset CreateRescaleAttribute(::flatbuffers::FlatBufferBuilder &_fbb, - int32_t input_zp = 0, int32_t output_zp = 0, ::flatbuffers::Offset<::flatbuffers::Vector> multiplier = 0, - ::flatbuffers::Offset<::flatbuffers::Vector> shift = 0, bool scale32 = false, bool double_round = false, - bool per_channel = false, bool input_unsigned = false, bool 
output_unsigned = false) + bool scale32 = false, tosaFb::RoundingMode rounding_mode = tosaFb::RoundingMode::UNKNOWN, bool per_channel = false, + bool input_unsigned = false, bool output_unsigned = false) { RescaleAttributeBuilder builder_(_fbb); - builder_.add_shift(shift); - builder_.add_multiplier(multiplier); - builder_.add_output_zp(output_zp); - builder_.add_input_zp(input_zp); + builder_.add_rounding_mode(rounding_mode); builder_.add_output_unsigned(output_unsigned); builder_.add_input_unsigned(input_unsigned); builder_.add_per_channel(per_channel); - builder_.add_double_round(double_round); builder_.add_scale32(scale32); return builder_.Finish(); } -inline ::flatbuffers::Offset -CreateRescaleAttributeDirect(::flatbuffers::FlatBufferBuilder &_fbb, int32_t input_zp = 0, int32_t output_zp = 0, - const std::vector *multiplier = nullptr, const std::vector *shift = nullptr, bool scale32 = false, - bool double_round = false, bool per_channel = false, bool input_unsigned = false, bool output_unsigned = false) +struct ConstAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { - auto multiplier__ = multiplier ? _fbb.CreateVector(*multiplier) : 0; - auto shift__ = shift ? 
_fbb.CreateVector(*shift) : 0; - return tosaFb::CreateRescaleAttribute(_fbb, input_zp, output_zp, multiplier__, shift__, scale32, double_round, - per_channel, input_unsigned, output_unsigned); -} + typedef ConstAttributeBuilder Builder; + static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return ConstAttributeTypeTable(); } + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); } +}; -struct MulAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table +struct ConstAttributeBuilder { - typedef MulAttributeBuilder Builder; - static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return MulAttributeTypeTable(); } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE - { - VT_SHIFT = 4 - }; - int32_t shift() const { return GetField(VT_SHIFT, 0); } - bool Verify(::flatbuffers::Verifier &verifier) const + typedef ConstAttribute Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit ConstAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } + ::flatbuffers::Offset Finish() { - return VerifyTableStart(verifier) && VerifyField(verifier, VT_SHIFT, 4) && verifier.EndTable(); + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; } }; -struct MulAttributeBuilder +inline ::flatbuffers::Offset CreateConstAttribute(::flatbuffers::FlatBufferBuilder &_fbb) { - typedef MulAttribute Table; + ConstAttributeBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct IdentityAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table +{ + typedef IdentityAttributeBuilder Builder; + static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return IdentityAttributeTypeTable(); } + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); } +}; + +struct IdentityAttributeBuilder +{ + typedef 
IdentityAttribute Table; ::flatbuffers::FlatBufferBuilder &fbb_; ::flatbuffers::uoffset_t start_; - void add_shift(int32_t shift) { fbb_.AddElement(MulAttribute::VT_SHIFT, shift, 0); } - explicit MulAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } - ::flatbuffers::Offset Finish() + explicit IdentityAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); - auto o = ::flatbuffers::Offset(end); + auto o = ::flatbuffers::Offset(end); return o; } }; -inline ::flatbuffers::Offset CreateMulAttribute(::flatbuffers::FlatBufferBuilder &_fbb, int32_t shift = 0) +inline ::flatbuffers::Offset CreateIdentityAttribute(::flatbuffers::FlatBufferBuilder &_fbb) { - MulAttributeBuilder builder_(_fbb); - builder_.add_shift(shift); + IdentityAttributeBuilder builder_(_fbb); return builder_.Finish(); } -struct ArithmeticRightShiftAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table +struct CustomAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { - typedef ArithmeticRightShiftAttributeBuilder Builder; - static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return ArithmeticRightShiftAttributeTypeTable(); } + typedef CustomAttributeBuilder Builder; + static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return CustomAttributeTypeTable(); } enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_ROUND = 4 + VT_OPERATOR_NAME = 4, + VT_DOMAIN_NAME = 6, + VT_IMPLEMENTATION_ATTRS = 8 }; - bool round() const { return GetField(VT_ROUND, 0) != 0; } + const ::flatbuffers::String *operator_name() const + { + return GetPointer(VT_OPERATOR_NAME); + } + const ::flatbuffers::String *domain_name() const + { + return GetPointer(VT_DOMAIN_NAME); + } + const ::flatbuffers::Vector *implementation_attrs() const + { + return GetPointer *>(VT_IMPLEMENTATION_ATTRS); + } 
bool Verify(::flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && VerifyField(verifier, VT_ROUND, 1) && verifier.EndTable(); + return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_OPERATOR_NAME) && verifier.VerifyString(operator_name()) && + VerifyOffset(verifier, VT_DOMAIN_NAME) && verifier.VerifyString(domain_name()) && VerifyOffset(verifier, VT_IMPLEMENTATION_ATTRS) && + verifier.VerifyVector(implementation_attrs()) && verifier.EndTable(); } }; -struct ArithmeticRightShiftAttributeBuilder +struct CustomAttributeBuilder { - typedef ArithmeticRightShiftAttribute Table; + typedef CustomAttribute Table; ::flatbuffers::FlatBufferBuilder &fbb_; ::flatbuffers::uoffset_t start_; - void add_round(bool round) + void add_operator_name(::flatbuffers::Offset<::flatbuffers::String> operator_name) { - fbb_.AddElement(ArithmeticRightShiftAttribute::VT_ROUND, static_cast(round), 0); + fbb_.AddOffset(CustomAttribute::VT_OPERATOR_NAME, operator_name); } - explicit ArithmeticRightShiftAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + void add_domain_name(::flatbuffers::Offset<::flatbuffers::String> domain_name) { - start_ = fbb_.StartTable(); + fbb_.AddOffset(CustomAttribute::VT_DOMAIN_NAME, domain_name); } - ::flatbuffers::Offset Finish() + void add_implementation_attrs(::flatbuffers::Offset<::flatbuffers::Vector> implementation_attrs) + { + fbb_.AddOffset(CustomAttribute::VT_IMPLEMENTATION_ATTRS, implementation_attrs); + } + explicit CustomAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } + ::flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); - auto o = ::flatbuffers::Offset(end); + auto o = ::flatbuffers::Offset(end); return o; } }; -inline ::flatbuffers::Offset -CreateArithmeticRightShiftAttribute(::flatbuffers::FlatBufferBuilder &_fbb, bool round = false) +inline ::flatbuffers::Offset CreateCustomAttribute(::flatbuffers::FlatBufferBuilder &_fbb, + 
::flatbuffers::Offset<::flatbuffers::String> operator_name = 0, ::flatbuffers::Offset<::flatbuffers::String> domain_name = 0, + ::flatbuffers::Offset<::flatbuffers::Vector> implementation_attrs = 0) { - ArithmeticRightShiftAttributeBuilder builder_(_fbb); - builder_.add_round(round); + CustomAttributeBuilder builder_(_fbb); + builder_.add_implementation_attrs(implementation_attrs); + builder_.add_domain_name(domain_name); + builder_.add_operator_name(operator_name); return builder_.Finish(); } +inline ::flatbuffers::Offset CreateCustomAttributeDirect(::flatbuffers::FlatBufferBuilder &_fbb, + const char *operator_name = nullptr, const char *domain_name = nullptr, const std::vector *implementation_attrs = nullptr) +{ + auto operator_name__ = operator_name ? _fbb.CreateString(operator_name) : 0; + auto domain_name__ = domain_name ? _fbb.CreateString(domain_name) : 0; + auto implementation_attrs__ = implementation_attrs ? _fbb.CreateVector(*implementation_attrs) : 0; + return tosaFb::CreateCustomAttribute(_fbb, operator_name__, domain_name__, implementation_attrs__); +} + struct CondIfAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { typedef CondIfAttributeBuilder Builder; static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return CondIfAttributeTypeTable(); } enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_THEN_BRANCH = 4, - VT_ELSE_BRANCH = 6 + VT_THEN_GRAPH = 4, + VT_ELSE_GRAPH = 6 }; - const ::flatbuffers::String *then_branch() const - { - return GetPointer(VT_THEN_BRANCH); - } - const ::flatbuffers::String *else_branch() const - { - return GetPointer(VT_ELSE_BRANCH); - } + const ::flatbuffers::String *then_graph() const { return GetPointer(VT_THEN_GRAPH); } + const ::flatbuffers::String *else_graph() const { return GetPointer(VT_ELSE_GRAPH); } bool Verify(::flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_THEN_BRANCH) && verifier.VerifyString(then_branch()) 
&& - VerifyOffset(verifier, VT_ELSE_BRANCH) && verifier.VerifyString(else_branch()) && verifier.EndTable(); + return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_THEN_GRAPH) && verifier.VerifyString(then_graph()) && + VerifyOffset(verifier, VT_ELSE_GRAPH) && verifier.VerifyString(else_graph()) && verifier.EndTable(); } }; @@ -1488,13 +4026,13 @@ struct CondIfAttributeBuilder typedef CondIfAttribute Table; ::flatbuffers::FlatBufferBuilder &fbb_; ::flatbuffers::uoffset_t start_; - void add_then_branch(::flatbuffers::Offset<::flatbuffers::String> then_branch) + void add_then_graph(::flatbuffers::Offset<::flatbuffers::String> then_graph) { - fbb_.AddOffset(CondIfAttribute::VT_THEN_BRANCH, then_branch); + fbb_.AddOffset(CondIfAttribute::VT_THEN_GRAPH, then_graph); } - void add_else_branch(::flatbuffers::Offset<::flatbuffers::String> else_branch) + void add_else_graph(::flatbuffers::Offset<::flatbuffers::String> else_graph) { - fbb_.AddOffset(CondIfAttribute::VT_ELSE_BRANCH, else_branch); + fbb_.AddOffset(CondIfAttribute::VT_ELSE_GRAPH, else_graph); } explicit CondIfAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } ::flatbuffers::Offset Finish() @@ -1506,20 +4044,20 @@ struct CondIfAttributeBuilder }; inline ::flatbuffers::Offset CreateCondIfAttribute(::flatbuffers::FlatBufferBuilder &_fbb, - ::flatbuffers::Offset<::flatbuffers::String> then_branch = 0, ::flatbuffers::Offset<::flatbuffers::String> else_branch = 0) + ::flatbuffers::Offset<::flatbuffers::String> then_graph = 0, ::flatbuffers::Offset<::flatbuffers::String> else_graph = 0) { CondIfAttributeBuilder builder_(_fbb); - builder_.add_else_branch(else_branch); - builder_.add_then_branch(then_branch); + builder_.add_else_graph(else_graph); + builder_.add_then_graph(then_graph); return builder_.Finish(); } inline ::flatbuffers::Offset CreateCondIfAttributeDirect( - ::flatbuffers::FlatBufferBuilder &_fbb, const char *then_branch = nullptr, const char 
*else_branch = nullptr) + ::flatbuffers::FlatBufferBuilder &_fbb, const char *then_graph = nullptr, const char *else_graph = nullptr) { - auto then_branch__ = then_branch ? _fbb.CreateString(then_branch) : 0; - auto else_branch__ = else_branch ? _fbb.CreateString(else_branch) : 0; - return tosaFb::CreateCondIfAttribute(_fbb, then_branch__, else_branch__); + auto then_graph__ = then_graph ? _fbb.CreateString(then_graph) : 0; + auto else_graph__ = else_graph ? _fbb.CreateString(else_graph) : 0; + return tosaFb::CreateCondIfAttribute(_fbb, then_graph__, else_graph__); } struct WhileLoopAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table @@ -1528,21 +4066,15 @@ struct WhileLoopAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return WhileLoopAttributeTypeTable(); } enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_COND_BRANCH = 4, - VT_BODY_BRANCH = 6 + VT_COND_GRAPH = 4, + VT_BODY_GRAPH = 6 }; - const ::flatbuffers::String *cond_branch() const - { - return GetPointer(VT_COND_BRANCH); - } - const ::flatbuffers::String *body_branch() const - { - return GetPointer(VT_BODY_BRANCH); - } + const ::flatbuffers::String *cond_graph() const { return GetPointer(VT_COND_GRAPH); } + const ::flatbuffers::String *body_graph() const { return GetPointer(VT_BODY_GRAPH); } bool Verify(::flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_COND_BRANCH) && verifier.VerifyString(cond_branch()) && - VerifyOffset(verifier, VT_BODY_BRANCH) && verifier.VerifyString(body_branch()) && verifier.EndTable(); + return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_COND_GRAPH) && verifier.VerifyString(cond_graph()) && + VerifyOffset(verifier, VT_BODY_GRAPH) && verifier.VerifyString(body_graph()) && verifier.EndTable(); } }; @@ -1551,13 +4083,13 @@ struct WhileLoopAttributeBuilder typedef WhileLoopAttribute Table; 
::flatbuffers::FlatBufferBuilder &fbb_; ::flatbuffers::uoffset_t start_; - void add_cond_branch(::flatbuffers::Offset<::flatbuffers::String> cond_branch) + void add_cond_graph(::flatbuffers::Offset<::flatbuffers::String> cond_graph) { - fbb_.AddOffset(WhileLoopAttribute::VT_COND_BRANCH, cond_branch); + fbb_.AddOffset(WhileLoopAttribute::VT_COND_GRAPH, cond_graph); } - void add_body_branch(::flatbuffers::Offset<::flatbuffers::String> body_branch) + void add_body_graph(::flatbuffers::Offset<::flatbuffers::String> body_graph) { - fbb_.AddOffset(WhileLoopAttribute::VT_BODY_BRANCH, body_branch); + fbb_.AddOffset(WhileLoopAttribute::VT_BODY_GRAPH, body_graph); } explicit WhileLoopAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { @@ -1572,371 +4104,139 @@ struct WhileLoopAttributeBuilder }; inline ::flatbuffers::Offset CreateWhileLoopAttribute(::flatbuffers::FlatBufferBuilder &_fbb, - ::flatbuffers::Offset<::flatbuffers::String> cond_branch = 0, ::flatbuffers::Offset<::flatbuffers::String> body_branch = 0) + ::flatbuffers::Offset<::flatbuffers::String> cond_graph = 0, ::flatbuffers::Offset<::flatbuffers::String> body_graph = 0) { WhileLoopAttributeBuilder builder_(_fbb); - builder_.add_body_branch(body_branch); - builder_.add_cond_branch(cond_branch); + builder_.add_body_graph(body_graph); + builder_.add_cond_graph(cond_graph); return builder_.Finish(); } inline ::flatbuffers::Offset CreateWhileLoopAttributeDirect( - ::flatbuffers::FlatBufferBuilder &_fbb, const char *cond_branch = nullptr, const char *body_branch = nullptr) + ::flatbuffers::FlatBufferBuilder &_fbb, const char *cond_graph = nullptr, const char *body_graph = nullptr) { - auto cond_branch__ = cond_branch ? _fbb.CreateString(cond_branch) : 0; - auto body_branch__ = body_branch ? _fbb.CreateString(body_branch) : 0; - return tosaFb::CreateWhileLoopAttribute(_fbb, cond_branch__, body_branch__); + auto cond_graph__ = cond_graph ? 
_fbb.CreateString(cond_graph) : 0; + auto body_graph__ = body_graph ? _fbb.CreateString(body_graph) : 0; + return tosaFb::CreateWhileLoopAttribute(_fbb, cond_graph__, body_graph__); } -struct TransposeAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table +struct VariableAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { - typedef TransposeAttributeBuilder Builder; - static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return TransposeAttributeTypeTable(); } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE - { - VT_PERMS = 4 - }; - const ::flatbuffers::Vector *perms() const - { - return GetPointer *>(VT_PERMS); - } - bool Verify(::flatbuffers::Verifier &verifier) const - { - return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_PERMS) && verifier.VerifyVector(perms()) && - verifier.EndTable(); - } + typedef VariableAttributeBuilder Builder; + static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return VariableAttributeTypeTable(); } + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); } }; -struct TransposeAttributeBuilder +struct VariableAttributeBuilder { - typedef TransposeAttribute Table; + typedef VariableAttribute Table; ::flatbuffers::FlatBufferBuilder &fbb_; ::flatbuffers::uoffset_t start_; - void add_perms(::flatbuffers::Offset<::flatbuffers::Vector> perms) - { - fbb_.AddOffset(TransposeAttribute::VT_PERMS, perms); - } - explicit TransposeAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + explicit VariableAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } - ::flatbuffers::Offset Finish() - { - const auto end = fbb_.EndTable(start_); - auto o = ::flatbuffers::Offset(end); - return o; - } -}; - -inline ::flatbuffers::Offset CreateTransposeAttribute( - ::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset<::flatbuffers::Vector> perms = 0) -{ - 
TransposeAttributeBuilder builder_(_fbb); - builder_.add_perms(perms); - return builder_.Finish(); -} - -inline ::flatbuffers::Offset -CreateTransposeAttributeDirect(::flatbuffers::FlatBufferBuilder &_fbb, const std::vector *perms = nullptr) -{ - auto perms__ = perms ? _fbb.CreateVector(*perms) : 0; - return tosaFb::CreateTransposeAttribute(_fbb, perms__); -} - -struct TableAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table -{ - typedef TableAttributeBuilder Builder; - static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return TableAttributeTypeTable(); } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE - { - VT_TABLE = 4 - }; - const ::flatbuffers::Vector *table() const - { - return GetPointer *>(VT_TABLE); - } - bool Verify(::flatbuffers::Verifier &verifier) const - { - return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_TABLE) && verifier.VerifyVector(table()) && - verifier.EndTable(); - } -}; - -struct TableAttributeBuilder -{ - typedef TableAttribute Table; - ::flatbuffers::FlatBufferBuilder &fbb_; - ::flatbuffers::uoffset_t start_; - void add_table(::flatbuffers::Offset<::flatbuffers::Vector> table) - { - fbb_.AddOffset(TableAttribute::VT_TABLE, table); - } - explicit TableAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } - ::flatbuffers::Offset Finish() - { - const auto end = fbb_.EndTable(start_); - auto o = ::flatbuffers::Offset(end); - return o; - } -}; - -inline ::flatbuffers::Offset CreateTableAttribute( - ::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset<::flatbuffers::Vector> table = 0) -{ - TableAttributeBuilder builder_(_fbb); - builder_.add_table(table); - return builder_.Finish(); -} - -inline ::flatbuffers::Offset -CreateTableAttributeDirect(::flatbuffers::FlatBufferBuilder &_fbb, const std::vector *table = nullptr) -{ - auto table__ = table ? 
_fbb.CreateVector(*table) : 0; - return tosaFb::CreateTableAttribute(_fbb, table__); -} - -struct MatMulAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table -{ - typedef MatMulAttributeBuilder Builder; - static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return MatMulAttributeTypeTable(); } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE - { - VT_A_ZP = 4, - VT_B_ZP = 6 - }; - int32_t a_zp() const { return GetField(VT_A_ZP, 0); } - int32_t b_zp() const { return GetField(VT_B_ZP, 0); } - bool Verify(::flatbuffers::Verifier &verifier) const - { - return VerifyTableStart(verifier) && VerifyField(verifier, VT_A_ZP, 4) && - VerifyField(verifier, VT_B_ZP, 4) && verifier.EndTable(); - } -}; - -struct MatMulAttributeBuilder -{ - typedef MatMulAttribute Table; - ::flatbuffers::FlatBufferBuilder &fbb_; - ::flatbuffers::uoffset_t start_; - void add_a_zp(int32_t a_zp) { fbb_.AddElement(MatMulAttribute::VT_A_ZP, a_zp, 0); } - void add_b_zp(int32_t b_zp) { fbb_.AddElement(MatMulAttribute::VT_B_ZP, b_zp, 0); } - explicit MatMulAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } - ::flatbuffers::Offset Finish() + ::flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); - auto o = ::flatbuffers::Offset(end); + auto o = ::flatbuffers::Offset(end); return o; } }; -inline ::flatbuffers::Offset -CreateMatMulAttribute(::flatbuffers::FlatBufferBuilder &_fbb, int32_t a_zp = 0, int32_t b_zp = 0) +inline ::flatbuffers::Offset CreateVariableAttribute(::flatbuffers::FlatBufferBuilder &_fbb) { - MatMulAttributeBuilder builder_(_fbb); - builder_.add_b_zp(b_zp); - builder_.add_a_zp(a_zp); + VariableAttributeBuilder builder_(_fbb); return builder_.Finish(); } -struct FullyConnectedAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table +struct VariableWriteAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { - typedef FullyConnectedAttributeBuilder Builder; - static 
const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return FullyConnectedAttributeTypeTable(); } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE - { - VT_INPUT_ZP = 4, - VT_WEIGHT_ZP = 6 - }; - int32_t input_zp() const { return GetField(VT_INPUT_ZP, 0); } - int32_t weight_zp() const { return GetField(VT_WEIGHT_ZP, 0); } - bool Verify(::flatbuffers::Verifier &verifier) const - { - return VerifyTableStart(verifier) && VerifyField(verifier, VT_INPUT_ZP, 4) && - VerifyField(verifier, VT_WEIGHT_ZP, 4) && verifier.EndTable(); - } + typedef VariableWriteAttributeBuilder Builder; + static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return VariableWriteAttributeTypeTable(); } + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); } }; -struct FullyConnectedAttributeBuilder +struct VariableWriteAttributeBuilder { - typedef FullyConnectedAttribute Table; + typedef VariableWriteAttribute Table; ::flatbuffers::FlatBufferBuilder &fbb_; ::flatbuffers::uoffset_t start_; - void add_input_zp(int32_t input_zp) { fbb_.AddElement(FullyConnectedAttribute::VT_INPUT_ZP, input_zp, 0); } - void add_weight_zp(int32_t weight_zp) - { - fbb_.AddElement(FullyConnectedAttribute::VT_WEIGHT_ZP, weight_zp, 0); - } - explicit FullyConnectedAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + explicit VariableWriteAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } - ::flatbuffers::Offset Finish() - { - const auto end = fbb_.EndTable(start_); - auto o = ::flatbuffers::Offset(end); - return o; - } -}; - -inline ::flatbuffers::Offset -CreateFullyConnectedAttribute(::flatbuffers::FlatBufferBuilder &_fbb, int32_t input_zp = 0, int32_t weight_zp = 0) -{ - FullyConnectedAttributeBuilder builder_(_fbb); - builder_.add_weight_zp(weight_zp); - builder_.add_input_zp(input_zp); - return builder_.Finish(); -} - -struct NegateAttribute 
FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table -{ - typedef NegateAttributeBuilder Builder; - static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return NegateAttributeTypeTable(); } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE - { - VT_INPUT1_ZP = 4, - VT_OUTPUT_ZP = 6 - }; - int32_t input1_zp() const { return GetField(VT_INPUT1_ZP, 0); } - int32_t output_zp() const { return GetField(VT_OUTPUT_ZP, 0); } - bool Verify(::flatbuffers::Verifier &verifier) const - { - return VerifyTableStart(verifier) && VerifyField(verifier, VT_INPUT1_ZP, 4) && - VerifyField(verifier, VT_OUTPUT_ZP, 4) && verifier.EndTable(); - } -}; - -struct NegateAttributeBuilder -{ - typedef NegateAttribute Table; - ::flatbuffers::FlatBufferBuilder &fbb_; - ::flatbuffers::uoffset_t start_; - void add_input1_zp(int32_t input1_zp) { fbb_.AddElement(NegateAttribute::VT_INPUT1_ZP, input1_zp, 0); } - void add_output_zp(int32_t output_zp) { fbb_.AddElement(NegateAttribute::VT_OUTPUT_ZP, output_zp, 0); } - explicit NegateAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } - ::flatbuffers::Offset Finish() + ::flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); - auto o = ::flatbuffers::Offset(end); + auto o = ::flatbuffers::Offset(end); return o; } }; -inline ::flatbuffers::Offset -CreateNegateAttribute(::flatbuffers::FlatBufferBuilder &_fbb, int32_t input1_zp = 0, int32_t output_zp = 0) +inline ::flatbuffers::Offset CreateVariableWriteAttribute(::flatbuffers::FlatBufferBuilder &_fbb) { - NegateAttributeBuilder builder_(_fbb); - builder_.add_output_zp(output_zp); - builder_.add_input1_zp(input1_zp); + VariableWriteAttributeBuilder builder_(_fbb); return builder_.Finish(); } -struct CustomAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table +struct VariableReadAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { - typedef CustomAttributeBuilder Builder; - static const 
::flatbuffers::TypeTable *MiniReflectTypeTable() { return CustomAttributeTypeTable(); } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE - { - VT_IDENTIFIER = 4, - VT_CONFIG = 6, - VT_IMPLEMENTATION_ATTRS = 8 - }; - const ::flatbuffers::String *identifier() const { return GetPointer(VT_IDENTIFIER); } - const ::flatbuffers::String *config() const { return GetPointer(VT_CONFIG); } - const ::flatbuffers::Vector *implementation_attrs() const - { - return GetPointer *>(VT_IMPLEMENTATION_ATTRS); - } - bool Verify(::flatbuffers::Verifier &verifier) const - { - return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_IDENTIFIER) && verifier.VerifyString(identifier()) && - VerifyOffset(verifier, VT_CONFIG) && verifier.VerifyString(config()) && VerifyOffset(verifier, VT_IMPLEMENTATION_ATTRS) && - verifier.VerifyVector(implementation_attrs()) && verifier.EndTable(); - } + typedef VariableReadAttributeBuilder Builder; + static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return VariableReadAttributeTypeTable(); } + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); } }; -struct CustomAttributeBuilder -{ - typedef CustomAttribute Table; - ::flatbuffers::FlatBufferBuilder &fbb_; - ::flatbuffers::uoffset_t start_; - void add_identifier(::flatbuffers::Offset<::flatbuffers::String> identifier) - { - fbb_.AddOffset(CustomAttribute::VT_IDENTIFIER, identifier); - } - void add_config(::flatbuffers::Offset<::flatbuffers::String> config) - { - fbb_.AddOffset(CustomAttribute::VT_CONFIG, config); - } - void add_implementation_attrs(::flatbuffers::Offset<::flatbuffers::Vector> implementation_attrs) +struct VariableReadAttributeBuilder +{ + typedef VariableReadAttribute Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit VariableReadAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { - 
fbb_.AddOffset(CustomAttribute::VT_IMPLEMENTATION_ATTRS, implementation_attrs); + start_ = fbb_.StartTable(); } - explicit CustomAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } - ::flatbuffers::Offset Finish() + ::flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); - auto o = ::flatbuffers::Offset(end); + auto o = ::flatbuffers::Offset(end); return o; } }; -inline ::flatbuffers::Offset CreateCustomAttribute(::flatbuffers::FlatBufferBuilder &_fbb, - ::flatbuffers::Offset<::flatbuffers::String> identifier = 0, ::flatbuffers::Offset<::flatbuffers::String> config = 0, - ::flatbuffers::Offset<::flatbuffers::Vector> implementation_attrs = 0) +inline ::flatbuffers::Offset CreateVariableReadAttribute(::flatbuffers::FlatBufferBuilder &_fbb) { - CustomAttributeBuilder builder_(_fbb); - builder_.add_implementation_attrs(implementation_attrs); - builder_.add_config(config); - builder_.add_identifier(identifier); + VariableReadAttributeBuilder builder_(_fbb); return builder_.Finish(); } -inline ::flatbuffers::Offset CreateCustomAttributeDirect(::flatbuffers::FlatBufferBuilder &_fbb, - const char *identifier = nullptr, const char *config = nullptr, const std::vector *implementation_attrs = nullptr) -{ - auto identifier__ = identifier ? _fbb.CreateString(identifier) : 0; - auto config__ = config ? _fbb.CreateString(config) : 0; - auto implementation_attrs__ = implementation_attrs ? 
_fbb.CreateVector(*implementation_attrs) : 0; - return tosaFb::CreateCustomAttribute(_fbb, identifier__, config__, implementation_attrs__); -} - -struct FFTAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table +struct ConstShapeAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { - typedef FFTAttributeBuilder Builder; - static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return FFTAttributeTypeTable(); } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE - { - VT_INVERSE = 4 - }; - bool inverse() const { return GetField(VT_INVERSE, 0) != 0; } - bool Verify(::flatbuffers::Verifier &verifier) const - { - return VerifyTableStart(verifier) && VerifyField(verifier, VT_INVERSE, 1) && verifier.EndTable(); - } + typedef ConstShapeAttributeBuilder Builder; + static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return ConstShapeAttributeTypeTable(); } + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); } }; -struct FFTAttributeBuilder +struct ConstShapeAttributeBuilder { - typedef FFTAttribute Table; + typedef ConstShapeAttribute Table; ::flatbuffers::FlatBufferBuilder &fbb_; ::flatbuffers::uoffset_t start_; - void add_inverse(bool inverse) + explicit ConstShapeAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { - fbb_.AddElement(FFTAttribute::VT_INVERSE, static_cast(inverse), 0); + start_ = fbb_.StartTable(); } - explicit FFTAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } - ::flatbuffers::Offset Finish() + ::flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); - auto o = ::flatbuffers::Offset(end); + auto o = ::flatbuffers::Offset(end); return o; } }; -inline ::flatbuffers::Offset CreateFFTAttribute(::flatbuffers::FlatBufferBuilder &_fbb, bool inverse = false) +inline ::flatbuffers::Offset CreateConstShapeAttribute(::flatbuffers::FlatBufferBuilder &_fbb) { - 
FFTAttributeBuilder builder_(_fbb); - builder_.add_inverse(inverse); + ConstShapeAttributeBuilder builder_(_fbb); return builder_.Finish(); } @@ -2002,7 +4302,8 @@ struct TosaTensor FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table VT_TYPE = 8, VT_DATA = 10, VT_VARIABLE = 12, - VT_IS_UNRANKED = 14 + VT_IS_UNRANKED = 14, + VT_VARIABLE_NAME = 16 }; const ::flatbuffers::String *name() const { return GetPointer(VT_NAME); } const ::flatbuffers::Vector *shape() const @@ -2016,12 +4317,17 @@ struct TosaTensor FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table } bool variable() const { return GetField(VT_VARIABLE, 0) != 0; } bool is_unranked() const { return GetField(VT_IS_UNRANKED, 0) != 0; } + const ::flatbuffers::String *variable_name() const + { + return GetPointer(VT_VARIABLE_NAME); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_NAME) && verifier.VerifyString(name()) && - VerifyOffset(verifier, VT_SHAPE) && verifier.VerifyVector(shape()) && VerifyField(verifier, VT_TYPE, 4) && - VerifyOffset(verifier, VT_DATA) && verifier.VerifyVector(data()) && VerifyField(verifier, VT_VARIABLE, 1) && - VerifyField(verifier, VT_IS_UNRANKED, 1) && verifier.EndTable(); + VerifyOffset(verifier, VT_SHAPE) && verifier.VerifyVector(shape()) && + VerifyField(verifier, VT_TYPE, 4) && VerifyOffset(verifier, VT_DATA) && verifier.VerifyVector(data()) && + VerifyField(verifier, VT_VARIABLE, 1) && VerifyField(verifier, VT_IS_UNRANKED, 1) && + VerifyOffset(verifier, VT_VARIABLE_NAME) && verifier.VerifyString(variable_name()) && verifier.EndTable(); } }; @@ -2051,88 +4357,415 @@ struct TosaTensorBuilder { fbb_.AddElement(TosaTensor::VT_IS_UNRANKED, static_cast(is_unranked), 0); } + void add_variable_name(::flatbuffers::Offset<::flatbuffers::String> variable_name) + { + fbb_.AddOffset(TosaTensor::VT_VARIABLE_NAME, variable_name); + } explicit TosaTensorBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { 
start_ = fbb_.StartTable(); } ::flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = ::flatbuffers::Offset(end); - return o; + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateTosaTensor(::flatbuffers::FlatBufferBuilder &_fbb, + ::flatbuffers::Offset<::flatbuffers::String> name = 0, ::flatbuffers::Offset<::flatbuffers::Vector> shape = 0, + tosaFb::DType type = tosaFb::DType::UNKNOWN, ::flatbuffers::Offset<::flatbuffers::Vector> data = 0, + bool variable = false, bool is_unranked = false, ::flatbuffers::Offset<::flatbuffers::String> variable_name = 0) +{ + TosaTensorBuilder builder_(_fbb); + builder_.add_variable_name(variable_name); + builder_.add_data(data); + builder_.add_type(type); + builder_.add_shape(shape); + builder_.add_name(name); + builder_.add_is_unranked(is_unranked); + builder_.add_variable(variable); + return builder_.Finish(); +} + +inline ::flatbuffers::Offset CreateTosaTensorDirect(::flatbuffers::FlatBufferBuilder &_fbb, + const char *name = nullptr, const std::vector *shape = nullptr, tosaFb::DType type = tosaFb::DType::UNKNOWN, + const std::vector *data = nullptr, bool variable = false, bool is_unranked = false, const char *variable_name = nullptr) +{ + auto name__ = name ? _fbb.CreateString(name) : 0; + auto shape__ = shape ? _fbb.CreateVector(*shape) : 0; + if ( data ) + { + _fbb.ForceVectorAlignment(data->size(), sizeof(uint8_t), 8); + } + auto data__ = data ? _fbb.CreateVector(*data) : 0; + auto variable_name__ = variable_name ? 
_fbb.CreateString(variable_name) : 0; + return tosaFb::CreateTosaTensor(_fbb, name__, shape__, type, data__, variable, is_unranked, variable_name__); +} + +struct TosaShape FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table +{ + typedef TosaShapeBuilder Builder; + static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return TosaShapeTypeTable(); } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE + { + VT_NAME = 4, + VT_RANK = 6, + VT_DATA = 8 + }; + const ::flatbuffers::String *name() const { return GetPointer(VT_NAME); } + uint32_t rank() const { return GetField(VT_RANK, 0); } + const ::flatbuffers::Vector *data() const + { + return GetPointer *>(VT_DATA); + } + bool Verify(::flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_NAME) && verifier.VerifyString(name()) && + VerifyField(verifier, VT_RANK, 4) && VerifyOffset(verifier, VT_DATA) && + verifier.VerifyVector(data()) && verifier.EndTable(); + } +}; + +struct TosaShapeBuilder +{ + typedef TosaShape Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_name(::flatbuffers::Offset<::flatbuffers::String> name) { fbb_.AddOffset(TosaShape::VT_NAME, name); } + void add_rank(uint32_t rank) { fbb_.AddElement(TosaShape::VT_RANK, rank, 0); } + void add_data(::flatbuffers::Offset<::flatbuffers::Vector> data) + { + fbb_.AddOffset(TosaShape::VT_DATA, data); + } + explicit TosaShapeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } + ::flatbuffers::Offset Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset +CreateTosaShape(::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset<::flatbuffers::String> name = 0, + uint32_t rank = 0, ::flatbuffers::Offset<::flatbuffers::Vector> data = 0) +{ + TosaShapeBuilder builder_(_fbb); + builder_.add_data(data); + 
builder_.add_rank(rank); + builder_.add_name(name); + return builder_.Finish(); +} + +inline ::flatbuffers::Offset CreateTosaShapeDirect(::flatbuffers::FlatBufferBuilder &_fbb, + const char *name = nullptr, uint32_t rank = 0, const std::vector *data = nullptr) +{ + auto name__ = name ? _fbb.CreateString(name) : 0; + if ( data ) + { + _fbb.ForceVectorAlignment(data->size(), sizeof(uint8_t), 8); + } + auto data__ = data ? _fbb.CreateVector(*data) : 0; + return tosaFb::CreateTosaShape(_fbb, name__, rank, data__); +} + +struct OpLocation FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table +{ + typedef OpLocationBuilder Builder; + static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return OpLocationTypeTable(); } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE + { + VT_TEXT = 4 + }; + const ::flatbuffers::String *text() const { return GetPointer(VT_TEXT); } + bool Verify(::flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_TEXT) && verifier.VerifyString(text()) && + verifier.EndTable(); + } +}; + +struct OpLocationBuilder +{ + typedef OpLocation Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_text(::flatbuffers::Offset<::flatbuffers::String> text) { fbb_.AddOffset(OpLocation::VT_TEXT, text); } + explicit OpLocationBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } + ::flatbuffers::Offset Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset +CreateOpLocation(::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset<::flatbuffers::String> text = 0) +{ + OpLocationBuilder builder_(_fbb); + builder_.add_text(text); + return builder_.Finish(); +} + +inline ::flatbuffers::Offset CreateOpLocationDirect(::flatbuffers::FlatBufferBuilder &_fbb, const char *text = nullptr) +{ + auto text__ = text ? 
_fbb.CreateString(text) : 0; + return tosaFb::CreateOpLocation(_fbb, text__); +} + +struct TosaOperator FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table +{ + typedef TosaOperatorBuilder Builder; + static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return TosaOperatorTypeTable(); } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE + { + VT_OP = 4, + VT_ATTRIBUTE_TYPE = 6, + VT_ATTRIBUTE = 8, + VT_INPUTS = 10, + VT_OUTPUTS = 12, + VT_LOCATION = 14 + }; + tosaFb::Op op() const { return static_cast(GetField(VT_OP, 0)); } + tosaFb::Attribute attribute_type() const + { + return static_cast(GetField(VT_ATTRIBUTE_TYPE, 0)); + } + const void *attribute() const { return GetPointer(VT_ATTRIBUTE); } + template + const T *attribute_as() const; + const tosaFb::ArgMaxAttribute *attribute_as_ArgMaxAttribute() const + { + return attribute_type() == tosaFb::Attribute::ArgMaxAttribute ? static_cast(attribute()) : nullptr; + } + const tosaFb::AvgPool2dAttribute *attribute_as_AvgPool2dAttribute() const + { + return attribute_type() == tosaFb::Attribute::AvgPool2dAttribute ? static_cast(attribute()) : nullptr; + } + const tosaFb::Conv2dAttribute *attribute_as_Conv2dAttribute() const + { + return attribute_type() == tosaFb::Attribute::Conv2dAttribute ? static_cast(attribute()) : nullptr; + } + const tosaFb::Conv3dAttribute *attribute_as_Conv3dAttribute() const + { + return attribute_type() == tosaFb::Attribute::Conv3dAttribute ? static_cast(attribute()) : nullptr; + } + const tosaFb::DepthwiseConv2dAttribute *attribute_as_DepthwiseConv2dAttribute() const + { + return attribute_type() == tosaFb::Attribute::DepthwiseConv2dAttribute ? static_cast(attribute()) : nullptr; + } + const tosaFb::FFT2dAttribute *attribute_as_FFT2dAttribute() const + { + return attribute_type() == tosaFb::Attribute::FFT2dAttribute ? 
static_cast(attribute()) : nullptr; + } + const tosaFb::MatMulAttribute *attribute_as_MatMulAttribute() const + { + return attribute_type() == tosaFb::Attribute::MatMulAttribute ? static_cast(attribute()) : nullptr; + } + const tosaFb::MaxPool2dAttribute *attribute_as_MaxPool2dAttribute() const + { + return attribute_type() == tosaFb::Attribute::MaxPool2dAttribute ? static_cast(attribute()) : nullptr; + } + const tosaFb::RFFT2dAttribute *attribute_as_RFFT2dAttribute() const + { + return attribute_type() == tosaFb::Attribute::RFFT2dAttribute ? static_cast(attribute()) : nullptr; + } + const tosaFb::TransposeConv2dAttribute *attribute_as_TransposeConv2dAttribute() const + { + return attribute_type() == tosaFb::Attribute::TransposeConv2dAttribute ? static_cast(attribute()) : nullptr; + } + const tosaFb::ClampAttribute *attribute_as_ClampAttribute() const + { + return attribute_type() == tosaFb::Attribute::ClampAttribute ? static_cast(attribute()) : nullptr; + } + const tosaFb::ErfAttribute *attribute_as_ErfAttribute() const + { + return attribute_type() == tosaFb::Attribute::ErfAttribute ? static_cast(attribute()) : nullptr; + } + const tosaFb::SigmoidAttribute *attribute_as_SigmoidAttribute() const + { + return attribute_type() == tosaFb::Attribute::SigmoidAttribute ? static_cast(attribute()) : nullptr; + } + const tosaFb::TanhAttribute *attribute_as_TanhAttribute() const + { + return attribute_type() == tosaFb::Attribute::TanhAttribute ? static_cast(attribute()) : nullptr; + } + const tosaFb::AddAttribute *attribute_as_AddAttribute() const + { + return attribute_type() == tosaFb::Attribute::AddAttribute ? static_cast(attribute()) : nullptr; + } + const tosaFb::ArithmeticRightShiftAttribute *attribute_as_ArithmeticRightShiftAttribute() const + { + return attribute_type() == tosaFb::Attribute::ArithmeticRightShiftAttribute ? 
static_cast(attribute()) : nullptr; + } + const tosaFb::BitwiseAndAttribute *attribute_as_BitwiseAndAttribute() const + { + return attribute_type() == tosaFb::Attribute::BitwiseAndAttribute ? static_cast(attribute()) : nullptr; + } + const tosaFb::BitwiseOrAttribute *attribute_as_BitwiseOrAttribute() const + { + return attribute_type() == tosaFb::Attribute::BitwiseOrAttribute ? static_cast(attribute()) : nullptr; + } + const tosaFb::BitwiseXorAttribute *attribute_as_BitwiseXorAttribute() const + { + return attribute_type() == tosaFb::Attribute::BitwiseXorAttribute ? static_cast(attribute()) : nullptr; + } + const tosaFb::IntDivAttribute *attribute_as_IntDivAttribute() const + { + return attribute_type() == tosaFb::Attribute::IntDivAttribute ? static_cast(attribute()) : nullptr; + } + const tosaFb::LogicalAndAttribute *attribute_as_LogicalAndAttribute() const + { + return attribute_type() == tosaFb::Attribute::LogicalAndAttribute ? static_cast(attribute()) : nullptr; + } + const tosaFb::LogicalLeftShiftAttribute *attribute_as_LogicalLeftShiftAttribute() const + { + return attribute_type() == tosaFb::Attribute::LogicalLeftShiftAttribute ? static_cast(attribute()) : nullptr; + } + const tosaFb::LogicalRightShiftAttribute *attribute_as_LogicalRightShiftAttribute() const + { + return attribute_type() == tosaFb::Attribute::LogicalRightShiftAttribute ? static_cast(attribute()) : nullptr; + } + const tosaFb::LogicalOrAttribute *attribute_as_LogicalOrAttribute() const + { + return attribute_type() == tosaFb::Attribute::LogicalOrAttribute ? static_cast(attribute()) : nullptr; + } + const tosaFb::LogicalXorAttribute *attribute_as_LogicalXorAttribute() const + { + return attribute_type() == tosaFb::Attribute::LogicalXorAttribute ? static_cast(attribute()) : nullptr; + } + const tosaFb::MaximumAttribute *attribute_as_MaximumAttribute() const + { + return attribute_type() == tosaFb::Attribute::MaximumAttribute ? 
static_cast(attribute()) : nullptr; + } + const tosaFb::MinimumAttribute *attribute_as_MinimumAttribute() const + { + return attribute_type() == tosaFb::Attribute::MinimumAttribute ? static_cast(attribute()) : nullptr; + } + const tosaFb::MulAttribute *attribute_as_MulAttribute() const + { + return attribute_type() == tosaFb::Attribute::MulAttribute ? static_cast(attribute()) : nullptr; + } + const tosaFb::PowAttribute *attribute_as_PowAttribute() const + { + return attribute_type() == tosaFb::Attribute::PowAttribute ? static_cast(attribute()) : nullptr; + } + const tosaFb::SubAttribute *attribute_as_SubAttribute() const + { + return attribute_type() == tosaFb::Attribute::SubAttribute ? static_cast(attribute()) : nullptr; + } + const tosaFb::TableAttribute *attribute_as_TableAttribute() const + { + return attribute_type() == tosaFb::Attribute::TableAttribute ? static_cast(attribute()) : nullptr; + } + const tosaFb::AbsAttribute *attribute_as_AbsAttribute() const + { + return attribute_type() == tosaFb::Attribute::AbsAttribute ? static_cast(attribute()) : nullptr; + } + const tosaFb::BitwiseNotAttribute *attribute_as_BitwiseNotAttribute() const + { + return attribute_type() == tosaFb::Attribute::BitwiseNotAttribute ? static_cast(attribute()) : nullptr; + } + const tosaFb::CeilAttribute *attribute_as_CeilAttribute() const + { + return attribute_type() == tosaFb::Attribute::CeilAttribute ? static_cast(attribute()) : nullptr; + } + const tosaFb::ClzAttribute *attribute_as_ClzAttribute() const + { + return attribute_type() == tosaFb::Attribute::ClzAttribute ? static_cast(attribute()) : nullptr; + } + const tosaFb::CosAttribute *attribute_as_CosAttribute() const + { + return attribute_type() == tosaFb::Attribute::CosAttribute ? static_cast(attribute()) : nullptr; + } + const tosaFb::ExpAttribute *attribute_as_ExpAttribute() const + { + return attribute_type() == tosaFb::Attribute::ExpAttribute ? 
static_cast(attribute()) : nullptr; + } + const tosaFb::FloorAttribute *attribute_as_FloorAttribute() const + { + return attribute_type() == tosaFb::Attribute::FloorAttribute ? static_cast(attribute()) : nullptr; + } + const tosaFb::LogAttribute *attribute_as_LogAttribute() const + { + return attribute_type() == tosaFb::Attribute::LogAttribute ? static_cast(attribute()) : nullptr; + } + const tosaFb::LogicalNotAttribute *attribute_as_LogicalNotAttribute() const + { + return attribute_type() == tosaFb::Attribute::LogicalNotAttribute ? static_cast(attribute()) : nullptr; + } + const tosaFb::NegateAttribute *attribute_as_NegateAttribute() const + { + return attribute_type() == tosaFb::Attribute::NegateAttribute ? static_cast(attribute()) : nullptr; + } + const tosaFb::ReciprocalAttribute *attribute_as_ReciprocalAttribute() const + { + return attribute_type() == tosaFb::Attribute::ReciprocalAttribute ? static_cast(attribute()) : nullptr; + } + const tosaFb::RsqrtAttribute *attribute_as_RsqrtAttribute() const + { + return attribute_type() == tosaFb::Attribute::RsqrtAttribute ? static_cast(attribute()) : nullptr; + } + const tosaFb::SinAttribute *attribute_as_SinAttribute() const + { + return attribute_type() == tosaFb::Attribute::SinAttribute ? static_cast(attribute()) : nullptr; + } + const tosaFb::SelectAttribute *attribute_as_SelectAttribute() const + { + return attribute_type() == tosaFb::Attribute::SelectAttribute ? static_cast(attribute()) : nullptr; + } + const tosaFb::EqualAttribute *attribute_as_EqualAttribute() const + { + return attribute_type() == tosaFb::Attribute::EqualAttribute ? static_cast(attribute()) : nullptr; + } + const tosaFb::GreaterAttribute *attribute_as_GreaterAttribute() const + { + return attribute_type() == tosaFb::Attribute::GreaterAttribute ? static_cast(attribute()) : nullptr; + } + const tosaFb::GreaterEqualAttribute *attribute_as_GreaterEqualAttribute() const + { + return attribute_type() == tosaFb::Attribute::GreaterEqualAttribute ? 
static_cast(attribute()) : nullptr; + } + const tosaFb::ReduceAllAttribute *attribute_as_ReduceAllAttribute() const + { + return attribute_type() == tosaFb::Attribute::ReduceAllAttribute ? static_cast(attribute()) : nullptr; } -}; - -inline ::flatbuffers::Offset -CreateTosaTensor(::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset<::flatbuffers::String> name = 0, - ::flatbuffers::Offset<::flatbuffers::Vector> shape = 0, tosaFb::DType type = tosaFb::DType::UNKNOWN, - ::flatbuffers::Offset<::flatbuffers::Vector> data = 0, bool variable = false, bool is_unranked = false) -{ - TosaTensorBuilder builder_(_fbb); - builder_.add_data(data); - builder_.add_type(type); - builder_.add_shape(shape); - builder_.add_name(name); - builder_.add_is_unranked(is_unranked); - builder_.add_variable(variable); - return builder_.Finish(); -} - -inline ::flatbuffers::Offset CreateTosaTensorDirect(::flatbuffers::FlatBufferBuilder &_fbb, - const char *name = nullptr, const std::vector *shape = nullptr, tosaFb::DType type = tosaFb::DType::UNKNOWN, - const std::vector *data = nullptr, bool variable = false, bool is_unranked = false) -{ - auto name__ = name ? _fbb.CreateString(name) : 0; - auto shape__ = shape ? _fbb.CreateVector(*shape) : 0; - if ( data ) + const tosaFb::ReduceAnyAttribute *attribute_as_ReduceAnyAttribute() const { - _fbb.ForceVectorAlignment(data->size(), sizeof(uint8_t), 8); + return attribute_type() == tosaFb::Attribute::ReduceAnyAttribute ? static_cast(attribute()) : nullptr; } - auto data__ = data ? 
_fbb.CreateVector(*data) : 0; - return tosaFb::CreateTosaTensor(_fbb, name__, shape__, type, data__, variable, is_unranked); -} - -struct TosaOperator FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table -{ - typedef TosaOperatorBuilder Builder; - static const ::flatbuffers::TypeTable *MiniReflectTypeTable() { return TosaOperatorTypeTable(); } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE + const tosaFb::ReduceMaxAttribute *attribute_as_ReduceMaxAttribute() const { - VT_OP = 4, - VT_ATTRIBUTE_TYPE = 6, - VT_ATTRIBUTE = 8, - VT_INPUTS = 10, - VT_OUTPUTS = 12 - }; - tosaFb::Op op() const { return static_cast(GetField(VT_OP, 0)); } - tosaFb::Attribute attribute_type() const + return attribute_type() == tosaFb::Attribute::ReduceMaxAttribute ? static_cast(attribute()) : nullptr; + } + const tosaFb::ReduceMinAttribute *attribute_as_ReduceMinAttribute() const { - return static_cast(GetField(VT_ATTRIBUTE_TYPE, 0)); + return attribute_type() == tosaFb::Attribute::ReduceMinAttribute ? static_cast(attribute()) : nullptr; } - const void *attribute() const { return GetPointer(VT_ATTRIBUTE); } - template - const T *attribute_as() const; - const tosaFb::PoolAttribute *attribute_as_PoolAttribute() const + const tosaFb::ReduceProductAttribute *attribute_as_ReduceProductAttribute() const { - return attribute_type() == tosaFb::Attribute::PoolAttribute ? static_cast(attribute()) : nullptr; + return attribute_type() == tosaFb::Attribute::ReduceProductAttribute ? static_cast(attribute()) : nullptr; } - const tosaFb::ConvAttribute *attribute_as_ConvAttribute() const + const tosaFb::ReduceSumAttribute *attribute_as_ReduceSumAttribute() const { - return attribute_type() == tosaFb::Attribute::ConvAttribute ? static_cast(attribute()) : nullptr; + return attribute_type() == tosaFb::Attribute::ReduceSumAttribute ? 
static_cast(attribute()) : nullptr; } - const tosaFb::TransposeConvAttribute *attribute_as_TransposeConvAttribute() const + const tosaFb::ConcatAttribute *attribute_as_ConcatAttribute() const { - return attribute_type() == tosaFb::Attribute::TransposeConvAttribute ? static_cast(attribute()) : nullptr; + return attribute_type() == tosaFb::Attribute::ConcatAttribute ? static_cast(attribute()) : nullptr; } const tosaFb::PadAttribute *attribute_as_PadAttribute() const { return attribute_type() == tosaFb::Attribute::PadAttribute ? static_cast(attribute()) : nullptr; } - const tosaFb::AxisAttribute *attribute_as_AxisAttribute() const - { - return attribute_type() == tosaFb::Attribute::AxisAttribute ? static_cast(attribute()) : nullptr; - } const tosaFb::ReshapeAttribute *attribute_as_ReshapeAttribute() const { return attribute_type() == tosaFb::Attribute::ReshapeAttribute ? static_cast(attribute()) : nullptr; } + const tosaFb::ReverseAttribute *attribute_as_ReverseAttribute() const + { + return attribute_type() == tosaFb::Attribute::ReverseAttribute ? static_cast(attribute()) : nullptr; + } const tosaFb::SliceAttribute *attribute_as_SliceAttribute() const { return attribute_type() == tosaFb::Attribute::SliceAttribute ? static_cast(attribute()) : nullptr; @@ -2141,25 +4774,41 @@ struct TosaOperator FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { return attribute_type() == tosaFb::Attribute::TileAttribute ? static_cast(attribute()) : nullptr; } + const tosaFb::TransposeAttribute *attribute_as_TransposeAttribute() const + { + return attribute_type() == tosaFb::Attribute::TransposeAttribute ? static_cast(attribute()) : nullptr; + } + const tosaFb::GatherAttribute *attribute_as_GatherAttribute() const + { + return attribute_type() == tosaFb::Attribute::GatherAttribute ? static_cast(attribute()) : nullptr; + } + const tosaFb::ScatterAttribute *attribute_as_ScatterAttribute() const + { + return attribute_type() == tosaFb::Attribute::ScatterAttribute ? 
static_cast(attribute()) : nullptr; + } const tosaFb::ResizeAttribute *attribute_as_ResizeAttribute() const { return attribute_type() == tosaFb::Attribute::ResizeAttribute ? static_cast(attribute()) : nullptr; } - const tosaFb::ClampAttribute *attribute_as_ClampAttribute() const + const tosaFb::CastAttribute *attribute_as_CastAttribute() const { - return attribute_type() == tosaFb::Attribute::ClampAttribute ? static_cast(attribute()) : nullptr; + return attribute_type() == tosaFb::Attribute::CastAttribute ? static_cast(attribute()) : nullptr; } const tosaFb::RescaleAttribute *attribute_as_RescaleAttribute() const { return attribute_type() == tosaFb::Attribute::RescaleAttribute ? static_cast(attribute()) : nullptr; } - const tosaFb::MulAttribute *attribute_as_MulAttribute() const + const tosaFb::ConstAttribute *attribute_as_ConstAttribute() const { - return attribute_type() == tosaFb::Attribute::MulAttribute ? static_cast(attribute()) : nullptr; + return attribute_type() == tosaFb::Attribute::ConstAttribute ? static_cast(attribute()) : nullptr; } - const tosaFb::ArithmeticRightShiftAttribute *attribute_as_ArithmeticRightShiftAttribute() const + const tosaFb::IdentityAttribute *attribute_as_IdentityAttribute() const { - return attribute_type() == tosaFb::Attribute::ArithmeticRightShiftAttribute ? static_cast(attribute()) : nullptr; + return attribute_type() == tosaFb::Attribute::IdentityAttribute ? static_cast(attribute()) : nullptr; + } + const tosaFb::CustomAttribute *attribute_as_CustomAttribute() const + { + return attribute_type() == tosaFb::Attribute::CustomAttribute ? static_cast(attribute()) : nullptr; } const tosaFb::CondIfAttribute *attribute_as_CondIfAttribute() const { @@ -2169,33 +4818,21 @@ struct TosaOperator FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { return attribute_type() == tosaFb::Attribute::WhileLoopAttribute ? 
static_cast(attribute()) : nullptr; } - const tosaFb::TransposeAttribute *attribute_as_TransposeAttribute() const - { - return attribute_type() == tosaFb::Attribute::TransposeAttribute ? static_cast(attribute()) : nullptr; - } - const tosaFb::TableAttribute *attribute_as_TableAttribute() const - { - return attribute_type() == tosaFb::Attribute::TableAttribute ? static_cast(attribute()) : nullptr; - } - const tosaFb::MatMulAttribute *attribute_as_MatMulAttribute() const - { - return attribute_type() == tosaFb::Attribute::MatMulAttribute ? static_cast(attribute()) : nullptr; - } - const tosaFb::FullyConnectedAttribute *attribute_as_FullyConnectedAttribute() const + const tosaFb::VariableAttribute *attribute_as_VariableAttribute() const { - return attribute_type() == tosaFb::Attribute::FullyConnectedAttribute ? static_cast(attribute()) : nullptr; + return attribute_type() == tosaFb::Attribute::VariableAttribute ? static_cast(attribute()) : nullptr; } - const tosaFb::NegateAttribute *attribute_as_NegateAttribute() const + const tosaFb::VariableWriteAttribute *attribute_as_VariableWriteAttribute() const { - return attribute_type() == tosaFb::Attribute::NegateAttribute ? static_cast(attribute()) : nullptr; + return attribute_type() == tosaFb::Attribute::VariableWriteAttribute ? static_cast(attribute()) : nullptr; } - const tosaFb::CustomAttribute *attribute_as_CustomAttribute() const + const tosaFb::VariableReadAttribute *attribute_as_VariableReadAttribute() const { - return attribute_type() == tosaFb::Attribute::CustomAttribute ? static_cast(attribute()) : nullptr; + return attribute_type() == tosaFb::Attribute::VariableReadAttribute ? static_cast(attribute()) : nullptr; } - const tosaFb::FFTAttribute *attribute_as_FFTAttribute() const + const tosaFb::ConstShapeAttribute *attribute_as_ConstShapeAttribute() const { - return attribute_type() == tosaFb::Attribute::FFTAttribute ? 
static_cast(attribute()) : nullptr; + return attribute_type() == tosaFb::Attribute::ConstShapeAttribute ? static_cast(attribute()) : nullptr; } const ::flatbuffers::Vector<::flatbuffers::Offset<::flatbuffers::String>> *inputs() const { @@ -2205,44 +4842,351 @@ struct TosaOperator FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { return GetPointer> *>(VT_OUTPUTS); } + const tosaFb::OpLocation *location() const { return GetPointer(VT_LOCATION); } bool Verify(::flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && VerifyField(verifier, VT_OP, 4) && - VerifyField(verifier, VT_ATTRIBUTE_TYPE, 1) && VerifyOffset(verifier, VT_ATTRIBUTE) && - VerifyAttribute(verifier, attribute(), attribute_type()) && VerifyOffset(verifier, VT_INPUTS) && - verifier.VerifyVector(inputs()) && verifier.VerifyVectorOfStrings(inputs()) && VerifyOffset(verifier, VT_OUTPUTS) && - verifier.VerifyVector(outputs()) && verifier.VerifyVectorOfStrings(outputs()) && verifier.EndTable(); + return VerifyTableStart(verifier) && VerifyField(verifier, VT_OP, 4) && VerifyField(verifier, VT_ATTRIBUTE_TYPE, 1) && + VerifyOffset(verifier, VT_ATTRIBUTE) && VerifyAttribute(verifier, attribute(), attribute_type()) && + VerifyOffset(verifier, VT_INPUTS) && verifier.VerifyVector(inputs()) && verifier.VerifyVectorOfStrings(inputs()) && + VerifyOffset(verifier, VT_OUTPUTS) && verifier.VerifyVector(outputs()) && verifier.VerifyVectorOfStrings(outputs()) && + VerifyOffset(verifier, VT_LOCATION) && verifier.VerifyTable(location()) && verifier.EndTable(); } }; template<> -inline const tosaFb::PoolAttribute *TosaOperator::attribute_as() const +inline const tosaFb::ArgMaxAttribute *TosaOperator::attribute_as() const +{ + return attribute_as_ArgMaxAttribute(); +} + +template<> +inline const tosaFb::AvgPool2dAttribute *TosaOperator::attribute_as() const +{ + return attribute_as_AvgPool2dAttribute(); +} + +template<> +inline const tosaFb::Conv2dAttribute *TosaOperator::attribute_as() const +{ + 
return attribute_as_Conv2dAttribute(); +} + +template<> +inline const tosaFb::Conv3dAttribute *TosaOperator::attribute_as() const +{ + return attribute_as_Conv3dAttribute(); +} + +template<> +inline const tosaFb::DepthwiseConv2dAttribute *TosaOperator::attribute_as() const +{ + return attribute_as_DepthwiseConv2dAttribute(); +} + +template<> +inline const tosaFb::FFT2dAttribute *TosaOperator::attribute_as() const +{ + return attribute_as_FFT2dAttribute(); +} + +template<> +inline const tosaFb::MatMulAttribute *TosaOperator::attribute_as() const +{ + return attribute_as_MatMulAttribute(); +} + +template<> +inline const tosaFb::MaxPool2dAttribute *TosaOperator::attribute_as() const +{ + return attribute_as_MaxPool2dAttribute(); +} + +template<> +inline const tosaFb::RFFT2dAttribute *TosaOperator::attribute_as() const +{ + return attribute_as_RFFT2dAttribute(); +} + +template<> +inline const tosaFb::TransposeConv2dAttribute *TosaOperator::attribute_as() const +{ + return attribute_as_TransposeConv2dAttribute(); +} + +template<> +inline const tosaFb::ClampAttribute *TosaOperator::attribute_as() const +{ + return attribute_as_ClampAttribute(); +} + +template<> +inline const tosaFb::ErfAttribute *TosaOperator::attribute_as() const +{ + return attribute_as_ErfAttribute(); +} + +template<> +inline const tosaFb::SigmoidAttribute *TosaOperator::attribute_as() const +{ + return attribute_as_SigmoidAttribute(); +} + +template<> +inline const tosaFb::TanhAttribute *TosaOperator::attribute_as() const +{ + return attribute_as_TanhAttribute(); +} + +template<> +inline const tosaFb::AddAttribute *TosaOperator::attribute_as() const +{ + return attribute_as_AddAttribute(); +} + +template<> +inline const tosaFb::ArithmeticRightShiftAttribute *TosaOperator::attribute_as() const +{ + return attribute_as_ArithmeticRightShiftAttribute(); +} + +template<> +inline const tosaFb::BitwiseAndAttribute *TosaOperator::attribute_as() const +{ + return attribute_as_BitwiseAndAttribute(); +} + 
+template<> +inline const tosaFb::BitwiseOrAttribute *TosaOperator::attribute_as() const +{ + return attribute_as_BitwiseOrAttribute(); +} + +template<> +inline const tosaFb::BitwiseXorAttribute *TosaOperator::attribute_as() const +{ + return attribute_as_BitwiseXorAttribute(); +} + +template<> +inline const tosaFb::IntDivAttribute *TosaOperator::attribute_as() const +{ + return attribute_as_IntDivAttribute(); +} + +template<> +inline const tosaFb::LogicalAndAttribute *TosaOperator::attribute_as() const +{ + return attribute_as_LogicalAndAttribute(); +} + +template<> +inline const tosaFb::LogicalLeftShiftAttribute *TosaOperator::attribute_as() const +{ + return attribute_as_LogicalLeftShiftAttribute(); +} + +template<> +inline const tosaFb::LogicalRightShiftAttribute *TosaOperator::attribute_as() const +{ + return attribute_as_LogicalRightShiftAttribute(); +} + +template<> +inline const tosaFb::LogicalOrAttribute *TosaOperator::attribute_as() const +{ + return attribute_as_LogicalOrAttribute(); +} + +template<> +inline const tosaFb::LogicalXorAttribute *TosaOperator::attribute_as() const +{ + return attribute_as_LogicalXorAttribute(); +} + +template<> +inline const tosaFb::MaximumAttribute *TosaOperator::attribute_as() const +{ + return attribute_as_MaximumAttribute(); +} + +template<> +inline const tosaFb::MinimumAttribute *TosaOperator::attribute_as() const +{ + return attribute_as_MinimumAttribute(); +} + +template<> +inline const tosaFb::MulAttribute *TosaOperator::attribute_as() const +{ + return attribute_as_MulAttribute(); +} + +template<> +inline const tosaFb::PowAttribute *TosaOperator::attribute_as() const +{ + return attribute_as_PowAttribute(); +} + +template<> +inline const tosaFb::SubAttribute *TosaOperator::attribute_as() const +{ + return attribute_as_SubAttribute(); +} + +template<> +inline const tosaFb::TableAttribute *TosaOperator::attribute_as() const +{ + return attribute_as_TableAttribute(); +} + +template<> +inline const tosaFb::AbsAttribute 
*TosaOperator::attribute_as() const +{ + return attribute_as_AbsAttribute(); +} + +template<> +inline const tosaFb::BitwiseNotAttribute *TosaOperator::attribute_as() const +{ + return attribute_as_BitwiseNotAttribute(); +} + +template<> +inline const tosaFb::CeilAttribute *TosaOperator::attribute_as() const +{ + return attribute_as_CeilAttribute(); +} + +template<> +inline const tosaFb::ClzAttribute *TosaOperator::attribute_as() const +{ + return attribute_as_ClzAttribute(); +} + +template<> +inline const tosaFb::CosAttribute *TosaOperator::attribute_as() const +{ + return attribute_as_CosAttribute(); +} + +template<> +inline const tosaFb::ExpAttribute *TosaOperator::attribute_as() const +{ + return attribute_as_ExpAttribute(); +} + +template<> +inline const tosaFb::FloorAttribute *TosaOperator::attribute_as() const +{ + return attribute_as_FloorAttribute(); +} + +template<> +inline const tosaFb::LogAttribute *TosaOperator::attribute_as() const +{ + return attribute_as_LogAttribute(); +} + +template<> +inline const tosaFb::LogicalNotAttribute *TosaOperator::attribute_as() const +{ + return attribute_as_LogicalNotAttribute(); +} + +template<> +inline const tosaFb::NegateAttribute *TosaOperator::attribute_as() const +{ + return attribute_as_NegateAttribute(); +} + +template<> +inline const tosaFb::ReciprocalAttribute *TosaOperator::attribute_as() const { - return attribute_as_PoolAttribute(); + return attribute_as_ReciprocalAttribute(); } template<> -inline const tosaFb::ConvAttribute *TosaOperator::attribute_as() const +inline const tosaFb::RsqrtAttribute *TosaOperator::attribute_as() const { - return attribute_as_ConvAttribute(); + return attribute_as_RsqrtAttribute(); } template<> -inline const tosaFb::TransposeConvAttribute *TosaOperator::attribute_as() const +inline const tosaFb::SinAttribute *TosaOperator::attribute_as() const { - return attribute_as_TransposeConvAttribute(); + return attribute_as_SinAttribute(); } template<> -inline const tosaFb::PadAttribute 
*TosaOperator::attribute_as() const +inline const tosaFb::SelectAttribute *TosaOperator::attribute_as() const { - return attribute_as_PadAttribute(); + return attribute_as_SelectAttribute(); +} + +template<> +inline const tosaFb::EqualAttribute *TosaOperator::attribute_as() const +{ + return attribute_as_EqualAttribute(); +} + +template<> +inline const tosaFb::GreaterAttribute *TosaOperator::attribute_as() const +{ + return attribute_as_GreaterAttribute(); +} + +template<> +inline const tosaFb::GreaterEqualAttribute *TosaOperator::attribute_as() const +{ + return attribute_as_GreaterEqualAttribute(); +} + +template<> +inline const tosaFb::ReduceAllAttribute *TosaOperator::attribute_as() const +{ + return attribute_as_ReduceAllAttribute(); +} + +template<> +inline const tosaFb::ReduceAnyAttribute *TosaOperator::attribute_as() const +{ + return attribute_as_ReduceAnyAttribute(); +} + +template<> +inline const tosaFb::ReduceMaxAttribute *TosaOperator::attribute_as() const +{ + return attribute_as_ReduceMaxAttribute(); +} + +template<> +inline const tosaFb::ReduceMinAttribute *TosaOperator::attribute_as() const +{ + return attribute_as_ReduceMinAttribute(); +} + +template<> +inline const tosaFb::ReduceProductAttribute *TosaOperator::attribute_as() const +{ + return attribute_as_ReduceProductAttribute(); +} + +template<> +inline const tosaFb::ReduceSumAttribute *TosaOperator::attribute_as() const +{ + return attribute_as_ReduceSumAttribute(); +} + +template<> +inline const tosaFb::ConcatAttribute *TosaOperator::attribute_as() const +{ + return attribute_as_ConcatAttribute(); } template<> -inline const tosaFb::AxisAttribute *TosaOperator::attribute_as() const +inline const tosaFb::PadAttribute *TosaOperator::attribute_as() const { - return attribute_as_AxisAttribute(); + return attribute_as_PadAttribute(); } template<> @@ -2251,6 +5195,12 @@ inline const tosaFb::ReshapeAttribute *TosaOperator::attribute_as +inline const tosaFb::ReverseAttribute 
*TosaOperator::attribute_as() const +{ + return attribute_as_ReverseAttribute(); +} + template<> inline const tosaFb::SliceAttribute *TosaOperator::attribute_as() const { @@ -2264,87 +5214,93 @@ inline const tosaFb::TileAttribute *TosaOperator::attribute_as -inline const tosaFb::ResizeAttribute *TosaOperator::attribute_as() const +inline const tosaFb::TransposeAttribute *TosaOperator::attribute_as() const { - return attribute_as_ResizeAttribute(); + return attribute_as_TransposeAttribute(); } template<> -inline const tosaFb::ClampAttribute *TosaOperator::attribute_as() const +inline const tosaFb::GatherAttribute *TosaOperator::attribute_as() const { - return attribute_as_ClampAttribute(); + return attribute_as_GatherAttribute(); } template<> -inline const tosaFb::RescaleAttribute *TosaOperator::attribute_as() const +inline const tosaFb::ScatterAttribute *TosaOperator::attribute_as() const { - return attribute_as_RescaleAttribute(); + return attribute_as_ScatterAttribute(); } template<> -inline const tosaFb::MulAttribute *TosaOperator::attribute_as() const +inline const tosaFb::ResizeAttribute *TosaOperator::attribute_as() const { - return attribute_as_MulAttribute(); + return attribute_as_ResizeAttribute(); } template<> -inline const tosaFb::ArithmeticRightShiftAttribute *TosaOperator::attribute_as() const +inline const tosaFb::CastAttribute *TosaOperator::attribute_as() const { - return attribute_as_ArithmeticRightShiftAttribute(); + return attribute_as_CastAttribute(); } template<> -inline const tosaFb::CondIfAttribute *TosaOperator::attribute_as() const +inline const tosaFb::RescaleAttribute *TosaOperator::attribute_as() const { - return attribute_as_CondIfAttribute(); + return attribute_as_RescaleAttribute(); } template<> -inline const tosaFb::WhileLoopAttribute *TosaOperator::attribute_as() const +inline const tosaFb::ConstAttribute *TosaOperator::attribute_as() const { - return attribute_as_WhileLoopAttribute(); + return attribute_as_ConstAttribute(); } 
template<> -inline const tosaFb::TransposeAttribute *TosaOperator::attribute_as() const +inline const tosaFb::IdentityAttribute *TosaOperator::attribute_as() const { - return attribute_as_TransposeAttribute(); + return attribute_as_IdentityAttribute(); } template<> -inline const tosaFb::TableAttribute *TosaOperator::attribute_as() const +inline const tosaFb::CustomAttribute *TosaOperator::attribute_as() const { - return attribute_as_TableAttribute(); + return attribute_as_CustomAttribute(); } template<> -inline const tosaFb::MatMulAttribute *TosaOperator::attribute_as() const +inline const tosaFb::CondIfAttribute *TosaOperator::attribute_as() const { - return attribute_as_MatMulAttribute(); + return attribute_as_CondIfAttribute(); } template<> -inline const tosaFb::FullyConnectedAttribute *TosaOperator::attribute_as() const +inline const tosaFb::WhileLoopAttribute *TosaOperator::attribute_as() const { - return attribute_as_FullyConnectedAttribute(); + return attribute_as_WhileLoopAttribute(); } template<> -inline const tosaFb::NegateAttribute *TosaOperator::attribute_as() const +inline const tosaFb::VariableAttribute *TosaOperator::attribute_as() const { - return attribute_as_NegateAttribute(); + return attribute_as_VariableAttribute(); } template<> -inline const tosaFb::CustomAttribute *TosaOperator::attribute_as() const +inline const tosaFb::VariableWriteAttribute *TosaOperator::attribute_as() const { - return attribute_as_CustomAttribute(); + return attribute_as_VariableWriteAttribute(); +} + +template<> +inline const tosaFb::VariableReadAttribute *TosaOperator::attribute_as() const +{ + return attribute_as_VariableReadAttribute(); } template<> -inline const tosaFb::FFTAttribute *TosaOperator::attribute_as() const +inline const tosaFb::ConstShapeAttribute *TosaOperator::attribute_as() const { - return attribute_as_FFTAttribute(); + return attribute_as_ConstShapeAttribute(); } struct TosaOperatorBuilder @@ -2366,6 +5322,10 @@ struct TosaOperatorBuilder { 
fbb_.AddOffset(TosaOperator::VT_OUTPUTS, outputs); } + void add_location(::flatbuffers::Offset location) + { + fbb_.AddOffset(TosaOperator::VT_LOCATION, location); + } explicit TosaOperatorBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } ::flatbuffers::Offset Finish() { @@ -2378,9 +5338,11 @@ struct TosaOperatorBuilder inline ::flatbuffers::Offset CreateTosaOperator(::flatbuffers::FlatBufferBuilder &_fbb, tosaFb::Op op = tosaFb::Op::UNKNOWN, tosaFb::Attribute attribute_type = tosaFb::Attribute::NONE, ::flatbuffers::Offset attribute = 0, ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<::flatbuffers::String>>> inputs = 0, - ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<::flatbuffers::String>>> outputs = 0) + ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<::flatbuffers::String>>> outputs = 0, + ::flatbuffers::Offset location = 0) { TosaOperatorBuilder builder_(_fbb); + builder_.add_location(location); builder_.add_outputs(outputs); builder_.add_inputs(inputs); builder_.add_attribute(attribute); @@ -2392,11 +5354,11 @@ inline ::flatbuffers::Offset CreateTosaOperator(::flatbuffers::Fla inline ::flatbuffers::Offset CreateTosaOperatorDirect(::flatbuffers::FlatBufferBuilder &_fbb, tosaFb::Op op = tosaFb::Op::UNKNOWN, tosaFb::Attribute attribute_type = tosaFb::Attribute::NONE, ::flatbuffers::Offset attribute = 0, const std::vector<::flatbuffers::Offset<::flatbuffers::String>> *inputs = nullptr, - const std::vector<::flatbuffers::Offset<::flatbuffers::String>> *outputs = nullptr) + const std::vector<::flatbuffers::Offset<::flatbuffers::String>> *outputs = nullptr, ::flatbuffers::Offset location = 0) { auto inputs__ = inputs ? _fbb.CreateVector<::flatbuffers::Offset<::flatbuffers::String>>(*inputs) : 0; auto outputs__ = outputs ? 
_fbb.CreateVector<::flatbuffers::Offset<::flatbuffers::String>>(*outputs) : 0; - return tosaFb::CreateTosaOperator(_fbb, op, attribute_type, attribute, inputs__, outputs__); + return tosaFb::CreateTosaOperator(_fbb, op, attribute_type, attribute, inputs__, outputs__, location); } struct TosaBasicBlock FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table @@ -2409,7 +5371,8 @@ struct TosaBasicBlock FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table VT_OPERATORS = 6, VT_TENSORS = 8, VT_INPUTS = 10, - VT_OUTPUTS = 12 + VT_OUTPUTS = 12, + VT_SHAPES = 14 }; const ::flatbuffers::String *name() const { return GetPointer(VT_NAME); } const ::flatbuffers::Vector<::flatbuffers::Offset> *operators() const @@ -2428,13 +5391,18 @@ struct TosaBasicBlock FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { return GetPointer> *>(VT_OUTPUTS); } + const ::flatbuffers::Vector<::flatbuffers::Offset> *shapes() const + { + return GetPointer> *>(VT_SHAPES); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_NAME) && verifier.VerifyString(name()) && VerifyOffset(verifier, VT_OPERATORS) && verifier.VerifyVector(operators()) && verifier.VerifyVectorOfTables(operators()) && VerifyOffset(verifier, VT_TENSORS) && verifier.VerifyVector(tensors()) && verifier.VerifyVectorOfTables(tensors()) && VerifyOffset(verifier, VT_INPUTS) && verifier.VerifyVector(inputs()) && verifier.VerifyVectorOfStrings(inputs()) && VerifyOffset(verifier, VT_OUTPUTS) && - verifier.VerifyVector(outputs()) && verifier.VerifyVectorOfStrings(outputs()) && verifier.EndTable(); + verifier.VerifyVector(outputs()) && verifier.VerifyVectorOfStrings(outputs()) && VerifyOffset(verifier, VT_SHAPES) && + verifier.VerifyVector(shapes()) && verifier.VerifyVectorOfTables(shapes()) && verifier.EndTable(); } }; @@ -2460,6 +5428,10 @@ struct TosaBasicBlockBuilder { fbb_.AddOffset(TosaBasicBlock::VT_OUTPUTS, outputs); } + void 
add_shapes(::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset>> shapes) + { + fbb_.AddOffset(TosaBasicBlock::VT_SHAPES, shapes); + } explicit TosaBasicBlockBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } ::flatbuffers::Offset Finish() { @@ -2474,9 +5446,11 @@ CreateTosaBasicBlock(::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offs ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset>> operators = 0, ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset>> tensors = 0, ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<::flatbuffers::String>>> inputs = 0, - ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<::flatbuffers::String>>> outputs = 0) + ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<::flatbuffers::String>>> outputs = 0, + ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset>> shapes = 0) { TosaBasicBlockBuilder builder_(_fbb); + builder_.add_shapes(shapes); builder_.add_outputs(outputs); builder_.add_inputs(inputs); builder_.add_tensors(tensors); @@ -2489,14 +5463,16 @@ inline ::flatbuffers::Offset CreateTosaBasicBlockDirect(::flatbu const char *name = nullptr, const std::vector<::flatbuffers::Offset> *operators = nullptr, const std::vector<::flatbuffers::Offset> *tensors = nullptr, const std::vector<::flatbuffers::Offset<::flatbuffers::String>> *inputs = nullptr, - const std::vector<::flatbuffers::Offset<::flatbuffers::String>> *outputs = nullptr) + const std::vector<::flatbuffers::Offset<::flatbuffers::String>> *outputs = nullptr, + const std::vector<::flatbuffers::Offset> *shapes = nullptr) { auto name__ = name ? _fbb.CreateString(name) : 0; auto operators__ = operators ? _fbb.CreateVector<::flatbuffers::Offset>(*operators) : 0; auto tensors__ = tensors ? _fbb.CreateVector<::flatbuffers::Offset>(*tensors) : 0; auto inputs__ = inputs ? 
_fbb.CreateVector<::flatbuffers::Offset<::flatbuffers::String>>(*inputs) : 0; auto outputs__ = outputs ? _fbb.CreateVector<::flatbuffers::Offset<::flatbuffers::String>>(*outputs) : 0; - return tosaFb::CreateTosaBasicBlock(_fbb, name__, operators__, tensors__, inputs__, outputs__); + auto shapes__ = shapes ? _fbb.CreateVector<::flatbuffers::Offset>(*shapes) : 0; + return tosaFb::CreateTosaBasicBlock(_fbb, name__, operators__, tensors__, inputs__, outputs__, shapes__); } struct TosaRegion FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table @@ -2622,31 +5598,286 @@ inline bool VerifyAttribute(::flatbuffers::Verifier &verifier, const void *obj, { case Attribute::NONE: { - return true; + return true; + } + case Attribute::ArgMaxAttribute: + { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case Attribute::AvgPool2dAttribute: + { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case Attribute::Conv2dAttribute: + { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case Attribute::Conv3dAttribute: + { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case Attribute::DepthwiseConv2dAttribute: + { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case Attribute::FFT2dAttribute: + { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case Attribute::MatMulAttribute: + { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case Attribute::MaxPool2dAttribute: + { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case Attribute::RFFT2dAttribute: + { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case Attribute::TransposeConv2dAttribute: + { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case Attribute::ClampAttribute: + { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } 
+ case Attribute::ErfAttribute: + { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case Attribute::SigmoidAttribute: + { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case Attribute::TanhAttribute: + { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case Attribute::AddAttribute: + { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case Attribute::ArithmeticRightShiftAttribute: + { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case Attribute::BitwiseAndAttribute: + { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case Attribute::BitwiseOrAttribute: + { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case Attribute::BitwiseXorAttribute: + { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case Attribute::IntDivAttribute: + { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case Attribute::LogicalAndAttribute: + { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case Attribute::LogicalLeftShiftAttribute: + { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case Attribute::LogicalRightShiftAttribute: + { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case Attribute::LogicalOrAttribute: + { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case Attribute::LogicalXorAttribute: + { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case Attribute::MaximumAttribute: + { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case Attribute::MinimumAttribute: + { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case Attribute::MulAttribute: + { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); 
+ } + case Attribute::PowAttribute: + { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case Attribute::SubAttribute: + { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case Attribute::TableAttribute: + { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case Attribute::AbsAttribute: + { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case Attribute::BitwiseNotAttribute: + { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case Attribute::CeilAttribute: + { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case Attribute::ClzAttribute: + { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case Attribute::CosAttribute: + { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case Attribute::ExpAttribute: + { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case Attribute::FloorAttribute: + { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case Attribute::LogAttribute: + { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case Attribute::LogicalNotAttribute: + { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case Attribute::NegateAttribute: + { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case Attribute::ReciprocalAttribute: + { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case Attribute::RsqrtAttribute: + { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case Attribute::SinAttribute: + { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case Attribute::SelectAttribute: + { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case Attribute::EqualAttribute: + { + auto ptr = 
reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case Attribute::GreaterAttribute: + { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case Attribute::GreaterEqualAttribute: + { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case Attribute::ReduceAllAttribute: + { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); } - case Attribute::PoolAttribute: + case Attribute::ReduceAnyAttribute: { - auto ptr = reinterpret_cast(obj); + auto ptr = reinterpret_cast(obj); return verifier.VerifyTable(ptr); } - case Attribute::ConvAttribute: + case Attribute::ReduceMaxAttribute: { - auto ptr = reinterpret_cast(obj); + auto ptr = reinterpret_cast(obj); return verifier.VerifyTable(ptr); } - case Attribute::TransposeConvAttribute: + case Attribute::ReduceMinAttribute: { - auto ptr = reinterpret_cast(obj); + auto ptr = reinterpret_cast(obj); return verifier.VerifyTable(ptr); } - case Attribute::PadAttribute: + case Attribute::ReduceProductAttribute: { - auto ptr = reinterpret_cast(obj); + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case Attribute::ReduceSumAttribute: + { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case Attribute::ConcatAttribute: + { + auto ptr = reinterpret_cast(obj); return verifier.VerifyTable(ptr); } - case Attribute::AxisAttribute: + case Attribute::PadAttribute: { - auto ptr = reinterpret_cast(obj); + auto ptr = reinterpret_cast(obj); return verifier.VerifyTable(ptr); } case Attribute::ReshapeAttribute: @@ -2654,6 +5885,11 @@ inline bool VerifyAttribute(::flatbuffers::Verifier &verifier, const void *obj, auto ptr = reinterpret_cast(obj); return verifier.VerifyTable(ptr); } + case Attribute::ReverseAttribute: + { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } case Attribute::SliceAttribute: { auto ptr = reinterpret_cast(obj); @@ -2664,74 +5900,79 @@ inline bool 
VerifyAttribute(::flatbuffers::Verifier &verifier, const void *obj, auto ptr = reinterpret_cast(obj); return verifier.VerifyTable(ptr); } - case Attribute::ResizeAttribute: + case Attribute::TransposeAttribute: { - auto ptr = reinterpret_cast(obj); + auto ptr = reinterpret_cast(obj); return verifier.VerifyTable(ptr); } - case Attribute::ClampAttribute: + case Attribute::GatherAttribute: { - auto ptr = reinterpret_cast(obj); + auto ptr = reinterpret_cast(obj); return verifier.VerifyTable(ptr); } - case Attribute::RescaleAttribute: + case Attribute::ScatterAttribute: { - auto ptr = reinterpret_cast(obj); + auto ptr = reinterpret_cast(obj); return verifier.VerifyTable(ptr); } - case Attribute::MulAttribute: + case Attribute::ResizeAttribute: { - auto ptr = reinterpret_cast(obj); + auto ptr = reinterpret_cast(obj); return verifier.VerifyTable(ptr); } - case Attribute::ArithmeticRightShiftAttribute: + case Attribute::CastAttribute: { - auto ptr = reinterpret_cast(obj); + auto ptr = reinterpret_cast(obj); return verifier.VerifyTable(ptr); } - case Attribute::CondIfAttribute: + case Attribute::RescaleAttribute: { - auto ptr = reinterpret_cast(obj); + auto ptr = reinterpret_cast(obj); return verifier.VerifyTable(ptr); } - case Attribute::WhileLoopAttribute: + case Attribute::ConstAttribute: { - auto ptr = reinterpret_cast(obj); + auto ptr = reinterpret_cast(obj); return verifier.VerifyTable(ptr); } - case Attribute::TransposeAttribute: + case Attribute::IdentityAttribute: { - auto ptr = reinterpret_cast(obj); + auto ptr = reinterpret_cast(obj); return verifier.VerifyTable(ptr); } - case Attribute::TableAttribute: + case Attribute::CustomAttribute: { - auto ptr = reinterpret_cast(obj); + auto ptr = reinterpret_cast(obj); return verifier.VerifyTable(ptr); } - case Attribute::MatMulAttribute: + case Attribute::CondIfAttribute: { - auto ptr = reinterpret_cast(obj); + auto ptr = reinterpret_cast(obj); return verifier.VerifyTable(ptr); } - case 
Attribute::FullyConnectedAttribute: + case Attribute::WhileLoopAttribute: { - auto ptr = reinterpret_cast(obj); + auto ptr = reinterpret_cast(obj); return verifier.VerifyTable(ptr); } - case Attribute::NegateAttribute: + case Attribute::VariableAttribute: { - auto ptr = reinterpret_cast(obj); + auto ptr = reinterpret_cast(obj); return verifier.VerifyTable(ptr); } - case Attribute::CustomAttribute: + case Attribute::VariableWriteAttribute: { - auto ptr = reinterpret_cast(obj); + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case Attribute::VariableReadAttribute: + { + auto ptr = reinterpret_cast(obj); return verifier.VerifyTable(ptr); } - case Attribute::FFTAttribute: + case Attribute::ConstShapeAttribute: { - auto ptr = reinterpret_cast(obj); + auto ptr = reinterpret_cast(obj); return verifier.VerifyTable(ptr); } default: @@ -2739,148 +5980,509 @@ inline bool VerifyAttribute(::flatbuffers::Verifier &verifier, const void *obj, } } -inline bool VerifyAttributeVector(::flatbuffers::Verifier &verifier, - const ::flatbuffers::Vector<::flatbuffers::Offset> *values, const ::flatbuffers::Vector *types) +inline bool VerifyAttributeVector(::flatbuffers::Verifier &verifier, + const ::flatbuffers::Vector<::flatbuffers::Offset> *values, const ::flatbuffers::Vector *types) +{ + if ( !values || !types ) return !values && !types; + if ( values->size() != types->size() ) return false; + for ( ::flatbuffers::uoffset_t i = 0; i < values->size(); ++i ) + { + if ( !VerifyAttribute(verifier, values->Get(i), types->GetEnum(i)) ) + { + return false; + } + } + return true; +} + +inline const ::flatbuffers::TypeTable *DTypeTypeTable() +{ + static const ::flatbuffers::TypeCode type_codes[] = {{::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, + {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, + {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, + 
{::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, + {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}}; + static const ::flatbuffers::TypeFunction type_refs[] = {tosaFb::DTypeTypeTable}; + static const char *const names[] = {"UNKNOWN", "BOOL", "INT4", "INT8", "INT16", "INT32", "INT48", "FP32", "FP16", + "BF16", "SHAPE", "FP8E4M3", "FP8E5M2"}; + static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_ENUM, 13, type_codes, type_refs, nullptr, nullptr, names}; + return &tt; +} + +inline const ::flatbuffers::TypeTable *ResizeModeTypeTable() +{ + static const ::flatbuffers::TypeCode type_codes[] = { + {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}}; + static const ::flatbuffers::TypeFunction type_refs[] = {tosaFb::ResizeModeTypeTable}; + static const char *const names[] = {"UNKNOWN", "NEAREST", "BILINEAR"}; + static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_ENUM, 3, type_codes, type_refs, nullptr, nullptr, names}; + return &tt; +} + +inline const ::flatbuffers::TypeTable *NanPropagationModeTypeTable() +{ + static const ::flatbuffers::TypeCode type_codes[] = { + {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}}; + static const ::flatbuffers::TypeFunction type_refs[] = {tosaFb::NanPropagationModeTypeTable}; + static const char *const names[] = {"UNKNOWN", "PROPAGATE", "IGNORE"}; + static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_ENUM, 3, type_codes, type_refs, nullptr, nullptr, names}; + return &tt; +} + +inline const ::flatbuffers::TypeTable *RoundingModeTypeTable() +{ + static const ::flatbuffers::TypeCode type_codes[] = {{::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, + {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}}; + static const ::flatbuffers::TypeFunction type_refs[] = {tosaFb::RoundingModeTypeTable}; + static const char *const names[] = {"UNKNOWN", 
"SINGLE_ROUND", "INEXACT_ROUND", "DOUBLE_ROUND"}; + static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_ENUM, 4, type_codes, type_refs, nullptr, nullptr, names}; + return &tt; +} + +inline const ::flatbuffers::TypeTable *OpTypeTable() +{ + static const ::flatbuffers::TypeCode type_codes[] = {{::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, + {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, + {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, + {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, + {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, + {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, + {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, + {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, + {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, + {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, + {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, + {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, + {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, + {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, + {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, + {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, + {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, + {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 
0}, {::flatbuffers::ET_UINT, 0, 0}, + {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, + {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, + {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, + {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, + {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, + {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, + {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, + {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}}; + static const ::flatbuffers::TypeFunction type_refs[] = {tosaFb::OpTypeTable}; + static const char *const names[] = {"UNKNOWN", "ARGMAX", "AVG_POOL2D", "CONV2D", "CONV3D", "DEPTHWISE_CONV2D", "FFT2D", + "MATMUL", "MAX_POOL2D", "RFFT2D", "TRANSPOSE_CONV2D", "CLAMP", "ERF", "SIGMOID", "TANH", "ADD", "ARITHMETIC_RIGHT_SHIFT", + "BITWISE_AND", "BITWISE_OR", "BITWISE_XOR", "INTDIV", "LOGICAL_AND", "LOGICAL_LEFT_SHIFT", "LOGICAL_RIGHT_SHIFT", + "LOGICAL_OR", "LOGICAL_XOR", "MAXIMUM", "MINIMUM", "MUL", "POW", "SUB", "TABLE", "ABS", "BITWISE_NOT", "CEIL", "CLZ", + "COS", "EXP", "FLOOR", "LOG", "LOGICAL_NOT", "NEGATE", "RECIPROCAL", "RSQRT", "SIN", "SELECT", "EQUAL", "GREATER", + "GREATER_EQUAL", "REDUCE_ALL", "REDUCE_ANY", "REDUCE_MAX", "REDUCE_MIN", "REDUCE_PRODUCT", "REDUCE_SUM", "CONCAT", + "PAD", "RESHAPE", "REVERSE", "SLICE", "TILE", "TRANSPOSE", "GATHER", "SCATTER", "RESIZE", "CAST", "RESCALE", "CONST", + "IDENTITY", "CUSTOM", "COND_IF", "WHILE_LOOP", "VARIABLE", "VARIABLE_WRITE", "VARIABLE_READ", "CONST_SHAPE"}; + static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_ENUM, 76, type_codes, type_refs, nullptr, nullptr, names}; + return &tt; +} + +inline const ::flatbuffers::TypeTable 
*AttributeTypeTable() +{ + static const ::flatbuffers::TypeCode type_codes[] = {{::flatbuffers::ET_SEQUENCE, 0, -1}, + {::flatbuffers::ET_SEQUENCE, 0, 0}, {::flatbuffers::ET_SEQUENCE, 0, 1}, {::flatbuffers::ET_SEQUENCE, 0, 2}, + {::flatbuffers::ET_SEQUENCE, 0, 3}, {::flatbuffers::ET_SEQUENCE, 0, 4}, {::flatbuffers::ET_SEQUENCE, 0, 5}, + {::flatbuffers::ET_SEQUENCE, 0, 6}, {::flatbuffers::ET_SEQUENCE, 0, 7}, {::flatbuffers::ET_SEQUENCE, 0, 8}, + {::flatbuffers::ET_SEQUENCE, 0, 9}, {::flatbuffers::ET_SEQUENCE, 0, 10}, {::flatbuffers::ET_SEQUENCE, 0, 11}, + {::flatbuffers::ET_SEQUENCE, 0, 12}, {::flatbuffers::ET_SEQUENCE, 0, 13}, {::flatbuffers::ET_SEQUENCE, 0, 14}, + {::flatbuffers::ET_SEQUENCE, 0, 15}, {::flatbuffers::ET_SEQUENCE, 0, 16}, {::flatbuffers::ET_SEQUENCE, 0, 17}, + {::flatbuffers::ET_SEQUENCE, 0, 18}, {::flatbuffers::ET_SEQUENCE, 0, 19}, {::flatbuffers::ET_SEQUENCE, 0, 20}, + {::flatbuffers::ET_SEQUENCE, 0, 21}, {::flatbuffers::ET_SEQUENCE, 0, 22}, {::flatbuffers::ET_SEQUENCE, 0, 23}, + {::flatbuffers::ET_SEQUENCE, 0, 24}, {::flatbuffers::ET_SEQUENCE, 0, 25}, {::flatbuffers::ET_SEQUENCE, 0, 26}, + {::flatbuffers::ET_SEQUENCE, 0, 27}, {::flatbuffers::ET_SEQUENCE, 0, 28}, {::flatbuffers::ET_SEQUENCE, 0, 29}, + {::flatbuffers::ET_SEQUENCE, 0, 30}, {::flatbuffers::ET_SEQUENCE, 0, 31}, {::flatbuffers::ET_SEQUENCE, 0, 32}, + {::flatbuffers::ET_SEQUENCE, 0, 33}, {::flatbuffers::ET_SEQUENCE, 0, 34}, {::flatbuffers::ET_SEQUENCE, 0, 35}, + {::flatbuffers::ET_SEQUENCE, 0, 36}, {::flatbuffers::ET_SEQUENCE, 0, 37}, {::flatbuffers::ET_SEQUENCE, 0, 38}, + {::flatbuffers::ET_SEQUENCE, 0, 39}, {::flatbuffers::ET_SEQUENCE, 0, 40}, {::flatbuffers::ET_SEQUENCE, 0, 41}, + {::flatbuffers::ET_SEQUENCE, 0, 42}, {::flatbuffers::ET_SEQUENCE, 0, 43}, {::flatbuffers::ET_SEQUENCE, 0, 44}, + {::flatbuffers::ET_SEQUENCE, 0, 45}, {::flatbuffers::ET_SEQUENCE, 0, 46}, {::flatbuffers::ET_SEQUENCE, 0, 47}, + {::flatbuffers::ET_SEQUENCE, 0, 48}, {::flatbuffers::ET_SEQUENCE, 0, 49}, 
{::flatbuffers::ET_SEQUENCE, 0, 50}, + {::flatbuffers::ET_SEQUENCE, 0, 51}, {::flatbuffers::ET_SEQUENCE, 0, 52}, {::flatbuffers::ET_SEQUENCE, 0, 53}, + {::flatbuffers::ET_SEQUENCE, 0, 54}, {::flatbuffers::ET_SEQUENCE, 0, 55}, {::flatbuffers::ET_SEQUENCE, 0, 56}, + {::flatbuffers::ET_SEQUENCE, 0, 57}, {::flatbuffers::ET_SEQUENCE, 0, 58}, {::flatbuffers::ET_SEQUENCE, 0, 59}, + {::flatbuffers::ET_SEQUENCE, 0, 60}, {::flatbuffers::ET_SEQUENCE, 0, 61}, {::flatbuffers::ET_SEQUENCE, 0, 62}, + {::flatbuffers::ET_SEQUENCE, 0, 63}, {::flatbuffers::ET_SEQUENCE, 0, 64}, {::flatbuffers::ET_SEQUENCE, 0, 65}, + {::flatbuffers::ET_SEQUENCE, 0, 66}, {::flatbuffers::ET_SEQUENCE, 0, 67}, {::flatbuffers::ET_SEQUENCE, 0, 68}, + {::flatbuffers::ET_SEQUENCE, 0, 69}, {::flatbuffers::ET_SEQUENCE, 0, 70}, {::flatbuffers::ET_SEQUENCE, 0, 71}, + {::flatbuffers::ET_SEQUENCE, 0, 72}, {::flatbuffers::ET_SEQUENCE, 0, 73}, {::flatbuffers::ET_SEQUENCE, 0, 74}}; + static const ::flatbuffers::TypeFunction type_refs[] = {tosaFb::ArgMaxAttributeTypeTable, + tosaFb::AvgPool2dAttributeTypeTable, tosaFb::Conv2dAttributeTypeTable, tosaFb::Conv3dAttributeTypeTable, + tosaFb::DepthwiseConv2dAttributeTypeTable, tosaFb::FFT2dAttributeTypeTable, tosaFb::MatMulAttributeTypeTable, + tosaFb::MaxPool2dAttributeTypeTable, tosaFb::RFFT2dAttributeTypeTable, tosaFb::TransposeConv2dAttributeTypeTable, + tosaFb::ClampAttributeTypeTable, tosaFb::ErfAttributeTypeTable, tosaFb::SigmoidAttributeTypeTable, + tosaFb::TanhAttributeTypeTable, tosaFb::AddAttributeTypeTable, tosaFb::ArithmeticRightShiftAttributeTypeTable, + tosaFb::BitwiseAndAttributeTypeTable, tosaFb::BitwiseOrAttributeTypeTable, tosaFb::BitwiseXorAttributeTypeTable, + tosaFb::IntDivAttributeTypeTable, tosaFb::LogicalAndAttributeTypeTable, tosaFb::LogicalLeftShiftAttributeTypeTable, + tosaFb::LogicalRightShiftAttributeTypeTable, tosaFb::LogicalOrAttributeTypeTable, tosaFb::LogicalXorAttributeTypeTable, + tosaFb::MaximumAttributeTypeTable, 
tosaFb::MinimumAttributeTypeTable, tosaFb::MulAttributeTypeTable, + tosaFb::PowAttributeTypeTable, tosaFb::SubAttributeTypeTable, tosaFb::TableAttributeTypeTable, tosaFb::AbsAttributeTypeTable, + tosaFb::BitwiseNotAttributeTypeTable, tosaFb::CeilAttributeTypeTable, tosaFb::ClzAttributeTypeTable, + tosaFb::CosAttributeTypeTable, tosaFb::ExpAttributeTypeTable, tosaFb::FloorAttributeTypeTable, tosaFb::LogAttributeTypeTable, + tosaFb::LogicalNotAttributeTypeTable, tosaFb::NegateAttributeTypeTable, tosaFb::ReciprocalAttributeTypeTable, + tosaFb::RsqrtAttributeTypeTable, tosaFb::SinAttributeTypeTable, tosaFb::SelectAttributeTypeTable, + tosaFb::EqualAttributeTypeTable, tosaFb::GreaterAttributeTypeTable, tosaFb::GreaterEqualAttributeTypeTable, + tosaFb::ReduceAllAttributeTypeTable, tosaFb::ReduceAnyAttributeTypeTable, tosaFb::ReduceMaxAttributeTypeTable, + tosaFb::ReduceMinAttributeTypeTable, tosaFb::ReduceProductAttributeTypeTable, tosaFb::ReduceSumAttributeTypeTable, + tosaFb::ConcatAttributeTypeTable, tosaFb::PadAttributeTypeTable, tosaFb::ReshapeAttributeTypeTable, + tosaFb::ReverseAttributeTypeTable, tosaFb::SliceAttributeTypeTable, tosaFb::TileAttributeTypeTable, + tosaFb::TransposeAttributeTypeTable, tosaFb::GatherAttributeTypeTable, tosaFb::ScatterAttributeTypeTable, + tosaFb::ResizeAttributeTypeTable, tosaFb::CastAttributeTypeTable, tosaFb::RescaleAttributeTypeTable, + tosaFb::ConstAttributeTypeTable, tosaFb::IdentityAttributeTypeTable, tosaFb::CustomAttributeTypeTable, + tosaFb::CondIfAttributeTypeTable, tosaFb::WhileLoopAttributeTypeTable, tosaFb::VariableAttributeTypeTable, + tosaFb::VariableWriteAttributeTypeTable, tosaFb::VariableReadAttributeTypeTable, tosaFb::ConstShapeAttributeTypeTable}; + static const char *const names[] = {"NONE", "ArgMaxAttribute", "AvgPool2dAttribute", "Conv2dAttribute", "Conv3dAttribute", + "DepthwiseConv2dAttribute", "FFT2dAttribute", "MatMulAttribute", "MaxPool2dAttribute", "RFFT2dAttribute", + "TransposeConv2dAttribute", 
"ClampAttribute", "ErfAttribute", "SigmoidAttribute", "TanhAttribute", "AddAttribute", + "ArithmeticRightShiftAttribute", "BitwiseAndAttribute", "BitwiseOrAttribute", "BitwiseXorAttribute", "IntDivAttribute", + "LogicalAndAttribute", "LogicalLeftShiftAttribute", "LogicalRightShiftAttribute", "LogicalOrAttribute", + "LogicalXorAttribute", "MaximumAttribute", "MinimumAttribute", "MulAttribute", "PowAttribute", "SubAttribute", + "TableAttribute", "AbsAttribute", "BitwiseNotAttribute", "CeilAttribute", "ClzAttribute", "CosAttribute", + "ExpAttribute", "FloorAttribute", "LogAttribute", "LogicalNotAttribute", "NegateAttribute", "ReciprocalAttribute", + "RsqrtAttribute", "SinAttribute", "SelectAttribute", "EqualAttribute", "GreaterAttribute", "GreaterEqualAttribute", + "ReduceAllAttribute", "ReduceAnyAttribute", "ReduceMaxAttribute", "ReduceMinAttribute", "ReduceProductAttribute", + "ReduceSumAttribute", "ConcatAttribute", "PadAttribute", "ReshapeAttribute", "ReverseAttribute", "SliceAttribute", + "TileAttribute", "TransposeAttribute", "GatherAttribute", "ScatterAttribute", "ResizeAttribute", "CastAttribute", + "RescaleAttribute", "ConstAttribute", "IdentityAttribute", "CustomAttribute", "CondIfAttribute", "WhileLoopAttribute", + "VariableAttribute", "VariableWriteAttribute", "VariableReadAttribute", "ConstShapeAttribute"}; + static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_UNION, 76, type_codes, type_refs, nullptr, nullptr, names}; + return &tt; +} + +inline const ::flatbuffers::TypeTable *ArgMaxAttributeTypeTable() +{ + static const ::flatbuffers::TypeCode type_codes[] = {{::flatbuffers::ET_INT, 0, -1}, {::flatbuffers::ET_UINT, 0, 0}}; + static const ::flatbuffers::TypeFunction type_refs[] = {tosaFb::NanPropagationModeTypeTable}; + static const char *const names[] = {"axis", "nan_mode"}; + static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 2, type_codes, type_refs, nullptr, nullptr, names}; + return &tt; +} + +inline const 
::flatbuffers::TypeTable *AvgPool2dAttributeTypeTable() +{ + static const ::flatbuffers::TypeCode type_codes[] = {{::flatbuffers::ET_INT, 1, -1}, {::flatbuffers::ET_INT, 1, -1}, + {::flatbuffers::ET_INT, 1, -1}, {::flatbuffers::ET_UINT, 0, 0}}; + static const ::flatbuffers::TypeFunction type_refs[] = {tosaFb::DTypeTypeTable}; + static const char *const names[] = {"kernel", "stride", "pad", "acc_type"}; + static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 4, type_codes, type_refs, nullptr, nullptr, names}; + return &tt; +} + +inline const ::flatbuffers::TypeTable *Conv2dAttributeTypeTable() +{ + static const ::flatbuffers::TypeCode type_codes[] = {{::flatbuffers::ET_INT, 1, -1}, {::flatbuffers::ET_INT, 1, -1}, + {::flatbuffers::ET_INT, 1, -1}, {::flatbuffers::ET_BOOL, 0, -1}, {::flatbuffers::ET_UINT, 0, 0}}; + static const ::flatbuffers::TypeFunction type_refs[] = {tosaFb::DTypeTypeTable}; + static const char *const names[] = {"pad", "stride", "dilation", "local_bound", "acc_type"}; + static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 5, type_codes, type_refs, nullptr, nullptr, names}; + return &tt; +} + +inline const ::flatbuffers::TypeTable *Conv3dAttributeTypeTable() +{ + static const ::flatbuffers::TypeCode type_codes[] = {{::flatbuffers::ET_INT, 1, -1}, {::flatbuffers::ET_INT, 1, -1}, + {::flatbuffers::ET_INT, 1, -1}, {::flatbuffers::ET_BOOL, 0, -1}, {::flatbuffers::ET_UINT, 0, 0}}; + static const ::flatbuffers::TypeFunction type_refs[] = {tosaFb::DTypeTypeTable}; + static const char *const names[] = {"pad", "stride", "dilation", "local_bound", "acc_type"}; + static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 5, type_codes, type_refs, nullptr, nullptr, names}; + return &tt; +} + +inline const ::flatbuffers::TypeTable *DepthwiseConv2dAttributeTypeTable() +{ + static const ::flatbuffers::TypeCode type_codes[] = {{::flatbuffers::ET_INT, 1, -1}, {::flatbuffers::ET_INT, 1, -1}, + {::flatbuffers::ET_INT, 1, -1}, 
{::flatbuffers::ET_BOOL, 0, -1}, {::flatbuffers::ET_UINT, 0, 0}}; + static const ::flatbuffers::TypeFunction type_refs[] = {tosaFb::DTypeTypeTable}; + static const char *const names[] = {"pad", "stride", "dilation", "local_bound", "acc_type"}; + static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 5, type_codes, type_refs, nullptr, nullptr, names}; + return &tt; +} + +inline const ::flatbuffers::TypeTable *FFT2dAttributeTypeTable() +{ + static const ::flatbuffers::TypeCode type_codes[] = {{::flatbuffers::ET_BOOL, 0, -1}, {::flatbuffers::ET_BOOL, 0, -1}}; + static const char *const names[] = {"inverse", "local_bound"}; + static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 2, type_codes, nullptr, nullptr, nullptr, names}; + return &tt; +} + +inline const ::flatbuffers::TypeTable *MatMulAttributeTypeTable() +{ + static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr, nullptr}; + return &tt; +} + +inline const ::flatbuffers::TypeTable *MaxPool2dAttributeTypeTable() +{ + static const ::flatbuffers::TypeCode type_codes[] = {{::flatbuffers::ET_INT, 1, -1}, {::flatbuffers::ET_INT, 1, -1}, + {::flatbuffers::ET_INT, 1, -1}, {::flatbuffers::ET_UINT, 0, 0}}; + static const ::flatbuffers::TypeFunction type_refs[] = {tosaFb::NanPropagationModeTypeTable}; + static const char *const names[] = {"kernel", "stride", "pad", "nan_mode"}; + static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 4, type_codes, type_refs, nullptr, nullptr, names}; + return &tt; +} + +inline const ::flatbuffers::TypeTable *RFFT2dAttributeTypeTable() +{ + static const ::flatbuffers::TypeCode type_codes[] = {{::flatbuffers::ET_BOOL, 0, -1}}; + static const char *const names[] = {"local_bound"}; + static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 1, type_codes, nullptr, nullptr, nullptr, names}; + return &tt; +} + +inline const ::flatbuffers::TypeTable *TransposeConv2dAttributeTypeTable() +{ + 
static const ::flatbuffers::TypeCode type_codes[] = {{::flatbuffers::ET_INT, 1, -1}, {::flatbuffers::ET_INT, 1, -1}, + {::flatbuffers::ET_BOOL, 0, -1}, {::flatbuffers::ET_UINT, 0, 0}}; + static const ::flatbuffers::TypeFunction type_refs[] = {tosaFb::DTypeTypeTable}; + static const char *const names[] = {"out_pad", "stride", "local_bound", "acc_type"}; + static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 4, type_codes, type_refs, nullptr, nullptr, names}; + return &tt; +} + +inline const ::flatbuffers::TypeTable *ClampAttributeTypeTable() +{ + static const ::flatbuffers::TypeCode type_codes[] = { + {::flatbuffers::ET_UCHAR, 1, -1}, {::flatbuffers::ET_UCHAR, 1, -1}, {::flatbuffers::ET_UINT, 0, 0}}; + static const ::flatbuffers::TypeFunction type_refs[] = {tosaFb::NanPropagationModeTypeTable}; + static const char *const names[] = {"min_val", "max_val", "nan_mode"}; + static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 3, type_codes, type_refs, nullptr, nullptr, names}; + return &tt; +} + +inline const ::flatbuffers::TypeTable *ErfAttributeTypeTable() +{ + static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr, nullptr}; + return &tt; +} + +inline const ::flatbuffers::TypeTable *SigmoidAttributeTypeTable() +{ + static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr, nullptr}; + return &tt; +} + +inline const ::flatbuffers::TypeTable *TanhAttributeTypeTable() +{ + static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr, nullptr}; + return &tt; +} + +inline const ::flatbuffers::TypeTable *AddAttributeTypeTable() +{ + static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr, nullptr}; + return &tt; +} + +inline const ::flatbuffers::TypeTable *ArithmeticRightShiftAttributeTypeTable() +{ + static const ::flatbuffers::TypeCode 
type_codes[] = {{::flatbuffers::ET_BOOL, 0, -1}}; + static const char *const names[] = {"round"}; + static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 1, type_codes, nullptr, nullptr, nullptr, names}; + return &tt; +} + +inline const ::flatbuffers::TypeTable *BitwiseAndAttributeTypeTable() +{ + static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr, nullptr}; + return &tt; +} + +inline const ::flatbuffers::TypeTable *BitwiseOrAttributeTypeTable() +{ + static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr, nullptr}; + return &tt; +} + +inline const ::flatbuffers::TypeTable *BitwiseXorAttributeTypeTable() +{ + static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr, nullptr}; + return &tt; +} + +inline const ::flatbuffers::TypeTable *IntDivAttributeTypeTable() +{ + static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr, nullptr}; + return &tt; +} + +inline const ::flatbuffers::TypeTable *LogicalAndAttributeTypeTable() +{ + static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr, nullptr}; + return &tt; +} + +inline const ::flatbuffers::TypeTable *LogicalLeftShiftAttributeTypeTable() +{ + static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr, nullptr}; + return &tt; +} + +inline const ::flatbuffers::TypeTable *LogicalRightShiftAttributeTypeTable() { - if ( !values || !types ) return !values && !types; - if ( values->size() != types->size() ) return false; - for ( ::flatbuffers::uoffset_t i = 0; i < values->size(); ++i ) - { - if ( !VerifyAttribute(verifier, values->Get(i), types->GetEnum(i)) ) - { - return false; - } - } - return true; + static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr, 
nullptr}; + return &tt; } -inline const ::flatbuffers::TypeTable *DTypeTypeTable() +inline const ::flatbuffers::TypeTable *LogicalOrAttributeTypeTable() { - static const ::flatbuffers::TypeCode type_codes[] = {{::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, - {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, - {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, - {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, - {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}}; - static const ::flatbuffers::TypeFunction type_refs[] = {tosaFb::DTypeTypeTable}; - static const char *const names[] = {"UNKNOWN", "BOOL", "UINT8", "INT4", "INT8", "INT16", "INT32", "INT48", "FP32", - "UINT16", "FP16", "BF16", "SHAPE"}; - static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_ENUM, 13, type_codes, type_refs, nullptr, nullptr, names}; + static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr, nullptr}; return &tt; } -inline const ::flatbuffers::TypeTable *ResizeModeTypeTable() +inline const ::flatbuffers::TypeTable *LogicalXorAttributeTypeTable() { - static const ::flatbuffers::TypeCode type_codes[] = { - {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}}; - static const ::flatbuffers::TypeFunction type_refs[] = {tosaFb::ResizeModeTypeTable}; - static const char *const names[] = {"UNKNOWN", "NEAREST", "BILINEAR"}; - static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_ENUM, 3, type_codes, type_refs, nullptr, nullptr, names}; + static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr, nullptr}; return &tt; } -inline const ::flatbuffers::TypeTable *OpTypeTable() +inline const ::flatbuffers::TypeTable *MaximumAttributeTypeTable() { - static const ::flatbuffers::TypeCode 
type_codes[] = {{::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, - {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, - {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, - {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, - {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, - {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, - {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, - {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, - {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, - {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, - {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, - {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, - {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, - {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, - {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, - {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, - {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, - {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, - {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, - {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, - {::flatbuffers::ET_UINT, 0, 0}, 
{::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, - {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, - {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, - {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}, - {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UINT, 0, 0}}; - static const ::flatbuffers::TypeFunction type_refs[] = {tosaFb::OpTypeTable}; - static const char *const names[] = {"UNKNOWN", "ARGMAX", "AVG_POOL2D", "CONV2D", "CONV3D", "DEPTHWISE_CONV2D", - "FULLY_CONNECTED", "MATMUL", "MAX_POOL2D", "TRANSPOSE_CONV2D", "CLAMP", "RESERVED", "SIGMOID", "TANH", "ADD", - "ARITHMETIC_RIGHT_SHIFT", "BITWISE_AND", "BITWISE_OR", "BITWISE_XOR", "INTDIV", "LOGICAL_AND", "LOGICAL_LEFT_SHIFT", - "LOGICAL_RIGHT_SHIFT", "LOGICAL_OR", "LOGICAL_XOR", "MAXIMUM", "MINIMUM", "MUL", "POW", "SUB", "TABLE", "ABS", - "BITWISE_NOT", "CEIL", "CLZ", "EXP", "FLOOR", "LOG", "LOGICAL_NOT", "NEGATE", "RECIPROCAL", "RSQRT", "SELECT", - "EQUAL", "GREATER", "GREATER_EQUAL", "REDUCE_ANY", "REDUCE_ALL", "REDUCE_MAX", "REDUCE_MIN", "REDUCE_PRODUCT", - "REDUCE_SUM", "CONCAT", "PAD", "RESHAPE", "REVERSE", "SLICE", "TILE", "TRANSPOSE", "GATHER", "SCATTER", "RESIZE", - "CAST", "RESCALE", "CONST", "IDENTITY", "CUSTOM", "COND_IF", "WHILE_LOOP", "FFT2D", "RFFT2D", "ERF", "DIM"}; - static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_ENUM, 73, type_codes, type_refs, nullptr, nullptr, names}; + static const ::flatbuffers::TypeCode type_codes[] = {{::flatbuffers::ET_UINT, 0, 0}}; + static const ::flatbuffers::TypeFunction type_refs[] = {tosaFb::NanPropagationModeTypeTable}; + static const char *const names[] = {"nan_mode"}; + static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 1, type_codes, type_refs, nullptr, nullptr, names}; return &tt; } -inline const ::flatbuffers::TypeTable *AttributeTypeTable() +inline const 
::flatbuffers::TypeTable *MinimumAttributeTypeTable() { - static const ::flatbuffers::TypeCode type_codes[] = {{::flatbuffers::ET_SEQUENCE, 0, -1}, {::flatbuffers::ET_SEQUENCE, 0, 0}, - {::flatbuffers::ET_SEQUENCE, 0, 1}, {::flatbuffers::ET_SEQUENCE, 0, 2}, {::flatbuffers::ET_SEQUENCE, 0, 3}, - {::flatbuffers::ET_SEQUENCE, 0, 4}, {::flatbuffers::ET_SEQUENCE, 0, 5}, {::flatbuffers::ET_SEQUENCE, 0, 6}, - {::flatbuffers::ET_SEQUENCE, 0, 7}, {::flatbuffers::ET_SEQUENCE, 0, 8}, {::flatbuffers::ET_SEQUENCE, 0, 9}, - {::flatbuffers::ET_SEQUENCE, 0, 10}, {::flatbuffers::ET_SEQUENCE, 0, 11}, {::flatbuffers::ET_SEQUENCE, 0, 12}, - {::flatbuffers::ET_SEQUENCE, 0, 13}, {::flatbuffers::ET_SEQUENCE, 0, 14}, {::flatbuffers::ET_SEQUENCE, 0, 15}, - {::flatbuffers::ET_SEQUENCE, 0, 16}, {::flatbuffers::ET_SEQUENCE, 0, 17}, {::flatbuffers::ET_SEQUENCE, 0, 18}, - {::flatbuffers::ET_SEQUENCE, 0, 19}, {::flatbuffers::ET_SEQUENCE, 0, 20}, {::flatbuffers::ET_SEQUENCE, 0, 21}}; - static const ::flatbuffers::TypeFunction type_refs[] = {tosaFb::PoolAttributeTypeTable, tosaFb::ConvAttributeTypeTable, - tosaFb::TransposeConvAttributeTypeTable, tosaFb::PadAttributeTypeTable, tosaFb::AxisAttributeTypeTable, tosaFb::ReshapeAttributeTypeTable, - tosaFb::SliceAttributeTypeTable, tosaFb::TileAttributeTypeTable, tosaFb::ResizeAttributeTypeTable, tosaFb::ClampAttributeTypeTable, - tosaFb::RescaleAttributeTypeTable, tosaFb::MulAttributeTypeTable, tosaFb::ArithmeticRightShiftAttributeTypeTable, - tosaFb::CondIfAttributeTypeTable, tosaFb::WhileLoopAttributeTypeTable, tosaFb::TransposeAttributeTypeTable, - tosaFb::TableAttributeTypeTable, tosaFb::MatMulAttributeTypeTable, tosaFb::FullyConnectedAttributeTypeTable, - tosaFb::NegateAttributeTypeTable, tosaFb::CustomAttributeTypeTable, tosaFb::FFTAttributeTypeTable}; - static const char *const names[] = {"NONE", "PoolAttribute", "ConvAttribute", "TransposeConvAttribute", "PadAttribute", - "AxisAttribute", "ReshapeAttribute", "SliceAttribute", "TileAttribute", 
"ResizeAttribute", "ClampAttribute", "RescaleAttribute", - "MulAttribute", "ArithmeticRightShiftAttribute", "CondIfAttribute", "WhileLoopAttribute", "TransposeAttribute", - "TableAttribute", "MatMulAttribute", "FullyConnectedAttribute", "NegateAttribute", "CustomAttribute", "FFTAttribute"}; - static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_UNION, 23, type_codes, type_refs, nullptr, nullptr, names}; - return &tt; -} - -inline const ::flatbuffers::TypeTable *PoolAttributeTypeTable() + static const ::flatbuffers::TypeCode type_codes[] = {{::flatbuffers::ET_UINT, 0, 0}}; + static const ::flatbuffers::TypeFunction type_refs[] = {tosaFb::NanPropagationModeTypeTable}; + static const char *const names[] = {"nan_mode"}; + static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 1, type_codes, type_refs, nullptr, nullptr, names}; + return &tt; +} + +inline const ::flatbuffers::TypeTable *MulAttributeTypeTable() { - static const ::flatbuffers::TypeCode type_codes[] = {{::flatbuffers::ET_INT, 1, -1}, {::flatbuffers::ET_INT, 1, -1}, - {::flatbuffers::ET_INT, 1, -1}, {::flatbuffers::ET_INT, 0, -1}, {::flatbuffers::ET_INT, 0, -1}, {::flatbuffers::ET_UINT, 0, 0}}; - static const ::flatbuffers::TypeFunction type_refs[] = {tosaFb::DTypeTypeTable}; - static const char *const names[] = {"pad", "kernel", "stride", "input_zp", "output_zp", "accum_dtype"}; - static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 6, type_codes, type_refs, nullptr, nullptr, names}; + static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr, nullptr}; return &tt; } -inline const ::flatbuffers::TypeTable *ConvAttributeTypeTable() +inline const ::flatbuffers::TypeTable *PowAttributeTypeTable() { - static const ::flatbuffers::TypeCode type_codes[] = {{::flatbuffers::ET_INT, 1, -1}, {::flatbuffers::ET_INT, 1, -1}, - {::flatbuffers::ET_INT, 1, -1}, {::flatbuffers::ET_INT, 0, -1}, {::flatbuffers::ET_INT, 0, -1}}; - static 
const char *const names[] = {"pad", "stride", "dilation", "input_zp", "weight_zp"}; - static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 5, type_codes, nullptr, nullptr, nullptr, names}; + static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr, nullptr}; return &tt; } -inline const ::flatbuffers::TypeTable *TransposeConvAttributeTypeTable() +inline const ::flatbuffers::TypeTable *SubAttributeTypeTable() { - static const ::flatbuffers::TypeCode type_codes[] = {{::flatbuffers::ET_INT, 1, -1}, {::flatbuffers::ET_INT, 1, -1}, - {::flatbuffers::ET_INT, 1, -1}, {::flatbuffers::ET_INT, 0, -1}, {::flatbuffers::ET_INT, 0, -1}}; - static const char *const names[] = {"out_pad", "stride", "output_shape", "input_zp", "weight_zp"}; - static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 5, type_codes, nullptr, nullptr, nullptr, names}; + static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr, nullptr}; return &tt; } -inline const ::flatbuffers::TypeTable *PadAttributeTypeTable() +inline const ::flatbuffers::TypeTable *TableAttributeTypeTable() { - static const ::flatbuffers::TypeCode type_codes[] = { - {::flatbuffers::ET_INT, 1, -1}, {::flatbuffers::ET_INT, 0, -1}, {::flatbuffers::ET_UCHAR, 1, -1}}; - static const char *const names[] = {"padding", "pad_const_int", "pad_const_fp"}; - static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 3, type_codes, nullptr, nullptr, nullptr, names}; + static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr, nullptr}; + return &tt; +} + +inline const ::flatbuffers::TypeTable *AbsAttributeTypeTable() +{ + static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr, nullptr}; + return &tt; +} + +inline const ::flatbuffers::TypeTable *BitwiseNotAttributeTypeTable() +{ + static const 
::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr, nullptr}; + return &tt; +} + +inline const ::flatbuffers::TypeTable *CeilAttributeTypeTable() +{ + static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr, nullptr}; + return &tt; +} + +inline const ::flatbuffers::TypeTable *ClzAttributeTypeTable() +{ + static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr, nullptr}; + return &tt; +} + +inline const ::flatbuffers::TypeTable *CosAttributeTypeTable() +{ + static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr, nullptr}; + return &tt; +} + +inline const ::flatbuffers::TypeTable *ExpAttributeTypeTable() +{ + static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr, nullptr}; + return &tt; +} + +inline const ::flatbuffers::TypeTable *FloorAttributeTypeTable() +{ + static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr, nullptr}; + return &tt; +} + +inline const ::flatbuffers::TypeTable *LogAttributeTypeTable() +{ + static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr, nullptr}; + return &tt; +} + +inline const ::flatbuffers::TypeTable *LogicalNotAttributeTypeTable() +{ + static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr, nullptr}; + return &tt; +} + +inline const ::flatbuffers::TypeTable *NegateAttributeTypeTable() +{ + static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr, nullptr}; + return &tt; +} + +inline const ::flatbuffers::TypeTable *ReciprocalAttributeTypeTable() +{ + static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr, nullptr}; + return &tt; +} + 
+inline const ::flatbuffers::TypeTable *RsqrtAttributeTypeTable() +{ + static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr, nullptr}; + return &tt; +} + +inline const ::flatbuffers::TypeTable *SinAttributeTypeTable() +{ + static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr, nullptr}; + return &tt; +} + +inline const ::flatbuffers::TypeTable *SelectAttributeTypeTable() +{ + static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr, nullptr}; + return &tt; +} + +inline const ::flatbuffers::TypeTable *EqualAttributeTypeTable() +{ + static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr, nullptr}; + return &tt; +} + +inline const ::flatbuffers::TypeTable *GreaterAttributeTypeTable() +{ + static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr, nullptr}; + return &tt; +} + +inline const ::flatbuffers::TypeTable *GreaterEqualAttributeTypeTable() +{ + static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr, nullptr}; return &tt; } -inline const ::flatbuffers::TypeTable *AxisAttributeTypeTable() +inline const ::flatbuffers::TypeTable *ReduceAllAttributeTypeTable() { static const ::flatbuffers::TypeCode type_codes[] = {{::flatbuffers::ET_INT, 0, -1}}; static const char *const names[] = {"axis"}; @@ -2888,89 +6490,85 @@ inline const ::flatbuffers::TypeTable *AxisAttributeTypeTable() return &tt; } -inline const ::flatbuffers::TypeTable *ReshapeAttributeTypeTable() +inline const ::flatbuffers::TypeTable *ReduceAnyAttributeTypeTable() { - static const ::flatbuffers::TypeCode type_codes[] = {{::flatbuffers::ET_INT, 1, -1}}; - static const char *const names[] = {"new_shape"}; + static const ::flatbuffers::TypeCode type_codes[] = {{::flatbuffers::ET_INT, 0, -1}}; + static 
const char *const names[] = {"axis"}; static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 1, type_codes, nullptr, nullptr, nullptr, names}; return &tt; } -inline const ::flatbuffers::TypeTable *SliceAttributeTypeTable() +inline const ::flatbuffers::TypeTable *ReduceMaxAttributeTypeTable() { - static const ::flatbuffers::TypeCode type_codes[] = {{::flatbuffers::ET_INT, 1, -1}, {::flatbuffers::ET_INT, 1, -1}}; - static const char *const names[] = {"start", "size"}; - static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 2, type_codes, nullptr, nullptr, nullptr, names}; + static const ::flatbuffers::TypeCode type_codes[] = {{::flatbuffers::ET_INT, 0, -1}, {::flatbuffers::ET_UINT, 0, 0}}; + static const ::flatbuffers::TypeFunction type_refs[] = {tosaFb::NanPropagationModeTypeTable}; + static const char *const names[] = {"axis", "nan_mode"}; + static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 2, type_codes, type_refs, nullptr, nullptr, names}; return &tt; } -inline const ::flatbuffers::TypeTable *TileAttributeTypeTable() +inline const ::flatbuffers::TypeTable *ReduceMinAttributeTypeTable() { - static const ::flatbuffers::TypeCode type_codes[] = {{::flatbuffers::ET_INT, 1, -1}}; - static const char *const names[] = {"multiples"}; + static const ::flatbuffers::TypeCode type_codes[] = {{::flatbuffers::ET_INT, 0, -1}, {::flatbuffers::ET_UINT, 0, 0}}; + static const ::flatbuffers::TypeFunction type_refs[] = {tosaFb::NanPropagationModeTypeTable}; + static const char *const names[] = {"axis", "nan_mode"}; + static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 2, type_codes, type_refs, nullptr, nullptr, names}; + return &tt; +} + +inline const ::flatbuffers::TypeTable *ReduceProductAttributeTypeTable() +{ + static const ::flatbuffers::TypeCode type_codes[] = {{::flatbuffers::ET_INT, 0, -1}}; + static const char *const names[] = {"axis"}; static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 1, 
type_codes, nullptr, nullptr, nullptr, names}; return &tt; } -inline const ::flatbuffers::TypeTable *ResizeAttributeTypeTable() +inline const ::flatbuffers::TypeTable *ReduceSumAttributeTypeTable() { - static const ::flatbuffers::TypeCode type_codes[] = {{::flatbuffers::ET_SHORT, 1, -1}, - {::flatbuffers::ET_SHORT, 1, -1}, {::flatbuffers::ET_SHORT, 1, -1}, {::flatbuffers::ET_UINT, 0, 0}}; - static const ::flatbuffers::TypeFunction type_refs[] = {tosaFb::ResizeModeTypeTable}; - static const char *const names[] = {"scale", "offset", "border", "mode"}; - static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 4, type_codes, type_refs, nullptr, nullptr, names}; + static const ::flatbuffers::TypeCode type_codes[] = {{::flatbuffers::ET_INT, 0, -1}}; + static const char *const names[] = {"axis"}; + static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 1, type_codes, nullptr, nullptr, nullptr, names}; return &tt; } -inline const ::flatbuffers::TypeTable *ClampAttributeTypeTable() +inline const ::flatbuffers::TypeTable *ConcatAttributeTypeTable() { - static const ::flatbuffers::TypeCode type_codes[] = {{::flatbuffers::ET_INT, 0, -1}, {::flatbuffers::ET_INT, 0, -1}, - {::flatbuffers::ET_UCHAR, 1, -1}, {::flatbuffers::ET_UCHAR, 1, -1}}; - static const char *const names[] = {"min_int", "max_int", "min_fp", "max_fp"}; - static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 4, type_codes, nullptr, nullptr, nullptr, names}; + static const ::flatbuffers::TypeCode type_codes[] = {{::flatbuffers::ET_INT, 0, -1}}; + static const char *const names[] = {"axis"}; + static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 1, type_codes, nullptr, nullptr, nullptr, names}; return &tt; } -inline const ::flatbuffers::TypeTable *RescaleAttributeTypeTable() +inline const ::flatbuffers::TypeTable *PadAttributeTypeTable() { - static const ::flatbuffers::TypeCode type_codes[] = {{::flatbuffers::ET_INT, 0, -1}, {::flatbuffers::ET_INT, 0, -1}, 
- {::flatbuffers::ET_INT, 1, -1}, {::flatbuffers::ET_INT, 1, -1}, {::flatbuffers::ET_BOOL, 0, -1}, {::flatbuffers::ET_BOOL, 0, -1}, - {::flatbuffers::ET_BOOL, 0, -1}, {::flatbuffers::ET_BOOL, 0, -1}, {::flatbuffers::ET_BOOL, 0, -1}}; - static const char *const names[] = {"input_zp", "output_zp", "multiplier", "shift", "scale32", "double_round", - "per_channel", "input_unsigned", "output_unsigned"}; - static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 9, type_codes, nullptr, nullptr, nullptr, names}; + static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr, nullptr}; return &tt; } -inline const ::flatbuffers::TypeTable *MulAttributeTypeTable() +inline const ::flatbuffers::TypeTable *ReshapeAttributeTypeTable() { - static const ::flatbuffers::TypeCode type_codes[] = {{::flatbuffers::ET_INT, 0, -1}}; - static const char *const names[] = {"shift"}; - static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 1, type_codes, nullptr, nullptr, nullptr, names}; + static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr, nullptr}; return &tt; } -inline const ::flatbuffers::TypeTable *ArithmeticRightShiftAttributeTypeTable() +inline const ::flatbuffers::TypeTable *ReverseAttributeTypeTable() { - static const ::flatbuffers::TypeCode type_codes[] = {{::flatbuffers::ET_BOOL, 0, -1}}; - static const char *const names[] = {"round"}; + static const ::flatbuffers::TypeCode type_codes[] = {{::flatbuffers::ET_INT, 0, -1}}; + static const char *const names[] = {"axis"}; static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 1, type_codes, nullptr, nullptr, nullptr, names}; return &tt; } -inline const ::flatbuffers::TypeTable *CondIfAttributeTypeTable() +inline const ::flatbuffers::TypeTable *SliceAttributeTypeTable() { - static const ::flatbuffers::TypeCode type_codes[] = {{::flatbuffers::ET_STRING, 0, -1}, {::flatbuffers::ET_STRING, 0, -1}}; 
- static const char *const names[] = {"then_branch", "else_branch"}; - static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 2, type_codes, nullptr, nullptr, nullptr, names}; + static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr, nullptr}; return &tt; } -inline const ::flatbuffers::TypeTable *WhileLoopAttributeTypeTable() +inline const ::flatbuffers::TypeTable *TileAttributeTypeTable() { - static const ::flatbuffers::TypeCode type_codes[] = {{::flatbuffers::ET_STRING, 0, -1}, {::flatbuffers::ET_STRING, 0, -1}}; - static const char *const names[] = {"cond_branch", "body_branch"}; - static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 2, type_codes, nullptr, nullptr, nullptr, names}; + static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr, nullptr}; return &tt; } @@ -2982,35 +6580,52 @@ inline const ::flatbuffers::TypeTable *TransposeAttributeTypeTable() return &tt; } -inline const ::flatbuffers::TypeTable *TableAttributeTypeTable() +inline const ::flatbuffers::TypeTable *GatherAttributeTypeTable() { - static const ::flatbuffers::TypeCode type_codes[] = {{::flatbuffers::ET_SHORT, 1, -1}}; - static const char *const names[] = {"table"}; - static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 1, type_codes, nullptr, nullptr, nullptr, names}; + static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr, nullptr}; return &tt; } -inline const ::flatbuffers::TypeTable *MatMulAttributeTypeTable() +inline const ::flatbuffers::TypeTable *ScatterAttributeTypeTable() { - static const ::flatbuffers::TypeCode type_codes[] = {{::flatbuffers::ET_INT, 0, -1}, {::flatbuffers::ET_INT, 0, -1}}; - static const char *const names[] = {"a_zp", "b_zp"}; - static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 2, type_codes, nullptr, nullptr, nullptr, names}; + static const 
::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr, nullptr}; + return &tt; +} + +inline const ::flatbuffers::TypeTable *ResizeAttributeTypeTable() +{ + static const ::flatbuffers::TypeCode type_codes[] = {{::flatbuffers::ET_UINT, 0, 0}}; + static const ::flatbuffers::TypeFunction type_refs[] = {tosaFb::ResizeModeTypeTable}; + static const char *const names[] = {"mode"}; + static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 1, type_codes, type_refs, nullptr, nullptr, names}; return &tt; } -inline const ::flatbuffers::TypeTable *FullyConnectedAttributeTypeTable() +inline const ::flatbuffers::TypeTable *CastAttributeTypeTable() { - static const ::flatbuffers::TypeCode type_codes[] = {{::flatbuffers::ET_INT, 0, -1}, {::flatbuffers::ET_INT, 0, -1}}; - static const char *const names[] = {"input_zp", "weight_zp"}; - static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 2, type_codes, nullptr, nullptr, nullptr, names}; + static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr, nullptr}; return &tt; } -inline const ::flatbuffers::TypeTable *NegateAttributeTypeTable() +inline const ::flatbuffers::TypeTable *RescaleAttributeTypeTable() { - static const ::flatbuffers::TypeCode type_codes[] = {{::flatbuffers::ET_INT, 0, -1}, {::flatbuffers::ET_INT, 0, -1}}; - static const char *const names[] = {"input1_zp", "output_zp"}; - static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 2, type_codes, nullptr, nullptr, nullptr, names}; + static const ::flatbuffers::TypeCode type_codes[] = {{::flatbuffers::ET_BOOL, 0, -1}, {::flatbuffers::ET_UINT, 0, 0}, + {::flatbuffers::ET_BOOL, 0, -1}, {::flatbuffers::ET_BOOL, 0, -1}, {::flatbuffers::ET_BOOL, 0, -1}}; + static const ::flatbuffers::TypeFunction type_refs[] = {tosaFb::RoundingModeTypeTable}; + static const char *const names[] = {"scale32", "rounding_mode", "per_channel", "input_unsigned", 
"output_unsigned"}; + static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 5, type_codes, type_refs, nullptr, nullptr, names}; + return &tt; +} + +inline const ::flatbuffers::TypeTable *ConstAttributeTypeTable() +{ + static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr, nullptr}; + return &tt; +} + +inline const ::flatbuffers::TypeTable *IdentityAttributeTypeTable() +{ + static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr, nullptr}; return &tt; } @@ -3018,16 +6633,48 @@ inline const ::flatbuffers::TypeTable *CustomAttributeTypeTable() { static const ::flatbuffers::TypeCode type_codes[] = { {::flatbuffers::ET_STRING, 0, -1}, {::flatbuffers::ET_STRING, 0, -1}, {::flatbuffers::ET_UCHAR, 1, -1}}; - static const char *const names[] = {"identifier", "config", "implementation_attrs"}; + static const char *const names[] = {"operator_name", "domain_name", "implementation_attrs"}; static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 3, type_codes, nullptr, nullptr, nullptr, names}; return &tt; } -inline const ::flatbuffers::TypeTable *FFTAttributeTypeTable() +inline const ::flatbuffers::TypeTable *CondIfAttributeTypeTable() { - static const ::flatbuffers::TypeCode type_codes[] = {{::flatbuffers::ET_BOOL, 0, -1}}; - static const char *const names[] = {"inverse"}; - static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 1, type_codes, nullptr, nullptr, nullptr, names}; + static const ::flatbuffers::TypeCode type_codes[] = {{::flatbuffers::ET_STRING, 0, -1}, {::flatbuffers::ET_STRING, 0, -1}}; + static const char *const names[] = {"then_graph", "else_graph"}; + static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 2, type_codes, nullptr, nullptr, nullptr, names}; + return &tt; +} + +inline const ::flatbuffers::TypeTable *WhileLoopAttributeTypeTable() +{ + static const ::flatbuffers::TypeCode type_codes[] = 
{{::flatbuffers::ET_STRING, 0, -1}, {::flatbuffers::ET_STRING, 0, -1}}; + static const char *const names[] = {"cond_graph", "body_graph"}; + static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 2, type_codes, nullptr, nullptr, nullptr, names}; + return &tt; +} + +inline const ::flatbuffers::TypeTable *VariableAttributeTypeTable() +{ + static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr, nullptr}; + return &tt; +} + +inline const ::flatbuffers::TypeTable *VariableWriteAttributeTypeTable() +{ + static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr, nullptr}; + return &tt; +} + +inline const ::flatbuffers::TypeTable *VariableReadAttributeTypeTable() +{ + static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr, nullptr}; + return &tt; +} + +inline const ::flatbuffers::TypeTable *ConstShapeAttributeTypeTable() +{ + static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr, nullptr}; return &tt; } @@ -3044,30 +6691,49 @@ inline const ::flatbuffers::TypeTable *TosaTensorTypeTable() { static const ::flatbuffers::TypeCode type_codes[] = {{::flatbuffers::ET_STRING, 0, -1}, {::flatbuffers::ET_INT, 1, -1}, {::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UCHAR, 1, -1}, - {::flatbuffers::ET_BOOL, 0, -1}, {::flatbuffers::ET_BOOL, 0, -1}}; + {::flatbuffers::ET_BOOL, 0, -1}, {::flatbuffers::ET_BOOL, 0, -1}, {::flatbuffers::ET_STRING, 0, -1}}; static const ::flatbuffers::TypeFunction type_refs[] = {tosaFb::DTypeTypeTable}; - static const char *const names[] = {"name", "shape", "type", "data", "variable", "is_unranked"}; - static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 6, type_codes, type_refs, nullptr, nullptr, names}; + static const char *const names[] = {"name", "shape", "type", "data", "variable", "is_unranked", "variable_name"}; + 
static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 7, type_codes, type_refs, nullptr, nullptr, names}; + return &tt; +} + +inline const ::flatbuffers::TypeTable *TosaShapeTypeTable() +{ + static const ::flatbuffers::TypeCode type_codes[] = { + {::flatbuffers::ET_STRING, 0, -1}, {::flatbuffers::ET_UINT, 0, -1}, {::flatbuffers::ET_UCHAR, 1, -1}}; + static const char *const names[] = {"name", "rank", "data"}; + static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 3, type_codes, nullptr, nullptr, nullptr, names}; + return &tt; +} + +inline const ::flatbuffers::TypeTable *OpLocationTypeTable() +{ + static const ::flatbuffers::TypeCode type_codes[] = {{::flatbuffers::ET_STRING, 0, -1}}; + static const char *const names[] = {"text"}; + static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 1, type_codes, nullptr, nullptr, nullptr, names}; return &tt; } inline const ::flatbuffers::TypeTable *TosaOperatorTypeTable() { - static const ::flatbuffers::TypeCode type_codes[] = {{::flatbuffers::ET_UINT, 0, 0}, {::flatbuffers::ET_UTYPE, 0, 1}, - {::flatbuffers::ET_SEQUENCE, 0, 1}, {::flatbuffers::ET_STRING, 1, -1}, {::flatbuffers::ET_STRING, 1, -1}}; - static const ::flatbuffers::TypeFunction type_refs[] = {tosaFb::OpTypeTable, tosaFb::AttributeTypeTable}; - static const char *const names[] = {"op", "attribute_type", "attribute", "inputs", "outputs"}; - static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 5, type_codes, type_refs, nullptr, nullptr, names}; + static const ::flatbuffers::TypeCode type_codes[] = {{::flatbuffers::ET_UINT, 0, 0}, + {::flatbuffers::ET_UTYPE, 0, 1}, {::flatbuffers::ET_SEQUENCE, 0, 1}, {::flatbuffers::ET_STRING, 1, -1}, + {::flatbuffers::ET_STRING, 1, -1}, {::flatbuffers::ET_SEQUENCE, 0, 2}}; + static const ::flatbuffers::TypeFunction type_refs[] = {tosaFb::OpTypeTable, tosaFb::AttributeTypeTable, tosaFb::OpLocationTypeTable}; + static const char *const names[] = {"op", "attribute_type", 
"attribute", "inputs", "outputs", "location"}; + static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 6, type_codes, type_refs, nullptr, nullptr, names}; return &tt; } inline const ::flatbuffers::TypeTable *TosaBasicBlockTypeTable() { - static const ::flatbuffers::TypeCode type_codes[] = {{::flatbuffers::ET_STRING, 0, -1}, {::flatbuffers::ET_SEQUENCE, 1, 0}, - {::flatbuffers::ET_SEQUENCE, 1, 1}, {::flatbuffers::ET_STRING, 1, -1}, {::flatbuffers::ET_STRING, 1, -1}}; - static const ::flatbuffers::TypeFunction type_refs[] = {tosaFb::TosaOperatorTypeTable, tosaFb::TosaTensorTypeTable}; - static const char *const names[] = {"name", "operators", "tensors", "inputs", "outputs"}; - static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 5, type_codes, type_refs, nullptr, nullptr, names}; + static const ::flatbuffers::TypeCode type_codes[] = {{::flatbuffers::ET_STRING, 0, -1}, + {::flatbuffers::ET_SEQUENCE, 1, 0}, {::flatbuffers::ET_SEQUENCE, 1, 1}, {::flatbuffers::ET_STRING, 1, -1}, + {::flatbuffers::ET_STRING, 1, -1}, {::flatbuffers::ET_SEQUENCE, 1, 2}}; + static const ::flatbuffers::TypeFunction type_refs[] = {tosaFb::TosaOperatorTypeTable, tosaFb::TosaTensorTypeTable, tosaFb::TosaShapeTypeTable}; + static const char *const names[] = {"name", "operators", "tensors", "inputs", "outputs", "shapes"}; + static const ::flatbuffers::TypeTable tt = {::flatbuffers::ST_TABLE, 6, type_codes, type_refs, nullptr, nullptr, names}; return &tt; } diff --git a/ethosu/regor/tosa/tosa_validator.cpp b/ethosu/regor/tosa/tosa_validator.cpp index 168bd667..bc908d4d 100644 --- a/ethosu/regor/tosa/tosa_validator.cpp +++ b/ethosu/regor/tosa/tosa_validator.cpp @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
// -// Automatically generated by tosaValidationGenerator for TOSA Specification 0.80.0 +// Automatically generated by tosaValidationGenerator for TOSA Specification 1.0.0draft // Do not edit. #include "tosa/tosa_validator.hpp" @@ -30,14 +30,9 @@ namespace validator void ValidateOperator(const GraphApi::GraphOperation *graphOp, const Context &context) { if ( graphOp == nullptr ) throw std::invalid_argument("No operation"); - if ( (context.version & 0xFFFFFF00) == GraphApi::VERSION_TOSA_0_80 && context.profile == GraphApi::PROFILE_BASELINE ) + if ( (context.version & 0xFFFFFF00) == GraphApi::VERSION_TOSA_1_00 && context.profile == GraphApi::PROFILE_BASELINE ) { - ValidateOperator_Version_0_80_0_Profile_BI(graphOp, context); - return; - } - if ( (context.version & 0xFFFFFF00) == GraphApi::VERSION_TOSA_0_60 && context.profile == GraphApi::PROFILE_BASELINE ) - { - ValidateOperator_Version_0_60_0_Profile_BI(graphOp, context); + ValidateOperator_Version_1_0_0_draft_Profile_PRO_INT(graphOp, context); return; } throw std::invalid_argument("TOSA version or profile not supported"); diff --git a/ethosu/regor/tosa/tosa_validator.hpp b/ethosu/regor/tosa/tosa_validator.hpp index 2ac26814..0554a8f0 100644 --- a/ethosu/regor/tosa/tosa_validator.hpp +++ b/ethosu/regor/tosa/tosa_validator.hpp @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. // -// Automatically generated by tosaValidationGenerator for TOSA Specification 0.80.0 +// Automatically generated by tosaValidationGenerator for TOSA Specification 1.0.0draft // Do not edit. 
#pragma once @@ -49,14 +49,13 @@ enum class Level struct Context { - uint32_t version = GraphApi::VERSION_TOSA_0_80; + uint32_t version = GraphApi::VERSION_TOSA_1_00; int32_t profile = GraphApi::PROFILE_BASELINE; Level level = Level::Level8K; std::function GetGraph; }; -void ValidateOperator_Version_0_80_0_Profile_BI(const GraphApi::GraphOperation *graphOp, const Context &context); -void ValidateOperator_Version_0_60_0_Profile_BI(const GraphApi::GraphOperation *graphOp, const Context &context); +void ValidateOperator_Version_1_0_0_draft_Profile_PRO_INT(const GraphApi::GraphOperation *graphOp, const Context &context); void ValidateOperator(const GraphApi::GraphOperation *graphOp, const Context &context = Context{}); } // namespace validator diff --git a/ethosu/regor/tosa/tosa_validator_version_0_60_0_profile_bi.cpp b/ethosu/regor/tosa/tosa_validator_version_0_60_0_profile_bi.cpp deleted file mode 100644 index 4b150ec3..00000000 --- a/ethosu/regor/tosa/tosa_validator_version_0_60_0_profile_bi.cpp +++ /dev/null @@ -1,2898 +0,0 @@ -// -// SPDX-FileCopyrightText: Copyright 2023-2024 Arm Limited and/or its affiliates -// -// SPDX-License-Identifier: Apache-2.0 -// -// Licensed under the Apache License, Version 2.0 (the License); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an AS IS BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Generated by tosaValidationGenerator for TOSA Specification 0.60.0 -// Do not edit. 
- -#include "compiler/operation.hpp" -#include "tosa/tosa_argument_checks.hpp" -#include "tosa/tosa_error_checks.hpp" -#include "tosa/tosa_level_checks.hpp" -#include "tosa/tosa_require_checks.hpp" -#include "tosa/tosa_validator.hpp" -using namespace tosa::validator; - -using namespace tosa::validator::checks; - -#define MAX_RANK (context.level == Level::Level8K ? 6 : (context.level == Level::Levelnone ? 32 : 0)) - -#define MAX_KERNEL (context.level == Level::Level8K ? 8192 : (context.level == Level::Levelnone ? 2147483647 : 0)) - -#define MAX_SCALE (context.level == Level::Level8K ? 256 : (context.level == Level::Levelnone ? 2048 : 0)) - -#define MAX_STRIDE (context.level == Level::Level8K ? 8192 : (context.level == Level::Levelnone ? 2147483647 : 0)) - -namespace -{ -void ValidateOperator_ARGMAX(const regor::Operation *op, const Context &context) -{ - const Argument input = { - Category::Input, - "input", - "in_t", - }; /*Input tensor with rank from 1 to 4 shape=shape1*/ - const Argument axis = { - Category::Attribute, - "axis", - "int32_t", - }; /*Axis in range from 0 to rank(shape1)-1 shape=-*/ - const Argument output = { - Category::Output, - "output", - "out_t", - }; /*Output tensor, with rank = rank(shape1)-1 shape=shape*/ - const std::vector arguments = { - &input, - &axis, - &output, - }; - const std::vector typesupports = { - { - {"in_t", "int8_t"}, - {"out_t", "int32_t"}, - }, // signed 8 - { - {"in_t", "int16_t"}, - {"out_t", "int32_t"}, - }, // signed 16 - }; - ValidateArguments(op, arguments, typesupports, context); - ErrorIfCheck_ai0sdq9wgm72(op, context); - ErrorIfCheck_gpp861oen43y(op, context); - LevelCheck_1lz89reckvj8d(op, context); -} - -void ValidateOperator_AVG_POOL2D(const regor::Operation *op, const Context &context) -{ - const Argument input = {Category::Input, "input", "in_out_t", {4, 4}}; /*Input tensor 4D shape=[N,IH,IW,C]*/ - const Argument kernel = {Category::Attribute, "kernel", "int32_t", {1, 1}}; /*[kernel_y, kernel_x] shape=[2]*/ 
- const Argument stride = {Category::Attribute, "stride", "int32_t", {1, 1}}; /*[stride_y, stride_x] shape=[2]*/ - const Argument pad = {Category::Attribute, "pad", "int32_t", {1, 1}}; /*[pad_top, pad_bottom, pad_left, pad_right] - shape=[4]*/ - const Argument acc_size = { - Category::Attribute, - "acc_size", - "acc_t", - }; /*Enumerated type, must be one of INT32, FP16, FP32, as defined in the Supported Data Types table for this - operation shape=-*/ - const Argument input_zp = { - Category::Attribute, - "input_zp", - "in_out_t", - }; /*Input tensor zero point. Must be zero for non-int8 types. shape=-*/ - const Argument output_zp = { - Category::Attribute, - "output_zp", - "in_out_t", - }; /*Output tensor zero point. Must be zero for non-int8 types. shape=-*/ - const Argument output = {Category::Output, "output", "in_out_t", {4, 4}}; /*Output tensor 4D shape=[N,OH,OW,C]*/ - const std::vector arguments = { - &input, - &kernel, - &stride, - &pad, - &acc_size, - &input_zp, - &output_zp, - &output, - }; - const std::vector typesupports = { - { - {"in_out_t", "int8_t"}, - {"acc_t", "int32_t"}, - }, // signed 8 with int32 accumulate - { - {"in_out_t", "int16_t"}, - {"acc_t", "int32_t"}, - }, // signed 16 with int32 accumulate - }; - ValidateArguments(op, arguments, typesupports, context); - ErrorIfCheck_1vu5c1tytwmhu(op, context); - ErrorIfCheck_1n0denkrrrlr1(op, context); - ErrorIfCheck_36r4wpx3psd81(op, context); - ErrorIfCheck_1lrylbkd3w7ix(op, context); - ErrorIfCheck_ojmgqziimenu(op, context); - ErrorIfCheck_3vqy81ueu5wjk(op, context); - ErrorIfCheck_125xuezh1964i(op, context); - ErrorIfCheck_fqta626ku4qe(op, context); - ErrorIfCheck_ycjhrvf2yigr(op, context); - ErrorIfCheck_1c57olj698f3d(op, context); - LevelCheck_2i1ithnrq06wi(op, context); - LevelCheck_1wobi8axf7z2y(op, context); - LevelCheck_1xppm0ufw64nq(op, context); - LevelCheck_as2lzdd5d28b(op, context); - LevelCheck_2n3xkkz3ip4mz(op, context); - LevelCheck_3o7qpmmd9ga58(op, context); - 
LevelCheck_16lxbjk2bszcz(op, context); - LevelCheck_2kwfb08mbiwpg(op, context); -} - -void ValidateOperator_CONV2D(const regor::Operation *op, const Context &context) -{ - const Argument input = {Category::Input, "input", "in_t", {4, 4}}; /*Input tensor shape=[N,IH,IW,IC]*/ - const Argument weight = {Category::Input, "weight", "weight_t", {4, 4}}; /*Weight kernel size KH x KW - shape=[OC,KH,KW,IC]*/ - const Argument bias = {Category::Input, "bias", "out_t", {1, 1}}; /*Per output channel bias data. shape=[OC]*/ - const Argument pad = {Category::Attribute, "pad", "int32_t", {1, 1}}; /*[pad_top, pad_bottom, pad_left, pad_right] - shape=[4]*/ - const Argument stride = {Category::Attribute, "stride", "int32_t", {1, 1}}; /*[stride_y, stride_x] shape=[2]*/ - const Argument dilation = {Category::Attribute, "dilation", "int32_t", {1, 1}}; /*[dilation_y, dilation_x] - shape=[2]*/ - const Argument input_zp = { - Category::Attribute, - "input_zp", - "in_t", - }; /*Input tensor zero point. Must be zero for non-int8 types. shape=-*/ - const Argument weight_zp = { - Category::Attribute, - "weight_zp", - "weight_t", - }; /*Weight zero point. Must be zero for non-int8 types. 
shape=-*/ - const Argument output = {Category::Output, "output", "out_t", {4, 4}}; /*Output tensor shape=[N,OH,OW,OC]*/ - const std::vector arguments = { - &input, - &weight, - &bias, - &pad, - &stride, - &dilation, - &input_zp, - &weight_zp, - &output, - }; - const std::vector typesupports = { - { - {"in_t", "int8_t"}, - {"weight_t", "int8_t"}, - {"out_t", "int32_t"}, - }, // signed 8x8 with int32 accumulate - { - {"in_t", "int8_t"}, - {"weight_t", "int4_t"}, - {"out_t", "int32_t"}, - }, // signed 8x4 with int32 accumulate - { - {"in_t", "int16_t"}, - {"weight_t", "int8_t"}, - {"out_t", "int48_t"}, - }, // signed 16x8 with int48 accumulate - }; - ValidateArguments(op, arguments, typesupports, context); - ErrorIfCheck_1hby1qurzja4f(op, context); - ErrorIfCheck_1md8k265hfj92(op, context); - ErrorIfCheck_ojmgqziimenu(op, context); - ErrorIfCheck_1lrylbkd3w7ix(op, context); - ErrorIfCheck_3fzsq78v5ypau(op, context); - ErrorIfCheck_2vhj6e48eyzlr(op, context); - ErrorIfCheck_147wc580l2tik(op, context); - ErrorIfCheck_2rm8rnsdfn14h(op, context); - ErrorIfCheck_36emtx7zwkk96(op, context); - ErrorIfCheck_2r9jencgka20o(op, context); - ErrorIfCheck_207p0r46d35m0(op, context); - ErrorIfCheck_cr43yjpqkcpd(op, context); - LevelCheck_1l00wczs5w70i(op, context); - LevelCheck_1hle41fus7cpl(op, context); - LevelCheck_2n3xkkz3ip4mz(op, context); - LevelCheck_3o7qpmmd9ga58(op, context); - LevelCheck_16lxbjk2bszcz(op, context); - LevelCheck_2kwfb08mbiwpg(op, context); - LevelCheck_1xppm0ufw64nq(op, context); - LevelCheck_as2lzdd5d28b(op, context); -} - -void ValidateOperator_CONV3D(const regor::Operation *op, const Context &context) -{ - const Argument input = {Category::Input, "input", "in_t", {5, 5}}; /*Input tensor shape=[N,ID,IH,IW,IC]*/ - const Argument weight = {Category::Input, "weight", "weight_t", {5, 5}}; /*Weight kernel size KDxKHxKW - shape=[OC,KD,KH,KW,IC]*/ - const Argument bias = {Category::Input, "bias", "out_t", {1, 1}}; /*Per output channel bias data. 
shape=[OC]*/ - const Argument pad = {Category::Attribute, "pad", "int32_t", {1, 1}}; /*[pad_d0, pad_d1, pad_top, pad_bottom, - pad_left, pad_right] shape=[6]*/ - const Argument stride = {Category::Attribute, "stride", "int32_t", {1, 1}}; /*[stride_d, stride_y, stride_x] - shape=[3]*/ - const Argument dilation = {Category::Attribute, "dilation", "int32_t", {1, 1}}; /*[dilation_d, dilation_y, - dilation_x] shape=[3]*/ - const Argument input_zp = { - Category::Attribute, - "input_zp", - "in_t", - }; /*Input tensor zero point. Must be zero for non-int8 types. shape=-*/ - const Argument weight_zp = { - Category::Attribute, - "weight_zp", - "weight_t", - }; /*Weight zero point. Must be zero for non-int8 types. shape=-*/ - const Argument output = {Category::Output, "output", "out_t", {5, 5}}; /*Output tensor shape=[N,OD,OH,OW,OC]*/ - const std::vector arguments = { - &input, - &weight, - &bias, - &pad, - &stride, - &dilation, - &input_zp, - &weight_zp, - &output, - }; - const std::vector typesupports = { - { - {"in_t", "int8_t"}, - {"weight_t", "int8_t"}, - {"out_t", "int32_t"}, - }, // signed 8x8 with int32 accumulate - { - {"in_t", "int8_t"}, - {"weight_t", "int4_t"}, - {"out_t", "int32_t"}, - }, // signed 8x4 with int32 accumulate - { - {"in_t", "int16_t"}, - {"weight_t", "int8_t"}, - {"out_t", "int48_t"}, - }, // signed 16x8 with int48 accumulate - }; - ValidateArguments(op, arguments, typesupports, context); - ErrorIfCheck_1hby1qurzja4f(op, context); - ErrorIfCheck_1md8k265hfj92(op, context); - ErrorIfCheck_341t6ysqc16b2(op, context); - ErrorIfCheck_uqm570jwaqb6(op, context); - ErrorIfCheck_34iiwt6o66qfa(op, context); - ErrorIfCheck_llbd3iugmek0(op, context); - ErrorIfCheck_2vhj6e48eyzlr(op, context); - ErrorIfCheck_147wc580l2tik(op, context); - ErrorIfCheck_1w510kxt5b2b2(op, context); - ErrorIfCheck_27g3t38z1of4h(op, context); - ErrorIfCheck_95jvn4dzraol(op, context); - ErrorIfCheck_21377cjnb1ox7(op, context); - ErrorIfCheck_2cpco8ykx99sa(op, context); - 
LevelCheck_1npkwxnbypn8z(op, context); - LevelCheck_1l00wczs5w70i(op, context); - LevelCheck_1hle41fus7cpl(op, context); - LevelCheck_olu6vs8y9f61(op, context); - LevelCheck_3l4no1w1u6sq4(op, context); - LevelCheck_2n3xkkz3ip4mz(op, context); - LevelCheck_3o7qpmmd9ga58(op, context); - LevelCheck_16lxbjk2bszcz(op, context); - LevelCheck_2kwfb08mbiwpg(op, context); - LevelCheck_1xppm0ufw64nq(op, context); - LevelCheck_as2lzdd5d28b(op, context); - LevelCheck_1416gon2u3sue(op, context); -} - -void ValidateOperator_DEPTHWISE_CONV2D(const regor::Operation *op, const Context &context) -{ - const Argument input = {Category::Input, "input", "in_t", {4, 4}}; /*Input tensor shape=[N,H,W,C]*/ - const Argument weight = {Category::Input, "weight", "weight_t", {4, 4}}; /*Weight kernel size KH x KW - shape=[KH,KW,C,M]*/ - const Argument bias = {Category::Input, "bias", "out_t", {1, 1}}; /*Per output channel bias data. shape=[C*M]*/ - const Argument pad = {Category::Attribute, "pad", "int32_t", {1, 1}}; /*[pad_top, pad_bottom, pad_left, pad_right] - shape=[4]*/ - const Argument stride = {Category::Attribute, "stride", "int32_t", {1, 1}}; /*[stride_y, stride_x] shape=[2]*/ - const Argument dilation = {Category::Attribute, "dilation", "int32_t", {1, 1}}; /*[dilation_y, dilation_x] - shape=[2]*/ - const Argument input_zp = { - Category::Attribute, - "input_zp", - "in_t", - }; /*Input tensor zero point. Must be zero for non-int8 types. shape=-*/ - const Argument weight_zp = { - Category::Attribute, - "weight_zp", - "weight_t", - }; /*Weight zero point. Must be zero for non-int8 types. 
shape=-*/ - const Argument output = {Category::Output, "output", "out_t", {4, 4}}; /*Output tensor shape=[N,OH,OW,C*M]*/ - const std::vector arguments = { - &input, - &weight, - &bias, - &pad, - &stride, - &dilation, - &input_zp, - &weight_zp, - &output, - }; - const std::vector typesupports = { - { - {"in_t", "int8_t"}, - {"weight_t", "int8_t"}, - {"out_t", "int32_t"}, - }, // signed 8x8 with int32 accumulate - { - {"in_t", "int8_t"}, - {"weight_t", "int4_t"}, - {"out_t", "int32_t"}, - }, // signed 8x4 with int32 accumulate - { - {"in_t", "int16_t"}, - {"weight_t", "int8_t"}, - {"out_t", "int48_t"}, - }, // signed 16x8 with int48 accumulate - }; - ValidateArguments(op, arguments, typesupports, context); - ErrorIfCheck_1hby1qurzja4f(op, context); - ErrorIfCheck_1md8k265hfj92(op, context); - ErrorIfCheck_ojmgqziimenu(op, context); - ErrorIfCheck_1lrylbkd3w7ix(op, context); - ErrorIfCheck_3fzsq78v5ypau(op, context); - ErrorIfCheck_2vhj6e48eyzlr(op, context); - ErrorIfCheck_147wc580l2tik(op, context); - ErrorIfCheck_10sexbqileii7(op, context); - ErrorIfCheck_12rt0p658ac1(op, context); - ErrorIfCheck_3cem64qtn6ajr(op, context); - LevelCheck_1l00wczs5w70i(op, context); - LevelCheck_1hle41fus7cpl(op, context); - LevelCheck_2n3xkkz3ip4mz(op, context); - LevelCheck_3o7qpmmd9ga58(op, context); - LevelCheck_16lxbjk2bszcz(op, context); - LevelCheck_2kwfb08mbiwpg(op, context); - LevelCheck_1xppm0ufw64nq(op, context); - LevelCheck_as2lzdd5d28b(op, context); -} - -void ValidateOperator_FULLY_CONNECTED(const regor::Operation *op, const Context &context) -{ - const Argument input = {Category::Input, "input", "in_t", {2, 2}}; /*Input tensor shape=[N,IC]*/ - const Argument weight = {Category::Attribute, "weight", "weight_t", {2, 2}}; /*Weights shape=[OC,IC]*/ - const Argument bias = {Category::Attribute, "bias", "out_t", {1, 1}}; /*Per output channel bias data. shape=[OC]*/ - const Argument input_zp = { - Category::Attribute, - "input_zp", - "in_t", - }; /*Input tensor zero point. 
Must be zero for non-int8 types. shape=-*/ - const Argument weight_zp = { - Category::Attribute, - "weight_zp", - "weight_t", - }; /*Weight zero point. Must be zero for non-int8 types. shape=-*/ - const Argument output = {Category::Output, "output", "out_t", {2, 2}}; /*Output tensor shape=[N,OC]*/ - const std::vector arguments = { - &input, - &weight, - &bias, - &input_zp, - &weight_zp, - &output, - }; - const std::vector typesupports = { - { - {"in_t", "int8_t"}, - {"weight_t", "int8_t"}, - {"out_t", "int32_t"}, - }, // signed 8x8 with int32 accumulate - { - {"in_t", "int8_t"}, - {"weight_t", "int4_t"}, - {"out_t", "int32_t"}, - }, // signed 8x4 with int32 accumulate - { - {"in_t", "int16_t"}, - {"weight_t", "int8_t"}, - {"out_t", "int48_t"}, - }, // signed 16x8 with int48 accumulate - }; - ValidateArguments(op, arguments, typesupports, context); - ErrorIfCheck_1hby1qurzja4f(op, context); - ErrorIfCheck_1md8k265hfj92(op, context); - ErrorIfCheck_3ufiqep5ipuco(op, context); - ErrorIfCheck_3kcipzq18dxv9(op, context); - ErrorIfCheck_jcjmr2nnatvv(op, context); - ErrorIfCheck_qwmo2w7hxola(op, context); - ErrorIfCheck_c9o11f07skde(op, context); -} - -void ValidateOperator_MATMUL(const regor::Operation *op, const Context &context) -{ - const Argument A = {Category::Input, "A", "in_t", {3, 3}}; /*Input tensor A, N matrices of size HxC shape=[N,H,C]*/ - const Argument B = {Category::Input, "B", "in_t", {3, 3}}; /*Input tensor B, N matrices of size CxW shape=[N,C,W]*/ - const Argument A_zp = { - Category::Attribute, - "A_zp", - "in_t", - }; /*Input tensor A zero point. Must be zero for non-int8 types. shape=-*/ - const Argument B_zp = { - Category::Attribute, - "B_zp", - "in_t", - }; /*Input tensor B zero point. Must be zero for non-int8 types. 
shape=-*/ - const Argument output = {Category::Output, "output", "out_t", {3, 3}}; /*Output tensor, N matrices of size HxW - shape=[N,H,W]*/ - const std::vector arguments = { - &A, - &B, - &A_zp, - &B_zp, - &output, - }; - const std::vector typesupports = { - { - {"in_t", "int8_t"}, - {"out_t", "int32_t"}, - }, // signed 8x8 with int32 accumulate - { - {"in_t", "int16_t"}, - {"out_t", "int48_t"}, - }, // signed 16x16 with int48 accumulate - }; - ValidateArguments(op, arguments, typesupports, context); - ErrorIfCheck_1ellfcuw76b13(op, context); - ErrorIfCheck_h1uadv5irsu6(op, context); - ErrorIfCheck_1kfh97qingywb(op, context); - ErrorIfCheck_1azcq4511qzyx(op, context); -} - -void ValidateOperator_MAX_POOL2D(const regor::Operation *op, const Context &context) -{ - const Argument input = {Category::Input, "input", "in_out_t", {4, 4}}; /*Input tensor 4D shape=[N,IH,IW,C]*/ - const Argument kernel = {Category::Attribute, "kernel", "int32_t", {1, 1}}; /*[kernel_y, kernel_x] shape=[2]*/ - const Argument stride = {Category::Attribute, "stride", "int32_t", {1, 1}}; /*[stride_y, stride_x] shape=[2]*/ - const Argument pad = {Category::Attribute, "pad", "int32_t", {1, 1}}; /*[pad_top, pad_bottom, pad_left, pad_right] - shape=[4]*/ - const Argument output = {Category::Output, "output", "in_out_t", {4, 4}}; /*Output tensor 4D shape=[N,OH,OW,C]*/ - const std::vector arguments = { - &input, - &kernel, - &stride, - &pad, - &output, - }; - const std::vector typesupports = { - { - {"in_out_t", "int8_t"}, - }, // signed 8 - { - {"in_out_t", "int16_t"}, - }, // 16-bit - }; - ValidateArguments(op, arguments, typesupports, context); - ErrorIfCheck_36r4wpx3psd81(op, context); - ErrorIfCheck_1lrylbkd3w7ix(op, context); - ErrorIfCheck_ojmgqziimenu(op, context); - ErrorIfCheck_3vqy81ueu5wjk(op, context); - ErrorIfCheck_125xuezh1964i(op, context); - ErrorIfCheck_fqta626ku4qe(op, context); - ErrorIfCheck_ycjhrvf2yigr(op, context); - ErrorIfCheck_1c57olj698f3d(op, context); - 
LevelCheck_2i1ithnrq06wi(op, context); - LevelCheck_1wobi8axf7z2y(op, context); - LevelCheck_1xppm0ufw64nq(op, context); - LevelCheck_as2lzdd5d28b(op, context); - LevelCheck_2n3xkkz3ip4mz(op, context); - LevelCheck_3o7qpmmd9ga58(op, context); - LevelCheck_16lxbjk2bszcz(op, context); - LevelCheck_2kwfb08mbiwpg(op, context); -} - -void ValidateOperator_TRANSPOSE_CONV2D(const regor::Operation *op, const Context &context) -{ - const Argument input = {Category::Input, "input", "in_t", {4, 4}}; /*Input tensor shape=[N,IH,IW,IC]*/ - const Argument weight = {Category::Input, "weight", "weight_t", {4, 4}}; /*Weight kernel size KH x KW - shape=[OC,KH,KW,IC]*/ - const Argument bias = {Category::Input, "bias", "out_t", {1, 1}}; /*Per output channel bias data. shape=[OC]*/ - const Argument out_pad = {Category::Attribute, "out_pad", "int32_t", {1, 1}}; /*[out_pad_top, out_pad_bottom, - out_pad_left, out_pad_right] - shape=[4]*/ - const Argument stride = {Category::Attribute, "stride", "int32_t", {1, 1}}; /*[stride_y, stride_x] shape=[2]*/ - const Argument out_shape = {Category::Attribute, "out_shape", "int32_t", {1, 1}}; /*[N,OH,OW,OC] shape=[4]*/ - const Argument input_zp = { - Category::Attribute, - "input_zp", - "in_t", - }; /*Input tensor zero point. Must be zero for non-int8 types. shape=-*/ - const Argument weight_zp = { - Category::Attribute, - "weight_zp", - "weight_t", - }; /*Weight zero point. Must be zero for non-int8 types. 
shape=-*/ - const Argument output = {Category::Output, "output", "out_t", {4, 4}}; /*Output tensor shape=[N,OH,OW,OC]*/ - const std::vector arguments = { - &input, - &weight, - &bias, - &out_pad, - &stride, - &out_shape, - &input_zp, - &weight_zp, - &output, - }; - const std::vector typesupports = { - { - {"in_t", "int8_t"}, - {"weight_t", "int8_t"}, - {"out_t", "int32_t"}, - }, // signed 8x8 with int32 accumulate - { - {"in_t", "int8_t"}, - {"weight_t", "int4_t"}, - {"out_t", "int32_t"}, - }, // signed 8x4 with int32 accumulate - { - {"in_t", "int16_t"}, - {"weight_t", "int8_t"}, - {"out_t", "int48_t"}, - }, // signed 16x8 with int48 accumulate - }; - ValidateArguments(op, arguments, typesupports, context); - ErrorIfCheck_1hby1qurzja4f(op, context); - ErrorIfCheck_1md8k265hfj92(op, context); - ErrorIfCheck_q9dl3x81rc4o(op, context); - ErrorIfCheck_2rfkujt9lg7eq(op, context); - ErrorIfCheck_1lrylbkd3w7ix(op, context); - ErrorIfCheck_3nelbnmxyemot(op, context); - ErrorIfCheck_24conlof4w8eh(op, context); - ErrorIfCheck_2rm8rnsdfn14h(op, context); - ErrorIfCheck_36emtx7zwkk96(op, context); - ErrorIfCheck_2r9jencgka20o(op, context); - ErrorIfCheck_207p0r46d35m0(op, context); - ErrorIfCheck_cr43yjpqkcpd(op, context); - LevelCheck_17eyg1nicy12g(op, context); - LevelCheck_6qao6e1mxke0(op, context); - LevelCheck_pnaf5n03f8jg(op, context); - LevelCheck_me421i5r5j13(op, context); - LevelCheck_2ffhdgbz1kvxc(op, context); - LevelCheck_a0x2apl3zoz(op, context); - LevelCheck_1xppm0ufw64nq(op, context); - LevelCheck_as2lzdd5d28b(op, context); -} - -void ValidateOperator_CLAMP(const regor::Operation *op, const Context &context) -{ - const Argument input = { - Category::Input, - "input", - "in_out_t", - }; /*Input tensor shape=shape*/ - const Argument min_val = { - Category::Attribute, - "min_val", - "in_out_t", - }; /*Minimum clip value shape=-*/ - const Argument max_val = { - Category::Attribute, - "max_val", - "in_out_t", - }; /*Maximum clip value shape=-*/ - const Argument 
output = { - Category::Output, - "output", - "in_out_t", - }; /*Output tensor of same type and shape as input shape=shape*/ - const std::vector arguments = { - &input, - &min_val, - &max_val, - &output, - }; - const std::vector typesupports = { - { - {"in_out_t", "int8_t"}, - }, // signed 8 - { - {"in_out_t", "int16_t"}, - }, // signed 16 - }; - ValidateArguments(op, arguments, typesupports, context); - ErrorIfCheck_xod9coigx1x2(op, context); - ErrorIfCheck_10u6py7exa66n(op, context); - LevelCheck_1flzmpv6hubzc(op, context); -} - -void ValidateOperator_SIGMOID(const regor::Operation *op, const Context &context) -{ - const Argument input = { - Category::Input, - "input", - "in_out_t", - }; /*Input tensor shape=shape*/ - const Argument output = { - Category::Output, - "output", - "in_out_t", - }; /*Output tensor of same type and shape as input shape=shape*/ - const std::vector arguments = { - &input, - &output, - }; - const std::vector typesupports = {}; - ValidateArguments(op, arguments, typesupports, context); - ErrorIfCheck_10u6py7exa66n(op, context); - LevelCheck_1flzmpv6hubzc(op, context); -} - -void ValidateOperator_TANH(const regor::Operation *op, const Context &context) -{ - const Argument input = { - Category::Input, - "input", - "in_out_t", - }; /*Input tensor shape=shape*/ - const Argument output = { - Category::Output, - "output", - "in_out_t", - }; /*Output tensor of same type and shape as input shape=shape*/ - const std::vector arguments = { - &input, - &output, - }; - const std::vector typesupports = {}; - ValidateArguments(op, arguments, typesupports, context); - ErrorIfCheck_10u6py7exa66n(op, context); - LevelCheck_1flzmpv6hubzc(op, context); -} - -void ValidateOperator_ADD(const regor::Operation *op, const Context &context) -{ - const Argument input1 = { - Category::Input, - "input1", - "in_out_t", - }; /*Input tensor shape=shape1*/ - const Argument input2 = { - Category::Input, - "input2", - "in_out_t", - }; /*Input tensor with the same rank as 
input1 shape=shape2*/ - const Argument output = { - Category::Output, - "output", - "in_out_t", - }; /*Output tensor with broadcast shape if necessary shape=shape*/ - const std::vector arguments = { - &input1, - &input2, - &output, - }; - const std::vector typesupports = { - { - {"in_out_t", "int32_t"}, - }, // signed 32 - }; - ValidateArguments(op, arguments, typesupports, context); - ErrorIfCheck_1yism57if6v2z(op, context); - ErrorIfCheck_3k5ug2w7gxc7r(op, context); - LevelCheck_1flzmpv6hubzc(op, context); -} - -void ValidateOperator_ARITHMETIC_RIGHT_SHIFT(const regor::Operation *op, const Context &context) -{ - const Argument input1 = { - Category::Input, - "input1", - "in_out_t", - }; /*Input tensor shape=shape1*/ - const Argument input2 = { - Category::Input, - "input2", - "in_out_t", - }; /*Input tensor with the same rank as input1 shape=shape2*/ - const Argument round = { - Category::Attribute, - "round", - "bool_t", - }; /*If true then the shift is rounded shape=-*/ - const Argument output = { - Category::Output, - "output", - "in_out_t", - }; /*Output tensor with broadcast shape if necessary shape=shape*/ - const std::vector arguments = { - &input1, - &input2, - &round, - &output, - }; - const std::vector typesupports = { - { - {"in_out_t", "int8_t"}, - }, // signed 8 - { - {"in_out_t", "int16_t"}, - }, // signed 16 - { - {"in_out_t", "int32_t"}, - }, // signed 32 - }; - ValidateArguments(op, arguments, typesupports, context); - ErrorIfCheck_1yism57if6v2z(op, context); - ErrorIfCheck_3k5ug2w7gxc7r(op, context); - LevelCheck_1flzmpv6hubzc(op, context); - RequireCheck_25jhgrylo2an5(op, context); -} - -void ValidateOperator_BITWISE_AND(const regor::Operation *op, const Context &context) -{ - const Argument input1 = { - Category::Input, - "input1", - "in_out_t", - }; /*Input tensor shape=shape1*/ - const Argument input2 = { - Category::Input, - "input2", - "in_out_t", - }; /*Input tensor with the same rank as input1 shape=shape2*/ - const Argument output = { - 
Category::Output, - "output", - "in_out_t", - }; /*Output tensor with broadcast shape if necessary shape=shape*/ - const std::vector arguments = { - &input1, - &input2, - &output, - }; - const std::vector typesupports = { - { - {"in_out_t", "int8_t"}, - }, // signed 8 - { - {"in_out_t", "int16_t"}, - }, // signed 16 - { - {"in_out_t", "int32_t"}, - }, // signed 32 - }; - ValidateArguments(op, arguments, typesupports, context); - ErrorIfCheck_1yism57if6v2z(op, context); - ErrorIfCheck_3k5ug2w7gxc7r(op, context); - LevelCheck_1flzmpv6hubzc(op, context); -} - -void ValidateOperator_BITWISE_OR(const regor::Operation *op, const Context &context) -{ - const Argument input1 = { - Category::Input, - "input1", - "in_out_t", - }; /*Input tensor shape=shape1*/ - const Argument input2 = { - Category::Input, - "input2", - "in_out_t", - }; /*Input tensor with the same rank as input1 shape=shape2*/ - const Argument output = { - Category::Output, - "output", - "in_out_t", - }; /*Output tensor with broadcast shape if necessary shape=shape*/ - const std::vector arguments = { - &input1, - &input2, - &output, - }; - const std::vector typesupports = { - { - {"in_out_t", "int8_t"}, - }, // signed 8 - { - {"in_out_t", "int16_t"}, - }, // signed 16 - { - {"in_out_t", "int32_t"}, - }, // signed 32 - }; - ValidateArguments(op, arguments, typesupports, context); - ErrorIfCheck_1yism57if6v2z(op, context); - ErrorIfCheck_3k5ug2w7gxc7r(op, context); - LevelCheck_1flzmpv6hubzc(op, context); -} - -void ValidateOperator_BITWISE_XOR(const regor::Operation *op, const Context &context) -{ - const Argument input1 = { - Category::Input, - "input1", - "in_out_t", - }; /*Input tensor shape=shape1*/ - const Argument input2 = { - Category::Input, - "input2", - "in_out_t", - }; /*Input tensor with the same rank as input1 shape=shape2*/ - const Argument output = { - Category::Output, - "output", - "in_out_t", - }; /*Output tensor with broadcast shape if necessary shape=shape*/ - const std::vector arguments = 
{ - &input1, - &input2, - &output, - }; - const std::vector typesupports = { - { - {"in_out_t", "int8_t"}, - }, // signed 8 - { - {"in_out_t", "int16_t"}, - }, // signed 16 - { - {"in_out_t", "int32_t"}, - }, // signed 32 - }; - ValidateArguments(op, arguments, typesupports, context); - ErrorIfCheck_1yism57if6v2z(op, context); - ErrorIfCheck_3k5ug2w7gxc7r(op, context); - LevelCheck_1flzmpv6hubzc(op, context); -} - -void ValidateOperator_INTDIV(const regor::Operation *op, const Context &context) -{ - const Argument input1 = { - Category::Input, - "input1", - "in_out_t", - }; /*Input tensor shape=shape1*/ - const Argument input2 = { - Category::Input, - "input2", - "in_out_t", - }; /*Input tensor with the same rank as input1 shape=shape2*/ - const Argument output = { - Category::Output, - "output", - "in_out_t", - }; /*Output tensor with broadcast shape if necessary shape=shape*/ - const std::vector arguments = { - &input1, - &input2, - &output, - }; - const std::vector typesupports = { - { - {"in_out_t", "int32_t"}, - }, // signed 32 - }; - ValidateArguments(op, arguments, typesupports, context); - ErrorIfCheck_1yism57if6v2z(op, context); - ErrorIfCheck_3k5ug2w7gxc7r(op, context); - LevelCheck_1flzmpv6hubzc(op, context); - RequireCheck_35z4hcgn21c8p(op, context); - RequireCheck_2v5c1x79g8j7o(op, context); -} - -void ValidateOperator_LOGICAL_AND(const regor::Operation *op, const Context &context) -{ - const Argument input1 = { - Category::Input, - "input1", - "in_out_t", - }; /*Input tensor shape=shape1*/ - const Argument input2 = { - Category::Input, - "input2", - "in_out_t", - }; /*Input tensor with the same rank as input1 shape=shape2*/ - const Argument output = { - Category::Output, - "output", - "in_out_t", - }; /*Output tensor with broadcast shape if necessary shape=shape*/ - const std::vector arguments = { - &input1, - &input2, - &output, - }; - const std::vector typesupports = { - { - {"in_out_t", "bool_t"}, - }, // boolean - }; - ValidateArguments(op, 
arguments, typesupports, context); - ErrorIfCheck_1yism57if6v2z(op, context); - ErrorIfCheck_3k5ug2w7gxc7r(op, context); - LevelCheck_1flzmpv6hubzc(op, context); -} - -void ValidateOperator_LOGICAL_LEFT_SHIFT(const regor::Operation *op, const Context &context) -{ - const Argument input1 = { - Category::Input, - "input1", - "in_out_t", - }; /*Input tensor shape=shape1*/ - const Argument input2 = { - Category::Input, - "input2", - "in_out_t", - }; /*Input tensor with the same rank as input1 shape=shape2*/ - const Argument output = { - Category::Output, - "output", - "in_out_t", - }; /*Output tensor with broadcast shape if necessary shape=shape*/ - const std::vector arguments = { - &input1, - &input2, - &output, - }; - const std::vector typesupports = { - { - {"in_out_t", "int8_t"}, - }, // signed 8 - { - {"in_out_t", "int16_t"}, - }, // signed 16 - { - {"in_out_t", "int32_t"}, - }, // signed 32 - }; - ValidateArguments(op, arguments, typesupports, context); - ErrorIfCheck_1yism57if6v2z(op, context); - ErrorIfCheck_3k5ug2w7gxc7r(op, context); - LevelCheck_1flzmpv6hubzc(op, context); - RequireCheck_3k2pr9vozq62t(op, context); -} - -void ValidateOperator_LOGICAL_RIGHT_SHIFT(const regor::Operation *op, const Context &context) -{ - const Argument input1 = { - Category::Input, - "input1", - "in_out_t", - }; /*Input tensor shape=shape1*/ - const Argument input2 = { - Category::Input, - "input2", - "in_out_t", - }; /*Input tensor with the same rank as input1 shape=shape2*/ - const Argument output = { - Category::Output, - "output", - "in_out_t", - }; /*Output tensor with broadcast shape if necessary shape=shape*/ - const std::vector arguments = { - &input1, - &input2, - &output, - }; - const std::vector typesupports = { - { - {"in_out_t", "int8_t"}, - }, // signed 8 - { - {"in_out_t", "int16_t"}, - }, // signed 16 - { - {"in_out_t", "int32_t"}, - }, // signed 32 - }; - ValidateArguments(op, arguments, typesupports, context); - ErrorIfCheck_1yism57if6v2z(op, context); - 
ErrorIfCheck_3k5ug2w7gxc7r(op, context); - LevelCheck_1flzmpv6hubzc(op, context); - RequireCheck_3k2pr9vozq62t(op, context); -} - -void ValidateOperator_LOGICAL_OR(const regor::Operation *op, const Context &context) -{ - const Argument input1 = { - Category::Input, - "input1", - "in_out_t", - }; /*Input tensor shape=shape1*/ - const Argument input2 = { - Category::Input, - "input2", - "in_out_t", - }; /*Input tensor with the same rank as input1 shape=shape2*/ - const Argument output = { - Category::Output, - "output", - "in_out_t", - }; /*Output tensor with broadcast shape if necessary shape=shape*/ - const std::vector arguments = { - &input1, - &input2, - &output, - }; - const std::vector typesupports = { - { - {"in_out_t", "bool_t"}, - }, // boolean - }; - ValidateArguments(op, arguments, typesupports, context); - ErrorIfCheck_1yism57if6v2z(op, context); - ErrorIfCheck_3k5ug2w7gxc7r(op, context); - LevelCheck_1flzmpv6hubzc(op, context); -} - -void ValidateOperator_LOGICAL_XOR(const regor::Operation *op, const Context &context) -{ - const Argument input1 = { - Category::Input, - "input1", - "in_out_t", - }; /*Input tensor shape=shape1*/ - const Argument input2 = { - Category::Input, - "input2", - "in_out_t", - }; /*Input tensor with the same rank as input1 shape=shape2*/ - const Argument output = { - Category::Output, - "output", - "in_out_t", - }; /*Output tensor with broadcast shape if necessary shape=shape*/ - const std::vector arguments = { - &input1, - &input2, - &output, - }; - const std::vector typesupports = { - { - {"in_out_t", "bool_t"}, - }, // boolean - }; - ValidateArguments(op, arguments, typesupports, context); - ErrorIfCheck_1yism57if6v2z(op, context); - ErrorIfCheck_3k5ug2w7gxc7r(op, context); - LevelCheck_1flzmpv6hubzc(op, context); -} - -void ValidateOperator_MAXIMUM(const regor::Operation *op, const Context &context) -{ - const Argument input1 = { - Category::Input, - "input1", - "in_out_t", - }; /*Input tensor shape=shape1*/ - const Argument 
input2 = { - Category::Input, - "input2", - "in_out_t", - }; /*Input tensor with the same rank as input1 shape=shape2*/ - const Argument output = { - Category::Output, - "output", - "in_out_t", - }; /*Output tensor with broadcast shape if necessary shape=shape*/ - const std::vector arguments = { - &input1, - &input2, - &output, - }; - const std::vector typesupports = { - { - {"in_out_t", "int32_t"}, - }, // signed 32 - }; - ValidateArguments(op, arguments, typesupports, context); - ErrorIfCheck_1yism57if6v2z(op, context); - ErrorIfCheck_3k5ug2w7gxc7r(op, context); - LevelCheck_1flzmpv6hubzc(op, context); -} - -void ValidateOperator_MINIMUM(const regor::Operation *op, const Context &context) -{ - const Argument input1 = { - Category::Input, - "input1", - "in_out_t", - }; /*Input tensor shape=shape1*/ - const Argument input2 = { - Category::Input, - "input2", - "in_out_t", - }; /*Input tensor with the same rank as input1 shape=shape2*/ - const Argument output = { - Category::Output, - "output", - "in_out_t", - }; /*Output tensor with broadcast shape if necessary shape=shape*/ - const std::vector arguments = { - &input1, - &input2, - &output, - }; - const std::vector typesupports = { - { - {"in_out_t", "int32_t"}, - }, // signed 32 - }; - ValidateArguments(op, arguments, typesupports, context); - ErrorIfCheck_1yism57if6v2z(op, context); - ErrorIfCheck_3k5ug2w7gxc7r(op, context); - LevelCheck_1flzmpv6hubzc(op, context); -} - -void ValidateOperator_MUL(const regor::Operation *op, const Context &context) -{ - const Argument input1 = { - Category::Input, - "input1", - "in_t", - }; /*Input tensor shape=shape1*/ - const Argument input2 = { - Category::Input, - "input2", - "in_t", - }; /*Input tensor with the same rank as input1 shape=shape2*/ - const Argument shift = { - Category::Attribute, - "shift", - "uint6_t", - }; /*Result right shift (int32_t data type only) shape=-*/ - const Argument output = { - Category::Output, - "output", - "out_t", - }; /*Output tensor with 
broadcast shape if necessary shape=shape*/ - const std::vector arguments = { - &input1, - &input2, - &shift, - &output, - }; - const std::vector typesupports = { - { - {"in_t", "int8_t"}, - {"out_t", "int32_t"}, - }, // signed 8 - { - {"in_t", "int16_t"}, - {"out_t", "int32_t"}, - }, // signed 16 - { - {"in_t", "int32_t"}, - {"out_t", "int32_t"}, - }, // signed 32 - }; - ValidateArguments(op, arguments, typesupports, context); - ErrorIfCheck_2gdayq6ofi7wx(op, context); - ErrorIfCheck_1yism57if6v2z(op, context); - ErrorIfCheck_3k5ug2w7gxc7r(op, context); - LevelCheck_1flzmpv6hubzc(op, context); - RequireCheck_27adsuj7sthvo(op, context); -} - -void ValidateOperator_POW(const regor::Operation *op, const Context &context) -{ - const Argument input1 = { - Category::Input, - "input1", - "in_out_t", - }; /*Input tensor shape=shape1*/ - const Argument input2 = { - Category::Input, - "input2", - "in_out_t", - }; /*Input tensor with the same rank as input1 shape=shape2*/ - const Argument output = { - Category::Output, - "output", - "in_out_t", - }; /*Output tensor with broadcast shape if necessary shape=shape*/ - const std::vector arguments = { - &input1, - &input2, - &output, - }; - const std::vector typesupports = {}; - ValidateArguments(op, arguments, typesupports, context); - ErrorIfCheck_1yism57if6v2z(op, context); - ErrorIfCheck_3k5ug2w7gxc7r(op, context); - LevelCheck_1flzmpv6hubzc(op, context); -} - -void ValidateOperator_SUB(const regor::Operation *op, const Context &context) -{ - const Argument input1 = { - Category::Input, - "input1", - "in_out_t", - }; /*Input tensor shape=shape1*/ - const Argument input2 = { - Category::Input, - "input2", - "in_out_t", - }; /*Input tensor with the same rank as input1 shape=shape2*/ - const Argument output = { - Category::Output, - "output", - "in_out_t", - }; /*Output tensor with broadcast shape if necessary shape=shape*/ - const std::vector arguments = { - &input1, - &input2, - &output, - }; - const std::vector typesupports = { 
- { - {"in_out_t", "int32_t"}, - }, // signed 32 - }; - ValidateArguments(op, arguments, typesupports, context); - ErrorIfCheck_1yism57if6v2z(op, context); - ErrorIfCheck_3k5ug2w7gxc7r(op, context); - LevelCheck_1flzmpv6hubzc(op, context); -} - -void ValidateOperator_TABLE(const regor::Operation *op, const Context &context) -{ - const Argument input = { - Category::Input, - "input", - "in_t", - }; /*Input tensor shape=shape*/ - const Argument table = {Category::Attribute, "table", "table_t", {1, 1}}; /*Lookup table tensor shape=[TABLE_SIZE]*/ - const Argument output = { - Category::Output, - "output", - "out_t", - }; /*Output tensor shape=shape*/ - const std::vector arguments = { - &input, - &table, - &output, - }; - const std::vector typesupports = { - { - {"in_t", "int8_t"}, - {"table_t", "int8_t"}, - {"out_t", "int8_t"}, - {"TABLE_SIZE", "256"}, - }, // signed 8 - { - {"in_t", "int16_t"}, - {"table_t", "int16_t"}, - {"out_t", "int32_t"}, - {"TABLE_SIZE", "513"}, - }, // signed 16 - }; - ValidateArguments(op, arguments, typesupports, context); - ErrorIfCheck_10u6py7exa66n(op, context); - LevelCheck_1flzmpv6hubzc(op, context); - RequireCheck_3o6eotvyt76cz(op, context); -} - -void ValidateOperator_ABS(const regor::Operation *op, const Context &context) -{ - const Argument input1 = { - Category::Input, - "input1", - "in_out_t", - }; /*Input tensor shape=shape*/ - const Argument output = { - Category::Output, - "output", - "in_out_t", - }; /*Output tensor of same type, size as the input tensor shape=shape*/ - const std::vector arguments = { - &input1, - &output, - }; - const std::vector typesupports = { - { - {"in_out_t", "int32_t"}, - }, // signed 32 - }; - ValidateArguments(op, arguments, typesupports, context); - ErrorIfCheck_396rg8p65j58r(op, context); - LevelCheck_1flzmpv6hubzc(op, context); -} - -void ValidateOperator_BITWISE_NOT(const regor::Operation *op, const Context &context) -{ - const Argument input1 = { - Category::Input, - "input1", - "in_out_t", - }; 
/*Input tensor shape=shape*/ - const Argument output = { - Category::Output, - "output", - "in_out_t", - }; /*Output tensor of same type, size as the input tensor shape=shape*/ - const std::vector arguments = { - &input1, - &output, - }; - const std::vector typesupports = { - { - {"in_out_t", "int8_t"}, - }, // signed 8 - { - {"in_out_t", "int16_t"}, - }, // signed 16 - { - {"in_out_t", "int32_t"}, - }, // signed 32 - }; - ValidateArguments(op, arguments, typesupports, context); - ErrorIfCheck_396rg8p65j58r(op, context); - LevelCheck_1flzmpv6hubzc(op, context); -} - -void ValidateOperator_CEIL(const regor::Operation *op, const Context &context) -{ - const Argument input1 = { - Category::Input, - "input1", - "in_out_t", - }; /*Input tensor shape=shape*/ - const Argument output = { - Category::Output, - "output", - "in_out_t", - }; /*Output tensor of same type, size as the input tensor shape=shape*/ - const std::vector arguments = { - &input1, - &output, - }; - const std::vector typesupports = {}; - ValidateArguments(op, arguments, typesupports, context); - ErrorIfCheck_396rg8p65j58r(op, context); - LevelCheck_1flzmpv6hubzc(op, context); -} - -void ValidateOperator_CLZ(const regor::Operation *op, const Context &context) -{ - const Argument input1 = { - Category::Input, - "input1", - "in_out_t", - }; /*Input tensor shape=shape*/ - const Argument output = { - Category::Output, - "output", - "in_out_t", - }; /*Output tensor of same type, size as the input tensor shape=shape*/ - const std::vector arguments = { - &input1, - &output, - }; - const std::vector typesupports = { - { - {"in_out_t", "int32_t"}, - }, // signed 32 - }; - ValidateArguments(op, arguments, typesupports, context); - ErrorIfCheck_396rg8p65j58r(op, context); - LevelCheck_1flzmpv6hubzc(op, context); -} - -void ValidateOperator_EXP(const regor::Operation *op, const Context &context) -{ - const Argument input1 = { - Category::Input, - "input1", - "in_out_t", - }; /*Input tensor shape=shape*/ - const 
Argument output = { - Category::Output, - "output", - "in_out_t", - }; /*Output tensor of same type, size as the input tensor shape=shape*/ - const std::vector arguments = { - &input1, - &output, - }; - const std::vector typesupports = {}; - ValidateArguments(op, arguments, typesupports, context); - ErrorIfCheck_396rg8p65j58r(op, context); - LevelCheck_1flzmpv6hubzc(op, context); -} - -void ValidateOperator_FLOOR(const regor::Operation *op, const Context &context) -{ - const Argument input1 = { - Category::Input, - "input1", - "in_out_t", - }; /*Input tensor shape=shape*/ - const Argument output = { - Category::Output, - "output", - "in_out_t", - }; /*Output tensor of same type, size as the input tensor shape=shape*/ - const std::vector arguments = { - &input1, - &output, - }; - const std::vector typesupports = {}; - ValidateArguments(op, arguments, typesupports, context); - ErrorIfCheck_396rg8p65j58r(op, context); - LevelCheck_1flzmpv6hubzc(op, context); -} - -void ValidateOperator_LOG(const regor::Operation *op, const Context &context) -{ - const Argument input1 = { - Category::Input, - "input1", - "in_out_t", - }; /*Input tensor shape=shape*/ - const Argument output = { - Category::Output, - "output", - "in_out_t", - }; /*Output tensor of same type, size as the input tensor shape=shape*/ - const std::vector arguments = { - &input1, - &output, - }; - const std::vector typesupports = {}; - ValidateArguments(op, arguments, typesupports, context); - ErrorIfCheck_396rg8p65j58r(op, context); - LevelCheck_1flzmpv6hubzc(op, context); -} - -void ValidateOperator_LOGICAL_NOT(const regor::Operation *op, const Context &context) -{ - const Argument input1 = { - Category::Input, - "input1", - "in_out_t", - }; /*Input tensor shape=shape*/ - const Argument output = { - Category::Output, - "output", - "in_out_t", - }; /*Output tensor of same type, size as the input tensor shape=shape*/ - const std::vector arguments = { - &input1, - &output, - }; - const std::vector typesupports 
= { - { - {"in_out_t", "bool_t"}, - }, // Boolean - }; - ValidateArguments(op, arguments, typesupports, context); - ErrorIfCheck_396rg8p65j58r(op, context); - LevelCheck_1flzmpv6hubzc(op, context); -} - -void ValidateOperator_NEGATE(const regor::Operation *op, const Context &context) -{ - const Argument input1 = { - Category::Input, - "input1", - "in_out_t", - }; /*Input tensor shape=shape*/ - const Argument input1_zp = { - Category::Attribute, - "input1_zp", - "in_out_t", - }; /*Input 1 zero point. Must be zero for non-int8 types. shape=-*/ - const Argument output_zp = { - Category::Attribute, - "output_zp", - "in_out_t", - }; /*Output zero point. Must be zero for non-int8 types. shape=-*/ - const Argument output = { - Category::Output, - "output", - "in_out_t", - }; /*Output tensor of same type, size as the input tensor shape=shape*/ - const std::vector arguments = { - &input1, - &input1_zp, - &output_zp, - &output, - }; - const std::vector typesupports = { - { - {"in_out_t", "int8_t"}, - {"acc_t", "int32_t"}, - }, // signed 8 - { - {"in_out_t", "int16_t"}, - {"acc_t", "int32_t"}, - }, // signed 16 - { - {"in_out_t", "int32_t"}, - {"acc_t", "int32_t"}, - }, // signed 32 - }; - ValidateArguments(op, arguments, typesupports, context); - ErrorIfCheck_38qvty7pudfz2(op, context); - ErrorIfCheck_1n0denkrrrlr1(op, context); - ErrorIfCheck_396rg8p65j58r(op, context); - LevelCheck_1flzmpv6hubzc(op, context); -} - -void ValidateOperator_RECIPROCAL(const regor::Operation *op, const Context &context) -{ - const Argument input1 = { - Category::Input, - "input1", - "in_out_t", - }; /*Input tensor shape=shape*/ - const Argument output = { - Category::Output, - "output", - "in_out_t", - }; /*Output tensor of same type, size as the input tensor shape=shape*/ - const std::vector arguments = { - &input1, - &output, - }; - const std::vector typesupports = {}; - ValidateArguments(op, arguments, typesupports, context); - ErrorIfCheck_396rg8p65j58r(op, context); - 
LevelCheck_1flzmpv6hubzc(op, context); -} - -void ValidateOperator_RSQRT(const regor::Operation *op, const Context &context) -{ - const Argument input1 = { - Category::Input, - "input1", - "in_out_t", - }; /*Input tensor shape=shape*/ - const Argument output = { - Category::Output, - "output", - "in_out_t", - }; /*Output tensor of same type, size as the input tensor shape=shape*/ - const std::vector arguments = { - &input1, - &output, - }; - const std::vector typesupports = {}; - ValidateArguments(op, arguments, typesupports, context); - ErrorIfCheck_396rg8p65j58r(op, context); - LevelCheck_1flzmpv6hubzc(op, context); -} - -void ValidateOperator_SELECT(const regor::Operation *op, const Context &context) -{ - const Argument input1 = { - Category::Input, - "input1", - "bool_t", - }; /*Input selector tensor shape=shape1*/ - const Argument input2 = { - Category::Input, - "input2", - "in_out_t", - }; /*Input value tensor if input1 is True shape=shape2*/ - const Argument input3 = { - Category::Input, - "input3", - "in_out_t", - }; /*Input value tensor if input1 is False shape=shape3*/ - const Argument output = { - Category::Output, - "output", - "in_out_t", - }; /*Output tensor of same type as input2 and input3, with broadcast shape if necessary shape=shape*/ - const std::vector arguments = { - &input1, - &input2, - &input3, - &output, - }; - const std::vector typesupports = { - { - {"in_out_t", "bool_t"}, - }, // Boolean - { - {"in_out_t", "int8_t"}, - }, // signed 8 - { - {"in_out_t", "int16_t"}, - }, // signed 16 - { - {"in_out_t", "int32_t"}, - }, // signed 32 - }; - ValidateArguments(op, arguments, typesupports, context); - ErrorIfCheck_1yism57if6v2z(op, context); - ErrorIfCheck_3k5ug2w7gxc7r(op, context); - ErrorIfCheck_3tccsjner0km9(op, context); - LevelCheck_1flzmpv6hubzc(op, context); -} - -void ValidateOperator_EQUAL(const regor::Operation *op, const Context &context) -{ - const Argument input1 = { - Category::Input, - "input1", - "in_t", - }; /*Input tensor 
shape=shape1*/ - const Argument input2 = { - Category::Input, - "input2", - "in_t", - }; /*Input tensor with the same rank as input1 shape=shape2*/ - const Argument output = { - Category::Output, - "output", - "out_t", - }; /*Output tensor with broadcast shape if necessary shape=shape*/ - const std::vector arguments = { - &input1, - &input2, - &output, - }; - const std::vector typesupports = { - { - {"in_t", "int32_t"}, - {"out_t", "bool_t"}, - }, // signed 32 - }; - ValidateArguments(op, arguments, typesupports, context); - ErrorIfCheck_1yism57if6v2z(op, context); - ErrorIfCheck_3k5ug2w7gxc7r(op, context); - LevelCheck_1flzmpv6hubzc(op, context); -} - -void ValidateOperator_GREATER(const regor::Operation *op, const Context &context) -{ - const Argument input1 = { - Category::Input, - "input1", - "in_t", - }; /*Input tensor shape=shape1*/ - const Argument input2 = { - Category::Input, - "input2", - "in_t", - }; /*Input tensor with the same rank as input1 shape=shape2*/ - const Argument output = { - Category::Output, - "output", - "out_t", - }; /*Output tensor with broadcast shape if necessary shape=shape*/ - const std::vector arguments = { - &input1, - &input2, - &output, - }; - const std::vector typesupports = { - { - {"in_t", "int32_t"}, - {"out_t", "bool_t"}, - }, // signed 32 - }; - ValidateArguments(op, arguments, typesupports, context); - ErrorIfCheck_1yism57if6v2z(op, context); - ErrorIfCheck_3k5ug2w7gxc7r(op, context); - LevelCheck_1flzmpv6hubzc(op, context); -} - -void ValidateOperator_GREATER_EQUAL(const regor::Operation *op, const Context &context) -{ - const Argument input1 = { - Category::Input, - "input1", - "in_t", - }; /*Input tensor shape=shape1*/ - const Argument input2 = { - Category::Input, - "input2", - "in_t", - }; /*Input tensor with the same rank as input1 shape=shape2*/ - const Argument output = { - Category::Output, - "output", - "out_t", - }; /*Output tensor with broadcast shape if necessary shape=shape*/ - const std::vector arguments = { 
- &input1, - &input2, - &output, - }; - const std::vector typesupports = { - { - {"in_t", "int32_t"}, - {"out_t", "bool_t"}, - }, // signed 32 - }; - ValidateArguments(op, arguments, typesupports, context); - ErrorIfCheck_1yism57if6v2z(op, context); - ErrorIfCheck_3k5ug2w7gxc7r(op, context); - LevelCheck_1flzmpv6hubzc(op, context); -} - -void ValidateOperator_REDUCE_ALL(const regor::Operation *op, const Context &context) -{ - const Argument input = { - Category::Input, - "input", - "in_out_t", - }; /*Input tensor with rank from 1 to 4 shape=shape1*/ - const Argument axis = { - Category::Attribute, - "axis", - "int32_t", - }; /*Axis to reduce, in range from 0 to rank(shape1)-1 shape=-*/ - const Argument output = { - Category::Output, - "output", - "in_out_t", - }; /*Output tensor. Same rank as the input tensor. shape=shape*/ - const std::vector arguments = { - &input, - &axis, - &output, - }; - const std::vector typesupports = { - { - {"in_out_t", "bool_t"}, - }, // boolean - }; - ValidateArguments(op, arguments, typesupports, context); - ErrorIfCheck_3tg4p2a5te0jy(op, context); - ErrorIfCheck_33exz9gn2i1wy(op, context); -} - -void ValidateOperator_REDUCE_ANY(const regor::Operation *op, const Context &context) -{ - const Argument input = { - Category::Input, - "input", - "in_out_t", - }; /*Input tensor with rank from 1 to 4 shape=shape1*/ - const Argument axis = { - Category::Attribute, - "axis", - "int32_t", - }; /*Axis to reduce, in range from 0 to rank(shape1)-1 shape=-*/ - const Argument output = { - Category::Output, - "output", - "in_out_t", - }; /*Output tensor. Same rank as the input tensor. 
shape=shape*/ - const std::vector arguments = { - &input, - &axis, - &output, - }; - const std::vector typesupports = { - { - {"in_out_t", "bool_t"}, - }, // boolean - }; - ValidateArguments(op, arguments, typesupports, context); - ErrorIfCheck_3tg4p2a5te0jy(op, context); - ErrorIfCheck_33exz9gn2i1wy(op, context); -} - -void ValidateOperator_REDUCE_MAX(const regor::Operation *op, const Context &context) -{ - const Argument input = { - Category::Input, - "input", - "in_out_t", - }; /*Input tensor with rank from 1 to 4 shape=shape1*/ - const Argument axis = { - Category::Attribute, - "axis", - "int32_t", - }; /*Axis to reduce, in range from 0 to rank(shape1)-1 shape=-*/ - const Argument output = { - Category::Output, - "output", - "in_out_t", - }; /*Output tensor. Same rank as the input tensor. shape=shape*/ - const std::vector arguments = { - &input, - &axis, - &output, - }; - const std::vector typesupports = { - { - {"in_out_t", "int8_t"}, - }, // signed 8 - { - {"in_out_t", "int16_t"}, - }, // signed 16 - { - {"in_out_t", "int32_t"}, - }, // signed 32 - }; - ValidateArguments(op, arguments, typesupports, context); - ErrorIfCheck_3tg4p2a5te0jy(op, context); - ErrorIfCheck_33exz9gn2i1wy(op, context); -} - -void ValidateOperator_REDUCE_MIN(const regor::Operation *op, const Context &context) -{ - const Argument input = { - Category::Input, - "input", - "in_out_t", - }; /*Input tensor with rank from 1 to 4 shape=shape1*/ - const Argument axis = { - Category::Attribute, - "axis", - "int32_t", - }; /*Axis to reduce, in range from 0 to rank(shape1)-1 shape=-*/ - const Argument output = { - Category::Output, - "output", - "in_out_t", - }; /*Output tensor. Same rank as the input tensor. 
shape=shape*/ - const std::vector arguments = { - &input, - &axis, - &output, - }; - const std::vector typesupports = { - { - {"in_out_t", "int8_t"}, - }, // signed 8 - { - {"in_out_t", "int16_t"}, - }, // signed 16 - { - {"in_out_t", "int32_t"}, - }, // signed 32 - }; - ValidateArguments(op, arguments, typesupports, context); - ErrorIfCheck_3tg4p2a5te0jy(op, context); - ErrorIfCheck_33exz9gn2i1wy(op, context); -} - -void ValidateOperator_REDUCE_PRODUCT(const regor::Operation *op, const Context &context) -{ - const Argument input = { - Category::Input, - "input", - "in_out_t", - }; /*Input tensor with rank from 1 to 4 shape=shape1*/ - const Argument axis = { - Category::Attribute, - "axis", - "int32_t", - }; /*Axis to reduce, in range from 0 to rank(shape1)-1 shape=-*/ - const Argument output = { - Category::Output, - "output", - "in_out_t", - }; /*Output tensor. Same rank as the input tensor. shape=shape*/ - const std::vector arguments = { - &input, - &axis, - &output, - }; - const std::vector typesupports = {}; - ValidateArguments(op, arguments, typesupports, context); - ErrorIfCheck_3tg4p2a5te0jy(op, context); - ErrorIfCheck_33exz9gn2i1wy(op, context); -} - -void ValidateOperator_REDUCE_SUM(const regor::Operation *op, const Context &context) -{ - const Argument input = { - Category::Input, - "input", - "in_out_t", - }; /*Input tensor with rank from 1 to 4 shape=shape1*/ - const Argument axis = { - Category::Attribute, - "axis", - "int32_t", - }; /*Axis to reduce, in range from 0 to rank(shape1)-1 shape=-*/ - const Argument output = { - Category::Output, - "output", - "in_out_t", - }; /*Output tensor. Same rank as the input tensor. 
shape=shape*/ - const std::vector arguments = { - &input, - &axis, - &output, - }; - const std::vector typesupports = { - { - {"in_out_t", "int32_t"}, - }, // signed 32 - }; - ValidateArguments(op, arguments, typesupports, context); - ErrorIfCheck_3tg4p2a5te0jy(op, context); - ErrorIfCheck_33exz9gn2i1wy(op, context); -} - -void ValidateOperator_CONCAT(const regor::Operation *op, const Context &context) -{ - const Argument input1 = { - Category::Input, - "input1", - "in_out_t", - }; /*List of input tensors. All inputs must have the same rank and data type shape=shapes1[]*/ - const Argument axis = { - Category::Attribute, - "axis", - "int32_t", - }; /*Axis along which concatenation is to occur, in range from 0 to rank(shape)-1 shape=-*/ - const Argument output = { - Category::Output, - "output", - "in_out_t", - }; /*Output tensor shape=shape*/ - const std::vector arguments = { - &input1, - &axis, - &output, - }; - const std::vector typesupports = { - { - {"in_out_t", "bool_t"}, - }, // boolean - { - {"in_out_t", "int8_t"}, - }, // signed 8 - { - {"in_out_t", "int16_t"}, - }, // signed 16 - { - {"in_out_t", "int32_t"}, - }, // signed 32 - }; - ValidateArguments(op, arguments, typesupports, context); - ErrorIfCheck_14slfd7r77hgh(op, context); - ErrorIfCheck_1fzhf02pkiw9z(op, context); - ErrorIfCheck_16s99hvsej4fo(op, context); - ErrorIfCheck_dctmd6sgn5n0(op, context); - LevelCheck_1flzmpv6hubzc(op, context); -} - -void ValidateOperator_PAD(const regor::Operation *op, const Context &context) -{ - const Argument input1 = { - Category::Input, - "input1", - "in_out_t", - }; /*Input tensor with minimum rank of one. 
shape=shape1*/ - const Argument padding = {Category::Attribute, "padding", "int32_t", {2, 2}}; /*Number of pad elements at the start - and end of each dimension - shape=[rank(shape1),2]*/ - const Argument pad_const = { - Category::Attribute, - "pad_const", - "in_out_t", - }; /*Constant value to be used as padding shape=-*/ - const Argument output = { - Category::Output, - "output", - "in_out_t", - }; /*Output tensor of same type as the input tensor shape=shape*/ - const std::vector arguments = { - &input1, - &padding, - &pad_const, - &output, - }; - const std::vector typesupports = { - { - {"in_out_t", "bool_t"}, - }, // boolean - { - {"in_out_t", "int8_t"}, - }, // signed 8 - { - {"in_out_t", "int16_t"}, - }, // signed 16 - { - {"in_out_t", "int32_t"}, - }, // signed 32 - }; - ValidateArguments(op, arguments, typesupports, context); - ErrorIfCheck_14z7y0qe9lwps(op, context); - ErrorIfCheck_2rfef32dgp3be(op, context); - ErrorIfCheck_2sfcgak3rj1vs(op, context); - LevelCheck_1flzmpv6hubzc(op, context); -} - -void ValidateOperator_RESHAPE(const regor::Operation *op, const Context &context) -{ - const Argument input1 = { - Category::Input, - "input1", - "in_out_t", - }; /*Input tensor shape=shape1*/ - const Argument new_shape = {Category::Attribute, "new_shape", "int32_t", {1, 1}}; /*List of values, with each - element giving the size of the - result tensor for the given - dimension. At most one dimension - may be given as -1 to - automatically calculate the - dimension size. 
- shape=[rank(shape)]*/ - const Argument output = { - Category::Output, - "output", - "in_out_t", - }; /*Output tensor of same type, size as the input tensor shape=shape*/ - const std::vector arguments = { - &input1, - &new_shape, - &output, - }; - const std::vector typesupports = { - { - {"in_out_t", "bool_t"}, - }, // boolean - { - {"in_out_t", "int8_t"}, - }, // signed 8 - { - {"in_out_t", "int16_t"}, - }, // signed 16 - { - {"in_out_t", "int32_t"}, - }, // signed 32 - }; - ValidateArguments(op, arguments, typesupports, context); - ErrorIfCheck_2a1jpygblc07i(op, context); - LevelCheck_1lz89reckvj8d(op, context); - LevelCheck_1flzmpv6hubzc(op, context); -} - -void ValidateOperator_REVERSE(const regor::Operation *op, const Context &context) -{ - const Argument input = { - Category::Input, - "input", - "in_out_t", - }; /*Input tensor with minimum rank of one. shape=shape*/ - const Argument axis = { - Category::Attribute, - "axis", - "int32_t", - }; /*Axis to reverse, in range from 0 to rank(shape)-1 shape=-*/ - const Argument output = { - Category::Output, - "output", - "in_out_t", - }; /*Output tensor. Same shape as input tensor shape=shape*/ - const std::vector arguments = { - &input, - &axis, - &output, - }; - const std::vector typesupports = { - { - {"in_out_t", "bool_t"}, - }, // boolean - { - {"in_out_t", "int8_t"}, - }, // signed 8 - { - {"in_out_t", "int16_t"}, - }, // signed 16 - { - {"in_out_t", "int32_t"}, - }, // signed 32 - }; - ValidateArguments(op, arguments, typesupports, context); - ErrorIfCheck_3hthyoock2ew5(op, context); - ErrorIfCheck_10u6py7exa66n(op, context); - LevelCheck_1flzmpv6hubzc(op, context); -} - -void ValidateOperator_SLICE(const regor::Operation *op, const Context &context) -{ - const Argument input1 = { - Category::Input, - "input1", - "in_out_t", - }; /*Input tensor with minimum rank of one. 
shape=shape1*/ - const Argument start = {Category::Attribute, "start", "int32_t", {1, 1}}; /*List of integer coordinates, of length - equal to the rank of input1. Start - coordinate for slicing. - shape=[rank(shape1)]*/ - const Argument size = {Category::Attribute, "size", "int32_t", {1, 1}}; /*List of integer size values, of length - equal to the rank of input1. Size of the - input to be used. shape=[rank(shape1)]*/ - const Argument output = { - Category::Output, - "output", - "in_out_t", - }; /*Output tensor of same type as the input tensor shape=shape*/ - const std::vector arguments = { - &input1, - &start, - &size, - &output, - }; - const std::vector typesupports = { - { - {"in_out_t", "bool_t"}, - }, // boolean - { - {"in_out_t", "int8_t"}, - }, // signed 8 - { - {"in_out_t", "int16_t"}, - }, // signed 16 - { - {"in_out_t", "int32_t"}, - }, // signed 32 - }; - ValidateArguments(op, arguments, typesupports, context); - ErrorIfCheck_1nifeiq9rvmb8(op, context); - ErrorIfCheck_21rq6kn6p1yle(op, context); - ErrorIfCheck_3rghkieqip43o(op, context); - ErrorIfCheck_1cyv9n59wyyyc(op, context); - ErrorIfCheck_3oy2tclc6uhsu(op, context); - ErrorIfCheck_gpp3enlp1ddg(op, context); - ErrorIfCheck_ix9div4ld46q(op, context); - LevelCheck_1flzmpv6hubzc(op, context); -} - -void ValidateOperator_TILE(const regor::Operation *op, const Context &context) -{ - const Argument input1 = { - Category::Input, - "input1", - "in_out_t", - }; /*Input tensor with minimum rank of one. 
shape=shape1*/ - const Argument multiplies = {Category::Attribute, "multiplies", "int32_t", {1, 1}}; /*Number of times to replicate - input1 in each dimension - shape=[rank(shape1)]*/ - const Argument output = { - Category::Output, - "output", - "in_out_t", - }; /*Output tensor of same type, rank as the input tensor shape=shape*/ - const std::vector arguments = { - &input1, - &multiplies, - &output, - }; - const std::vector typesupports = { - { - {"in_out_t", "bool_t"}, - }, // boolean - { - {"in_out_t", "int8_t"}, - }, // signed 8 - { - {"in_out_t", "int16_t"}, - }, // signed 16 - { - {"in_out_t", "int32_t"}, - }, // signed 32 - }; - ValidateArguments(op, arguments, typesupports, context); - ErrorIfCheck_21rq6kn6p1yle(op, context); - ErrorIfCheck_3estuseky2gm2(op, context); - LevelCheck_1flzmpv6hubzc(op, context); -} - -void ValidateOperator_TRANSPOSE(const regor::Operation *op, const Context &context) -{ - const Argument input1 = { - Category::Input, - "input1", - "in_out_t", - }; /*Input tensor with minimum rank of one. shape=shape1*/ - const Argument perms = {Category::Attribute, "perms", "int32_t", {1, 1}}; /*List of integers of length equal to the - rank of input1. Values must be valid - dimensions within shape1, and may not be - repeated. 
shape=[rank(shape1)]*/ - const Argument output = { - Category::Output, - "output", - "in_out_t", - }; /*Output tensor of same type, rank as the input tensor shape=shape*/ - const std::vector arguments = { - &input1, - &perms, - &output, - }; - const std::vector typesupports = { - { - {"in_out_t", "bool_t"}, - }, // boolean - { - {"in_out_t", "int8_t"}, - }, // signed 8 - { - {"in_out_t", "int16_t"}, - }, // signed 16 - { - {"in_out_t", "int32_t"}, - }, // signed 32 - }; - ValidateArguments(op, arguments, typesupports, context); - ErrorIfCheck_21rq6kn6p1yle(op, context); - ErrorIfCheck_2a1jpygblc07i(op, context); - ErrorIfCheck_5bq1fx1llv8(op, context); - ErrorIfCheck_ckwpttzajw06(op, context); - ErrorIfCheck_2n1ratxgd89tx(op, context); - ErrorIfCheck_aizwrn95lb0l(op, context); - LevelCheck_1flzmpv6hubzc(op, context); -} - -void ValidateOperator_GATHER(const regor::Operation *op, const Context &context) -{ - const Argument values = {Category::Input, "values", "value_t", {3, 3}}; /*3D value tensor shape=[N,K,C]*/ - const Argument indices = {Category::Input, "indices", "index_t", {2, 2}}; /*2D index tensor shape=[N,W]*/ - const Argument output = {Category::Output, "output", "value_t", {3, 3}}; /*3D output tensor shape=[N,W,C]*/ - const std::vector arguments = { - &values, - &indices, - &output, - }; - const std::vector typesupports = { - { - {"value_t", "int8_t"}, - }, // signed 8 - { - {"value_t", "int16_t"}, - }, // signed 16 - { - {"value_t", "int32_t"}, - }, // signed 32 - }; - ValidateArguments(op, arguments, typesupports, context); - ErrorIfCheck_294afuxnedk9i(op, context); - ErrorIfCheck_27p0n0pjt2bd6(op, context); - ErrorIfCheck_1uwmsen32dse1(op, context); - RequireCheck_31n0oq4yculbk(op, context); -} - -void ValidateOperator_SCATTER(const regor::Operation *op, const Context &context) -{ - const Argument values_in = {Category::Input, "values_in", "value_t", {3, 3}}; /*3D values in tensor shape=[N,K,C]*/ - const Argument indices = {Category::Input, "indices", 
"index_t", {2, 2}}; /*2D index tensor shape=[N,W]*/ - const Argument input = {Category::Input, "input", "value_t", {3, 3}}; /*3D input tensor shape=[N,W,C]*/ - const Argument values_out = {Category::Output, "values_out", "value_t", {3, 3}}; /*3D output tensor shape=[N,K,C]*/ - const std::vector arguments = { - &values_in, - &indices, - &input, - &values_out, - }; - const std::vector typesupports = { - { - {"value_t", "int8_t"}, - }, // signed 8 - { - {"value_t", "int16_t"}, - }, // signed 16 - { - {"value_t", "int32_t"}, - }, // signed 32 - }; - ValidateArguments(op, arguments, typesupports, context); - ErrorIfCheck_3c5bq3iswjd1x(op, context); - ErrorIfCheck_53yuoon46swi(op, context); - ErrorIfCheck_q9pgbwuvutqu(op, context); - ErrorIfCheck_1qdcccs22lqtr(op, context); - ErrorIfCheck_2azl8wc8mbsrj(op, context); - ErrorIfCheck_122a36k26p0au(op, context); - RequireCheck_31n0oq4yculbk(op, context); - RequireCheck_2apk8ly9uthz6(op, context); -} - -void ValidateOperator_RESIZE(const regor::Operation *op, const Context &context) -{ - const Argument input = {Category::Input, "input", "in_t", {4, 4}}; /*Input tensor shape=[N,IH,IW,C]*/ - const Argument scale = {Category::Attribute, "scale", "int16_t", {1, 1}}; /*[scale_y_n, scale_y_d, scale_x_n, - scale_x_d] shape=[4]*/ - const Argument offset = {Category::Attribute, "offset", "int16_t", {1, 1}}; /*[offset_y, offset_x] shape=[2]*/ - const Argument border = {Category::Attribute, "border", "int16_t", {1, 1}}; /*[border_y, border_x] shape=[2]*/ - const Argument mode = { - Category::Attribute, - "mode", - "mode_t", - }; /*BILINEAR or NEAREST shape=-*/ - const Argument output = {Category::Output, "output", "out_t", {4, 4}}; /*Output tensor shape=[N,OH,OW,C]*/ - const std::vector arguments = { - &input, - &scale, - &offset, - &border, - &mode, - &output, - }; - const std::vector typesupports = { - { - {"resize_t", "int16_t"}, - {"in_t", "int8_t"}, - {"out_t", "int32_t"}, - }, // signed 8, bilinear - { - {"resize_t", "int16_t"}, - 
{"in_t", "int8_t"}, - {"out_t", "int8_t"}, - }, // signed 8, nearest - { - {"resize_t", "int16_t"}, - {"in_t", "int16_t"}, - {"out_t", "int48_t"}, - }, // signed 16, bilinear - { - {"resize_t", "int16_t"}, - {"in_t", "int16_t"}, - {"out_t", "int16_t"}, - }, // signed 16, nearest - }; - ValidateArguments(op, arguments, typesupports, context); - ErrorIfCheck_3sfcy967j2w8w(op, context); - ErrorIfCheck_1obslcewwn583(op, context); - ErrorIfCheck_3oxfjen91qb6l(op, context); - ErrorIfCheck_1uo0z247e42af(op, context); - ErrorIfCheck_1eovh9pyc6tyw(op, context); - ErrorIfCheck_24jsin2zkf4ug(op, context); - ErrorIfCheck_12uj5fltk5rbo(op, context); - ErrorIfCheck_1py9f91imwjxe(op, context); - ErrorIfCheck_fn614zzdrdfd(op, context); - ErrorIfCheck_338aejy0aeqeg(op, context); - ErrorIfCheck_1c57olj698f3d(op, context); - LevelCheck_1r40jc4ashh6o(op, context); - LevelCheck_1u7rtl141felu(op, context); -} - -void ValidateOperator_CAST(const regor::Operation *op, const Context &context) -{ - const Argument input = { - Category::Input, - "input", - "in_t", - }; /*Input tensor shape=shape*/ - const Argument output = { - Category::Output, - "output", - "out_t", - }; /*Output tensor shape=shape*/ - const std::vector arguments = { - &input, - &output, - }; - const std::vector typesupports = { - { - {"in_t", "bool_t"}, - {"out_t", "int8_t"}, - }, // bool to signed 8 - { - {"in_t", "bool_t"}, - {"out_t", "int16_t"}, - }, // bool to signed 16 - { - {"in_t", "bool_t"}, - {"out_t", "int32_t"}, - }, // bool to signed 32 - { - {"in_t", "int8_t"}, - {"out_t", "bool_t"}, - }, // signed 8 to bool - { - {"in_t", "int8_t"}, - {"out_t", "int16_t"}, - }, // signed 8 to signed 16 - { - {"in_t", "int8_t"}, - {"out_t", "int32_t"}, - }, // signed 8 to signed 32 - { - {"in_t", "int16_t"}, - {"out_t", "bool_t"}, - }, // signed 16 to bool - { - {"in_t", "int16_t"}, - {"out_t", "int8_t"}, - }, // signed 16 to signed 8 - { - {"in_t", "int16_t"}, - {"out_t", "int32_t"}, - }, // signed 16 to signed 32 - { - 
{"in_t", "int32_t"}, - {"out_t", "bool_t"}, - }, // signed 32 to bool - { - {"in_t", "int32_t"}, - {"out_t", "int8_t"}, - }, // signed 32 to signed 8 - { - {"in_t", "int32_t"}, - {"out_t", "int16_t"}, - }, // signed 32 to signed 16 - }; - ValidateArguments(op, arguments, typesupports, context); - ErrorIfCheck_10u6py7exa66n(op, context); - LevelCheck_1flzmpv6hubzc(op, context); -} - -void ValidateOperator_RESCALE(const regor::Operation *op, const Context &context) -{ - const Argument input = { - Category::Input, - "input", - "in_t", - }; /*Input tensor shape=shape*/ - const Argument output = { - Category::Output, - "output", - "out_t", - }; /*Output tensor with the same shape as input shape=shape*/ - const Argument input_zp = { - Category::Attribute, - "input_zp", - "in_t", - }; /*Input tensor zero point. Must be zero for non-int8 types. shape=-*/ - const Argument output_zp = { - Category::Attribute, - "output_zp", - "out_t", - }; /*Output tensor zero point. Must be zero for non-int8 types. 
shape=-*/ - const Argument multiplier = {Category::Attribute, "multiplier", "mul_t", {1, 1}}; /*Scaling multiplier array - shape=[NC]*/ - const Argument shift = {Category::Attribute, "shift", "uint6_t", {1, 1}}; /*Scaling shift array shape=[NC]*/ - const Argument scale32 = { - Category::Attribute, - "scale32", - "bool_t", - }; /*if (scale32) mul_t=int32_t else mul_t=int16_t shape=-*/ - const Argument double_round = { - Category::Attribute, - "double_round", - "bool_t", - }; /*Select double round mode shape=-*/ - const Argument per_channel = { - Category::Attribute, - "per_channel", - "bool_t", - }; /*if (per_channel) NC=shape[rank(shape)-1] else NC=1 shape=-*/ - const std::vector arguments = { - &input, - &output, - &input_zp, - &output_zp, - &multiplier, - &shift, - &scale32, - &double_round, - &per_channel, - }; - const std::vector typesupports = { - { - {"in_t", "int8_t"}, - {"out_t", "int8_t"}, - }, // signed 8 to signed 8 - { - {"in_t", "int8_t"}, - {"out_t", "int16_t"}, - }, // signed 8 to signed 16 - { - {"in_t", "int8_t"}, - {"out_t", "int32_t"}, - }, // signed 8 to signed 32 - { - {"in_t", "int8_t"}, - {"out_t", "uint8_t"}, - }, // signed 8 to unsigned 8 - { - {"in_t", "int16_t"}, - {"out_t", "int8_t"}, - }, // signed 16 to signed 8 - { - {"in_t", "int16_t"}, - {"out_t", "int16_t"}, - }, // signed 16 to signed 16 - { - {"in_t", "int16_t"}, - {"out_t", "int32_t"}, - }, // signed 16 to signed 32 - { - {"in_t", "int16_t"}, - {"out_t", "uint8_t"}, - }, // signed 16 to unsigned 8 - { - {"in_t", "int16_t"}, - {"out_t", "uint16_t"}, - }, // signed 16 to unsigned 16 - { - {"in_t", "int32_t"}, - {"out_t", "int8_t"}, - }, // signed 32 to signed 8 - { - {"in_t", "int32_t"}, - {"out_t", "int16_t"}, - }, // signed 32 to signed 16 - { - {"in_t", "int32_t"}, - {"out_t", "int32_t"}, - }, // signed 32 to signed 32 - { - {"in_t", "int48_t"}, - {"out_t", "int8_t"}, - }, // signed 48 to signed 8 - { - {"in_t", "int48_t"}, - {"out_t", "int16_t"}, - }, // signed 48 to signed 16 
- { - {"in_t", "int48_t"}, - {"out_t", "int32_t"}, - }, // signed 48 to signed 32 - { - {"in_t", "uint8_t"}, - {"out_t", "int8_t"}, - }, // unsigned 8 to signed 8 - { - {"in_t", "uint8_t"}, - {"out_t", "int16_t"}, - }, // unsigned 8 to signed 16 - { - {"in_t", "uint16_t"}, - {"out_t", "int16_t"}, - }, // unsigned 16 to signed 16 - }; - ValidateArguments(op, arguments, typesupports, context); - ErrorIfCheck_7p5naeft5ga8(op, context); - ErrorIfCheck_2hqaqrremyime(op, context); - ErrorIfCheck_1wo90hck51cpk(op, context); - ErrorIfCheck_v4b9g32rnf6p(op, context); - ErrorIfCheck_22dev8it3bz2g(op, context); - ErrorIfCheck_3ms1pbkpa2td9(op, context); - ErrorIfCheck_31ty7f0kcbfxg(op, context); - ErrorIfCheck_10u6py7exa66n(op, context); - LevelCheck_1flzmpv6hubzc(op, context); -} - -void ValidateOperator_IDENTITY(const regor::Operation *op, const Context &context) -{ - const Argument input1 = { - Category::Input, - "input1", - "in_out_t", - }; /*Input tensor shape=shape*/ - const Argument output = { - Category::Output, - "output", - "in_out_t", - }; /*Output tensor of the same type, size as the input tensor shape=shape*/ - const std::vector arguments = { - &input1, - &output, - }; - const std::vector typesupports = { - { - {"in_out_t", "bool_t"}, - }, // Boolean - { - {"in_out_t", "int8_t"}, - }, // signed 8 - { - {"in_out_t", "int16_t"}, - }, // signed 16 - { - {"in_out_t", "int32_t"}, - }, // signed 32 - }; - ValidateArguments(op, arguments, typesupports, context); - ErrorIfCheck_396rg8p65j58r(op, context); -} - -void ValidateOperator_COND_IF(const regor::Operation *op, const Context &context) -{ - const Argument input_list = { - Category::Input, - "input_list", - "tensor_list_t", - }; /*List of input tensors shape=-*/ - const Argument condition = { - Category::Input, - "condition", - "bool_t", - }; /*Input condition as a size 1 tensor shape=shape*/ - const Argument then_graph = { - Category::Attribute, - "then_graph", - "tosa_graph_t", - }; /*TOSA graph to execute if 
condition is true shape=-*/ - const Argument else_graph = { - Category::Attribute, - "else_graph", - "tosa_graph_t", - }; /*TOSA graph to execute if condition is false shape=-*/ - const Argument output_list = { - Category::Output, - "output_list", - "tensor_list_t", - }; /*List of output tensors shape=-*/ - const std::vector arguments = { - &input_list, - &condition, - &then_graph, - &else_graph, - &output_list, - }; - const std::vector typesupports = {}; - ValidateArguments(op, arguments, typesupports, context); - ErrorIfCheck_1bm39avugkqqd(op, context); - ErrorIfCheck_3tv3oatlz37e2(op, context); - ErrorIfCheck_n7biu53x2n6k(op, context); - ErrorIfCheck_2fd4dk1zw032u(op, context); - ErrorIfCheck_omgw2xdm6irr(op, context); -} - -void ValidateOperator_WHILE_LOOP(const regor::Operation *op, const Context &context) -{ - const Argument input_list = { - Category::Input, - "input_list", - "tensor_list_t", - }; /*List of input tensors shape=-*/ - const Argument cond_graph = { - Category::Attribute, - "cond_graph", - "tosa_graph_t", - }; /*TOSA graph to evaluate the condition shape=-*/ - const Argument body_graph = { - Category::Attribute, - "body_graph", - "tosa_graph_t", - }; /*TOSA graph to execute the loop body shape=-*/ - const Argument output_list = { - Category::Output, - "output_list", - "tensor_list_t", - }; /*List of output tensors shape=-*/ - const std::vector arguments = { - &input_list, - &cond_graph, - &body_graph, - &output_list, - }; - const std::vector typesupports = {}; - ValidateArguments(op, arguments, typesupports, context); - ErrorIfCheck_18hgmc3pexnw4(op, context); - ErrorIfCheck_12uu5ff3t3lv8(op, context); - ErrorIfCheck_3puzf7van5acf(op, context); - ErrorIfCheck_8tihij7a5ep0(op, context); - ErrorIfCheck_3lu68v2531bjz(op, context); - ErrorIfCheck_1fzl0zyxyd88z(op, context); -} - -} // namespace -namespace tosa -{ -namespace validator -{ - -void ValidateOperator_Version_0_60_0_Profile_BI(const GraphApi::GraphOperation *graphOp, const Context &context) 
-{ - const auto *op = static_cast(graphOp); - switch ( op->Type() ) - { - case regor::OpType::ArgMax: - ValidateOperator_ARGMAX(op, context); - break; - case regor::OpType::AvgPool: - ValidateOperator_AVG_POOL2D(op, context); - break; - case regor::OpType::Conv2D: - ValidateOperator_CONV2D(op, context); - break; - case regor::OpType::Conv3D: - ValidateOperator_CONV3D(op, context); - break; - case regor::OpType::DepthwiseConv2D: - ValidateOperator_DEPTHWISE_CONV2D(op, context); - break; - case regor::OpType::FullyConnected: - ValidateOperator_FULLY_CONNECTED(op, context); - break; - case regor::OpType::MatMul: - ValidateOperator_MATMUL(op, context); - break; - case regor::OpType::MaxPool: - ValidateOperator_MAX_POOL2D(op, context); - break; - case regor::OpType::TransposeConv2D: - ValidateOperator_TRANSPOSE_CONV2D(op, context); - break; - case regor::OpType::Clamp: - ValidateOperator_CLAMP(op, context); - break; - case regor::OpType::Sigmoid: - ValidateOperator_SIGMOID(op, context); - break; - case regor::OpType::Tanh: - ValidateOperator_TANH(op, context); - break; - case regor::OpType::Add: - ValidateOperator_ADD(op, context); - break; - case regor::OpType::Asr: - ValidateOperator_ARITHMETIC_RIGHT_SHIFT(op, context); - break; - case regor::OpType::And: - ValidateOperator_BITWISE_AND(op, context); - break; - case regor::OpType::Or: - ValidateOperator_BITWISE_OR(op, context); - break; - case regor::OpType::Xor: - ValidateOperator_BITWISE_XOR(op, context); - break; - case regor::OpType::Div: - ValidateOperator_INTDIV(op, context); - break; - case regor::OpType::LogicalAnd: - ValidateOperator_LOGICAL_AND(op, context); - break; - case regor::OpType::SHL: - ValidateOperator_LOGICAL_LEFT_SHIFT(op, context); - break; - case regor::OpType::SHR: - ValidateOperator_LOGICAL_RIGHT_SHIFT(op, context); - break; - case regor::OpType::LogicalOr: - ValidateOperator_LOGICAL_OR(op, context); - break; - case regor::OpType::LogicalXor: - ValidateOperator_LOGICAL_XOR(op, context); - 
break; - case regor::OpType::Maximum: - ValidateOperator_MAXIMUM(op, context); - break; - case regor::OpType::Minimum: - ValidateOperator_MINIMUM(op, context); - break; - case regor::OpType::Mul: - ValidateOperator_MUL(op, context); - break; - case regor::OpType::Pow: - ValidateOperator_POW(op, context); - break; - case regor::OpType::Sub: - ValidateOperator_SUB(op, context); - break; - case regor::OpType::Table: - ValidateOperator_TABLE(op, context); - break; - case regor::OpType::Abs: - ValidateOperator_ABS(op, context); - break; - case regor::OpType::Not: - ValidateOperator_BITWISE_NOT(op, context); - break; - case regor::OpType::Ceil: - ValidateOperator_CEIL(op, context); - break; - case regor::OpType::CLZ: - ValidateOperator_CLZ(op, context); - break; - case regor::OpType::Exp: - ValidateOperator_EXP(op, context); - break; - case regor::OpType::Floor: - ValidateOperator_FLOOR(op, context); - break; - case regor::OpType::Log: - ValidateOperator_LOG(op, context); - break; - case regor::OpType::LogicalNot: - ValidateOperator_LOGICAL_NOT(op, context); - break; - case regor::OpType::Neg: - ValidateOperator_NEGATE(op, context); - break; - case regor::OpType::Reciprocal: - ValidateOperator_RECIPROCAL(op, context); - break; - case regor::OpType::Rsqrt: - ValidateOperator_RSQRT(op, context); - break; - case regor::OpType::Select: - ValidateOperator_SELECT(op, context); - break; - case regor::OpType::Equal: - ValidateOperator_EQUAL(op, context); - break; - case regor::OpType::Greater: - ValidateOperator_GREATER(op, context); - break; - case regor::OpType::GreaterEqual: - ValidateOperator_GREATER_EQUAL(op, context); - break; - case regor::OpType::ReduceAll: - ValidateOperator_REDUCE_ALL(op, context); - break; - case regor::OpType::ReduceAny: - ValidateOperator_REDUCE_ANY(op, context); - break; - case regor::OpType::ReduceMax: - ValidateOperator_REDUCE_MAX(op, context); - break; - case regor::OpType::ReduceMin: - ValidateOperator_REDUCE_MIN(op, context); - break; - case 
regor::OpType::ReduceProduct: - ValidateOperator_REDUCE_PRODUCT(op, context); - break; - case regor::OpType::ReduceSum: - ValidateOperator_REDUCE_SUM(op, context); - break; - case regor::OpType::Concat: - ValidateOperator_CONCAT(op, context); - break; - case regor::OpType::Pad: - ValidateOperator_PAD(op, context); - break; - case regor::OpType::Reshape: - ValidateOperator_RESHAPE(op, context); - break; - case regor::OpType::Reverse: - ValidateOperator_REVERSE(op, context); - break; - case regor::OpType::Slice: - ValidateOperator_SLICE(op, context); - break; - case regor::OpType::Tile: - ValidateOperator_TILE(op, context); - break; - case regor::OpType::Transpose: - ValidateOperator_TRANSPOSE(op, context); - break; - case regor::OpType::Gather: - ValidateOperator_GATHER(op, context); - break; - case regor::OpType::Scatter: - ValidateOperator_SCATTER(op, context); - break; - case regor::OpType::Resize: - ValidateOperator_RESIZE(op, context); - break; - case regor::OpType::Cast: - ValidateOperator_CAST(op, context); - break; - case regor::OpType::Rescale: - ValidateOperator_RESCALE(op, context); - break; - case regor::OpType::Identity: - ValidateOperator_IDENTITY(op, context); - break; - case regor::OpType::If: - ValidateOperator_COND_IF(op, context); - break; - case regor::OpType::While: - ValidateOperator_WHILE_LOOP(op, context); - break; - case regor::OpType::Custom: // passthrough or validate later - break; - default: - throw std::invalid_argument("Unsupported operator"); - } -} - -} // namespace validator -} // namespace tosa diff --git a/ethosu/regor/tosa/tosa_validator_version_0_80_0_profile_bi.cpp b/ethosu/regor/tosa/tosa_validator_version_1_0_0_draft_profile_pro_int.cpp similarity index 83% rename from ethosu/regor/tosa/tosa_validator_version_0_80_0_profile_bi.cpp rename to ethosu/regor/tosa/tosa_validator_version_1_0_0_draft_profile_pro_int.cpp index 7f5dfb80..a58c60e6 100644 --- a/ethosu/regor/tosa/tosa_validator_version_0_80_0_profile_bi.cpp +++ 
b/ethosu/regor/tosa/tosa_validator_version_1_0_0_draft_profile_pro_int.cpp @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. // -// Automatically generated by tosaValidationGenerator for TOSA Specification 0.80.0 +// Automatically generated by tosaValidationGenerator for TOSA Specification 1.0.0draft // Do not edit. #include "compiler/operation.hpp" @@ -42,15 +42,22 @@ void ValidateOperator_ARGMAX(const regor::Operation *op, const Context &context) { const Argument input = {Category::Input, "input", "in_t", {1, MAX_RANK}}; /*Input tensor shape=shape1*/ const Argument axis = { - Category::ScalarAttribute, + Category::Attribute, "axis", "i32_t", - }; /*Axis in range from 0 to rank(shape1) - 1 */ + }; /*Axis in range from 0 to rank(shape1) - 1 shape=-*/ + const Argument nan_mode = { + Category::Attribute, + "nan_mode", + "nan_propagation_mode_t", + }; /* PROPAGATE or IGNORE. Set to PROPAGATE by default. This attribute affects the floating-point NaN propagation + approach. This attribute is ignored by non floating-point types. 
shape=-*/ const Argument output = {Category::Output, "output", "out_t", {0, MAX_RANK - 1}}; /*Output tensor, with rank = rank(shape1) - 1 shape=shape*/ const std::vector arguments = { &input, &axis, + &nan_mode, &output, }; const std::vector typesupports = { @@ -71,36 +78,31 @@ void ValidateOperator_ARGMAX(const regor::Operation *op, const Context &context) void ValidateOperator_AVG_POOL2D(const regor::Operation *op, const Context &context) { - const Argument input = {Category::Input, "input", "in_out_t", {4, 4}}; /*Input tensor shape=[N,IH,IW,C]*/ - const Argument kernel = {Category::Attribute, "kernel", "i32_t", {1, 1}}; /*[kernel_y, kernel_x] shape=[2]*/ - const Argument stride = {Category::Attribute, "stride", "i32_t", {1, 1}}; /*[stride_y, stride_x] shape=[2]*/ + const Argument input = {Category::Input, "input", "in_out_t", {4, 4}}; /*Input tensor shape=[N,IH,IW,C]*/ + const Argument input_zp = {Category::Input, "input_zp", "in_out_t", {1, 1}}; /*Input tensor zero point. Must be zero + for non-int8 types. shape=[1]*/ + const Argument output_zp = {Category::Input, "output_zp", "in_out_t", {1, 1}}; /*Output tensor zero point. Must be + zero for non-int8 types. + shape=[1]*/ + const Argument kernel = {Category::Attribute, "kernel", "i32_t", {1, 1}}; /*[kernel_y, kernel_x] shape=[2]*/ + const Argument stride = {Category::Attribute, "stride", "i32_t", {1, 1}}; /*[stride_y, stride_x] shape=[2]*/ const Argument pad = {Category::Attribute, "pad", "i32_t", {1, 1}}; /*[pad_top, pad_bottom, pad_left, pad_right] shape=[4]*/ - const Argument acc_size = { - Category::ScalarAttribute, - "acc_size", - "acc_size_t", - }; /*Enumerated type, must be one of INT32, FP16, FP32, as defined in the Supported Data Types table for this - operation */ - const Argument input_zp = { - Category::ScalarAttribute, - "input_zp", - "in_out_t", - }; /*Input tensor zero point. Must be zero for non-int8 types. 
*/ - const Argument output_zp = { - Category::ScalarAttribute, - "output_zp", - "in_out_t", - }; /*Output tensor zero point. Must be zero for non-int8 types. */ + const Argument acc_type = { + Category::Attribute, + "acc_type", + "acc_type_t", + }; /*Enumerated type, must be one of INT32, FP16, FP32 matching the type of acc_t in the Supported Data Types table + for this operation shape=-*/ const Argument output = {Category::Output, "output", "in_out_t", {4, 4}}; /*Output tensor 4D shape=[N,OH,OW,C]*/ const std::vector arguments = { &input, + &input_zp, + &output_zp, &kernel, &stride, &pad, - &acc_size, - &input_zp, - &output_zp, + &acc_type, &output, }; const std::vector typesupports = { @@ -114,8 +116,8 @@ void ValidateOperator_AVG_POOL2D(const regor::Operation *op, const Context &cont }, // signed 16 with int32 accumulate }; ValidateArguments(op, arguments, typesupports, context); - ErrorIfCheck_4tfs5fdsigv(op, context); - ErrorIfCheck_3nav30dsmv6gd(op, context); + ErrorIfCheck_2nanft1ivm5fj(op, context); + ErrorIfCheck_1ga3gcg4zkrkv(op, context); ErrorIfCheck_36r4wpx3psd81(op, context); ErrorIfCheck_1lrylbkd3w7ix(op, context); ErrorIfCheck_ojmgqziimenu(op, context); @@ -139,38 +141,40 @@ void ValidateOperator_CONV2D(const regor::Operation *op, const Context &context) const Argument input = {Category::Input, "input", "in_t", {4, 4}}; /*Input tensor shape=[N,IH,IW,IC]*/ const Argument weight = {Category::Input, "weight", "weight_t", {4, 4}}; /*Weight kernel size KH x KW shape=[OC,KH,KW,IC]*/ - const Argument bias = {Category::Input, "bias", "out_t", {1, 1}}; /*Per output channel bias data. shape=[BC]*/ + const Argument bias = {Category::Input, "bias", "out_t", {1, 1}}; /*Per output channel bias data. + Bias data will + be broadcast if BC == 1. shape=[BC]*/ + const Argument input_zp = {Category::Input, "input_zp", "in_t", {1, 1}}; /*Input tensor zero point. Must be zero for + non-int8 types. 
shape=[1]*/ + const Argument weight_zp = {Category::Input, "weight_zp", "weight_t", {1, 1}}; /*Weight zero point. Must be zero for + non-int8 types. shape=[1]*/ const Argument pad = {Category::Attribute, "pad", "i32_t", {1, 1}}; /*[pad_top, pad_bottom, pad_left, pad_right] shape=[4]*/ const Argument stride = {Category::Attribute, "stride", "i32_t", {1, 1}}; /*[stride_y, stride_x] shape=[2]*/ const Argument dilation = {Category::Attribute, "dilation", "i32_t", {1, 1}}; /*[dilation_y, dilation_x] shape=[2]*/ - const Argument input_zp = { - Category::ScalarAttribute, - "input_zp", - "in_t", - }; /*Input tensor zero point. Must be zero for non-int8 types. */ - const Argument weight_zp = { - Category::ScalarAttribute, - "weight_zp", - "weight_t", - }; /*Weight zero point. Must be zero for non-int8 types. */ + const Argument acc_type = { + Category::Attribute, + "acc_type", + "acc_type_t", + }; /*Enumerated type, must be one of INT32, INT48, FP16, FP32 matching the type of acc_t in the Supported Data Types + table for this operation shape=-*/ const Argument local_bound = { - Category::ScalarAttribute, + Category::Attribute, "local_bound", "bool_t", }; /* This optional attribute affects the floating-point compliance error bound. The default of false allows for direct and transform based, fast convolution algorithms. Only set to true if direct dot-product calculation - precision is required. */ + precision is required. 
shape=-*/ const Argument output = {Category::Output, "output", "out_t", {4, 4}}; /*Output tensor shape=[N,OH,OW,OC]*/ const std::vector arguments = { &input, &weight, &bias, + &input_zp, + &weight_zp, &pad, &stride, &dilation, - &input_zp, - &weight_zp, + &acc_type, &local_bound, &output, }; @@ -179,21 +183,24 @@ void ValidateOperator_CONV2D(const regor::Operation *op, const Context &context) {"in_t", "i8_t"}, {"weight_t", "i8_t"}, {"out_t", "i32_t"}, + {"acc_t", "i32_t"}, }, // signed 8x8 with int32 accumulate { {"in_t", "i8_t"}, {"weight_t", "i4_t"}, {"out_t", "i32_t"}, + {"acc_t", "i32_t"}, }, // signed 8x4 with int32 accumulate { {"in_t", "i16_t"}, {"weight_t", "i8_t"}, {"out_t", "i48_t"}, + {"acc_t", "i48_t"}, }, // signed 16x8 with int48 accumulate }; ValidateArguments(op, arguments, typesupports, context); - ErrorIfCheck_2p5uniza3kjyg(op, context); - ErrorIfCheck_1md8k265hfj92(op, context); + ErrorIfCheck_1hrio849y2qnx(op, context); + ErrorIfCheck_31vgfyg6fi9t6(op, context); ErrorIfCheck_ojmgqziimenu(op, context); ErrorIfCheck_1lrylbkd3w7ix(op, context); ErrorIfCheck_3fzsq78v5ypau(op, context); @@ -218,40 +225,42 @@ void ValidateOperator_CONV3D(const regor::Operation *op, const Context &context) const Argument input = {Category::Input, "input", "in_t", {5, 5}}; /*Input tensor shape=[N,ID,IH,IW,IC]*/ const Argument weight = {Category::Input, "weight", "weight_t", {5, 5}}; /*Weight kernel size KDxKHxKW shape=[OC,KD,KH,KW,IC]*/ - const Argument bias = {Category::Input, "bias", "out_t", {1, 1}}; /*Per output channel bias data. shape=[BC]*/ - const Argument pad = {Category::Attribute, "pad", "i32_t", {1, 1}}; /*[pad_d0, pad_d1, pad_top, pad_bottom, - pad_left, pad_right] shape=[6]*/ - const Argument stride = {Category::Attribute, "stride", "i32_t", {1, 1}}; /*[stride_d, stride_y, stride_x] - shape=[3]*/ + const Argument bias = {Category::Input, "bias", "out_t", {1, 1}}; /*Per output channel bias data. + Bias data will + be broadcast if BC == 1. 
shape=[BC]*/ + const Argument input_zp = {Category::Input, "input_zp", "in_t", {1, 1}}; /*Input tensor zero point. Must be zero for + non-int8 types. shape=[1]*/ + const Argument weight_zp = {Category::Input, "weight_zp", "weight_t", {1, 1}}; /*Weight zero point. Must be zero for + non-int8 types. shape=[1]*/ + const Argument pad = {Category::Attribute, "pad", "i32_t", {1, 1}}; /*[pad_d0, pad_d1, pad_top, pad_bottom, + pad_left, pad_right] shape=[6]*/ + const Argument stride = {Category::Attribute, "stride", "i32_t", {1, 1}}; /*[stride_d, stride_y, stride_x] + shape=[3]*/ const Argument dilation = {Category::Attribute, "dilation", "i32_t", {1, 1}}; /*[dilation_d, dilation_y, dilation_x] shape=[3]*/ - const Argument input_zp = { - Category::ScalarAttribute, - "input_zp", - "in_t", - }; /*Input tensor zero point. Must be zero for non-int8 types. */ - const Argument weight_zp = { - Category::ScalarAttribute, - "weight_zp", - "weight_t", - }; /*Weight zero point. Must be zero for non-int8 types. */ + const Argument acc_type = { + Category::Attribute, + "acc_type", + "acc_type_t", + }; /*Enumerated type, must be one of INT32, INT48, FP16, FP32 matching the type of acc_t in the Supported Data Types + table for this operation shape=-*/ const Argument local_bound = { - Category::ScalarAttribute, + Category::Attribute, "local_bound", "bool_t", }; /* This optional attribute affects the floating-point compliance error bound. The default of false allows for direct and transform based, fast convolution algorithms. Only set to true if direct dot-product calculation - precision is required. */ + precision is required. 
shape=-*/ const Argument output = {Category::Output, "output", "out_t", {5, 5}}; /*Output tensor shape=[N,OD,OH,OW,OC]*/ const std::vector arguments = { &input, &weight, &bias, + &input_zp, + &weight_zp, &pad, &stride, &dilation, - &input_zp, - &weight_zp, + &acc_type, &local_bound, &output, }; @@ -260,21 +269,24 @@ void ValidateOperator_CONV3D(const regor::Operation *op, const Context &context) {"in_t", "i8_t"}, {"weight_t", "i8_t"}, {"out_t", "i32_t"}, + {"acc_t", "i32_t"}, }, // signed 8x8 with int32 accumulate { {"in_t", "i8_t"}, {"weight_t", "i4_t"}, {"out_t", "i32_t"}, + {"acc_t", "i32_t"}, }, // signed 8x4 with int32 accumulate { {"in_t", "i16_t"}, {"weight_t", "i8_t"}, {"out_t", "i48_t"}, + {"acc_t", "i48_t"}, }, // signed 16x8 with int48 accumulate }; ValidateArguments(op, arguments, typesupports, context); - ErrorIfCheck_2p5uniza3kjyg(op, context); - ErrorIfCheck_318wf63fa7ql0(op, context); + ErrorIfCheck_1hrio849y2qnx(op, context); + ErrorIfCheck_3m5ijs493bw6j(op, context); ErrorIfCheck_341t6ysqc16b2(op, context); ErrorIfCheck_uqm570jwaqb6(op, context); ErrorIfCheck_34iiwt6o66qfa(op, context); @@ -301,41 +313,43 @@ void ValidateOperator_CONV3D(const regor::Operation *op, const Context &context) void ValidateOperator_DEPTHWISE_CONV2D(const regor::Operation *op, const Context &context) { - const Argument input = {Category::Input, "input", "in_t", {4, 4}}; /*Input tensor shape=[N,H,W,C]*/ + const Argument input = {Category::Input, "input", "in_t", {4, 4}}; /*Input tensor shape=[N,IH,IW,C]*/ const Argument weight = {Category::Input, "weight", "weight_t", {4, 4}}; /*Weight kernel size KH x KW shape=[KH,KW,C,M]*/ - const Argument bias = {Category::Input, "bias", "out_t", {1, 1}}; /*Per output channel bias data. shape=[BC]*/ + const Argument bias = {Category::Input, "bias", "out_t", {1, 1}}; /*Per output channel bias data. + Bias data will + be broadcast if BC == 1. 
shape=[BC]*/ + const Argument input_zp = {Category::Input, "input_zp", "in_t", {1, 1}}; /*Input tensor zero point. Must be zero for + non-int8 types. shape=[1]*/ + const Argument weight_zp = {Category::Input, "weight_zp", "weight_t", {1, 1}}; /*Weight zero point. Must be zero for + non-int8 types. shape=[1]*/ const Argument pad = {Category::Attribute, "pad", "i32_t", {1, 1}}; /*[pad_top, pad_bottom, pad_left, pad_right] shape=[4]*/ const Argument stride = {Category::Attribute, "stride", "i32_t", {1, 1}}; /*[stride_y, stride_x] shape=[2]*/ const Argument dilation = {Category::Attribute, "dilation", "i32_t", {1, 1}}; /*[dilation_y, dilation_x] shape=[2]*/ - const Argument input_zp = { - Category::ScalarAttribute, - "input_zp", - "in_t", - }; /*Input tensor zero point. Must be zero for non-int8 types. */ - const Argument weight_zp = { - Category::ScalarAttribute, - "weight_zp", - "weight_t", - }; /*Weight zero point. Must be zero for non-int8 types. */ + const Argument acc_type = { + Category::Attribute, + "acc_type", + "acc_type_t", + }; /*Enumerated type, must be one of INT32, INT48, FP16, FP32 matching the type of acc_t in the Supported Data Types + table for this operation shape=-*/ const Argument local_bound = { - Category::ScalarAttribute, + Category::Attribute, "local_bound", "bool_t", }; /* This optional attribute affects the floating-point compliance error bound. The default of false allows for direct and transform based, fast convolution algorithms. Only set to true if direct dot-product calculation - precision is required. */ + precision is required. 
shape=-*/ const Argument output = {Category::Output, "output", "out_t", {4, 4}}; /*Output tensor shape=[N,OH,OW,C*M]*/ const std::vector arguments = { &input, &weight, &bias, + &input_zp, + &weight_zp, &pad, &stride, &dilation, - &input_zp, - &weight_zp, + &acc_type, &local_bound, &output, }; @@ -344,29 +358,32 @@ void ValidateOperator_DEPTHWISE_CONV2D(const regor::Operation *op, const Context {"in_t", "i8_t"}, {"weight_t", "i8_t"}, {"out_t", "i32_t"}, + {"acc_t", "i32_t"}, }, // signed 8x8 with int32 accumulate { {"in_t", "i8_t"}, {"weight_t", "i4_t"}, {"out_t", "i32_t"}, + {"acc_t", "i32_t"}, }, // signed 8x4 with int32 accumulate { {"in_t", "i16_t"}, {"weight_t", "i8_t"}, {"out_t", "i48_t"}, + {"acc_t", "i48_t"}, }, // signed 16x8 with int48 accumulate }; ValidateArguments(op, arguments, typesupports, context); - ErrorIfCheck_2p5uniza3kjyg(op, context); - ErrorIfCheck_318wf63fa7ql0(op, context); + ErrorIfCheck_1hrio849y2qnx(op, context); + ErrorIfCheck_3m5ijs493bw6j(op, context); ErrorIfCheck_ojmgqziimenu(op, context); ErrorIfCheck_1lrylbkd3w7ix(op, context); ErrorIfCheck_3fzsq78v5ypau(op, context); ErrorIfCheck_2vhj6e48eyzlr(op, context); ErrorIfCheck_147wc580l2tik(op, context); ErrorIfCheck_2d0jmyhr9lscf(op, context); - ErrorIfCheck_10sexbqileii7(op, context); - ErrorIfCheck_3cem64qtn6ajr(op, context); + ErrorIfCheck_10td4qt70dp3i(op, context); + ErrorIfCheck_1qxtjwwlh068t(op, context); LevelCheck_1l00wczs5w70i(op, context); LevelCheck_1hle41fus7cpl(op, context); LevelCheck_2n3xkkz3ip4mz(op, context); @@ -377,70 +394,14 @@ void ValidateOperator_DEPTHWISE_CONV2D(const regor::Operation *op, const Context LevelCheck_as2lzdd5d28b(op, context); } -void ValidateOperator_FULLY_CONNECTED(const regor::Operation *op, const Context &context) -{ - const Argument input = {Category::Input, "input", "in_t", {2, 2}}; /*Input tensor shape=[N,IC]*/ - const Argument weight = {Category::Input, "weight", "weight_t", {2, 2}}; /*Weights shape=[OC,IC]*/ - const Argument bias = 
{Category::Input, "bias", "out_t", {1, 1}}; /*Per output channel bias data. shape=[BC]*/ - const Argument input_zp = { - Category::ScalarAttribute, - "input_zp", - "in_t", - }; /*Input tensor zero point. Must be zero for non-int8 types. */ - const Argument weight_zp = { - Category::ScalarAttribute, - "weight_zp", - "weight_t", - }; /*Weight zero point. Must be zero for non-int8 types. */ - const Argument output = {Category::Output, "output", "out_t", {2, 2}}; /*Output tensor shape=[N,OC]*/ - const std::vector arguments = { - &input, - &weight, - &bias, - &input_zp, - &weight_zp, - &output, - }; - const std::vector typesupports = { - { - {"in_t", "i8_t"}, - {"weight_t", "i8_t"}, - {"out_t", "i32_t"}, - }, // signed 8x8 with int32 accumulate - { - {"in_t", "i8_t"}, - {"weight_t", "i4_t"}, - {"out_t", "i32_t"}, - }, // signed 8x4 with int32 accumulate - { - {"in_t", "i16_t"}, - {"weight_t", "i8_t"}, - {"out_t", "i48_t"}, - }, // signed 16x8 with int48 accumulate - }; - ValidateArguments(op, arguments, typesupports, context); - ErrorIfCheck_2p5uniza3kjyg(op, context); - ErrorIfCheck_318wf63fa7ql0(op, context); - ErrorIfCheck_1gr4n0iszdlxr(op, context); - ErrorIfCheck_3ufiqep5ipuco(op, context); - ErrorIfCheck_3kcipzq18dxv9(op, context); - ErrorIfCheck_c9o11f07skde(op, context); -} - void ValidateOperator_MATMUL(const regor::Operation *op, const Context &context) { const Argument A = {Category::Input, "A", "in_t", {3, 3}}; /*Input tensor A, N matrices of size HxC shape=[N,H,C]*/ const Argument B = {Category::Input, "B", "in_t", {3, 3}}; /*Input tensor B, N matrices of size CxW shape=[N,C,W]*/ - const Argument A_zp = { - Category::ScalarAttribute, - "A_zp", - "in_t", - }; /*Input tensor A zero point. Must be zero for non-int8 types. */ - const Argument B_zp = { - Category::ScalarAttribute, - "B_zp", - "in_t", - }; /*Input tensor B zero point. Must be zero for non-int8 types. 
*/ + const Argument A_zp = {Category::Input, "A_zp", "in_t", {1, 1}}; /*Input tensor A zero point. Must be zero for + non-int8 types. shape=[1]*/ + const Argument B_zp = {Category::Input, "B_zp", "in_t", {1, 1}}; /*Input tensor B zero point. Must be zero for + non-int8 types. shape=[1]*/ const Argument output = {Category::Output, "output", "out_t", {3, 3}}; /*Output tensor, N matrices of size HxW shape=[N,H,W]*/ const std::vector arguments = { @@ -461,7 +422,7 @@ void ValidateOperator_MATMUL(const regor::Operation *op, const Context &context) }, // signed 16x16 with int48 accumulate }; ValidateArguments(op, arguments, typesupports, context); - ErrorIfCheck_28csiz8foar64(op, context); + ErrorIfCheck_2autvayhidla8(op, context); ErrorIfCheck_h1uadv5irsu6(op, context); ErrorIfCheck_1kfh97qingywb(op, context); ErrorIfCheck_1azcq4511qzyx(op, context); @@ -474,12 +435,19 @@ void ValidateOperator_MAX_POOL2D(const regor::Operation *op, const Context &cont const Argument stride = {Category::Attribute, "stride", "i32_t", {1, 1}}; /*[stride_y, stride_x] shape=[2]*/ const Argument pad = {Category::Attribute, "pad", "i32_t", {1, 1}}; /*[pad_top, pad_bottom, pad_left, pad_right] shape=[4]*/ + const Argument nan_mode = { + Category::Attribute, + "nan_mode", + "nan_propagation_mode_t", + }; /* PROPAGATE or IGNORE. Set to PROPAGATE by default. This attribute affects the floating-point NaN propagation + approach. This attribute is ignored by non floating-point types. 
shape=-*/ const Argument output = {Category::Output, "output", "in_out_t", {4, 4}}; /*Output tensor 4D shape=[N,OH,OW,C]*/ const std::vector arguments = { &input, &kernel, &stride, &pad, + &nan_mode, &output, }; const std::vector typesupports = { @@ -514,39 +482,39 @@ void ValidateOperator_TRANSPOSE_CONV2D(const regor::Operation *op, const Context const Argument input = {Category::Input, "input", "in_t", {4, 4}}; /*Input tensor shape=[N,IH,IW,IC]*/ const Argument weight = {Category::Input, "weight", "weight_t", {4, 4}}; /*Weight kernel size KH x KW shape=[OC,KH,KW,IC]*/ - const Argument bias = {Category::Input, "bias", "out_t", {1, 1}}; /*Per output channel bias data. shape=[BC]*/ - const Argument out_pad = {Category::Attribute, "out_pad", "i32_t", {1, 1}}; /*[out_pad_top, out_pad_bottom, - out_pad_left, out_pad_right] - shape=[4]*/ - const Argument stride = {Category::Attribute, "stride", "i32_t", {1, 1}}; /*[stride_y, stride_x] shape=[2]*/ - const Argument out_shape = {Category::Attribute, "out_shape", "i32_t", {1, 1}}; /*[N,OH,OW,OC] shape=[4]*/ - const Argument input_zp = { - Category::ScalarAttribute, - "input_zp", - "in_t", - }; /*Input tensor zero point. Must be zero for non-int8 types. */ - const Argument weight_zp = { - Category::ScalarAttribute, - "weight_zp", - "weight_t", - }; /*Weight zero point. Must be zero for non-int8 types. */ + const Argument bias = {Category::Input, "bias", "out_t", {1, 1}}; /*Per output channel bias data. + Bias data will + be broadcast if BC == 1. shape=[BC]*/ + const Argument input_zp = {Category::Input, "input_zp", "in_t", {1, 1}}; /*Input tensor zero point. Must be zero for + non-int8 types. shape=[1]*/ + const Argument weight_zp = {Category::Input, "weight_zp", "weight_t", {1, 1}}; /*Weight zero point. Must be zero for + non-int8 types. 
shape=[1]*/ + const Argument out_pad = {Category::Attribute, "out_pad", "i32_t", {1, 1}}; /*[out_pad_top, out_pad_bottom, + out_pad_left, out_pad_right] + shape=[4]*/ + const Argument stride = {Category::Attribute, "stride", "i32_t", {1, 1}}; /*[stride_y, stride_x] shape=[2]*/ + const Argument acc_type = { + Category::Attribute, + "acc_type", + "acc_type_t", + }; /*Enumerated type, must be one of INT32, INT48, FP16, FP32 matching the type of acc_t in the Supported Data Types + table for this operation shape=-*/ const Argument local_bound = { - Category::ScalarAttribute, + Category::Attribute, "local_bound", "bool_t", }; /* This optional attribute affects the floating-point compliance error bound. The default of false allows for direct and transform based, fast convolution algorithms. Only set to true if direct dot-product calculation - precision is required. */ + precision is required. shape=-*/ const Argument output = {Category::Output, "output", "out_t", {4, 4}}; /*Output tensor shape=[N,OH,OW,OC]*/ const std::vector arguments = { &input, &weight, &bias, - &out_pad, - &stride, - &out_shape, &input_zp, &weight_zp, + &out_pad, + &stride, + &acc_type, &local_bound, &output, }; @@ -555,21 +523,24 @@ void ValidateOperator_TRANSPOSE_CONV2D(const regor::Operation *op, const Context {"in_t", "i8_t"}, {"weight_t", "i8_t"}, {"out_t", "i32_t"}, + {"acc_t", "i32_t"}, }, // signed 8x8 with int32 accumulate { {"in_t", "i8_t"}, {"weight_t", "i4_t"}, {"out_t", "i32_t"}, + {"acc_t", "i32_t"}, }, // signed 8x4 with int32 accumulate { {"in_t", "i16_t"}, {"weight_t", "i8_t"}, {"out_t", "i48_t"}, + {"acc_t", "i48_t"}, }, // signed 16x8 with int48 accumulate }; ValidateArguments(op, arguments, typesupports, context); - ErrorIfCheck_2p5uniza3kjyg(op, context); - ErrorIfCheck_318wf63fa7ql0(op, context); + ErrorIfCheck_1hrio849y2qnx(op, context); + ErrorIfCheck_3m5ijs493bw6j(op, context); ErrorIfCheck_q9dl3x81rc4o(op, context); ErrorIfCheck_2rfkujt9lg7eq(op, context); 
ErrorIfCheck_1lrylbkd3w7ix(op, context); @@ -593,21 +564,28 @@ void ValidateOperator_CLAMP(const regor::Operation *op, const Context &context) { const Argument input = {Category::Input, "input", "in_out_t", {0, MAX_RANK}}; /*Input tensor shape=shape*/ const Argument min_val = { - Category::ScalarAttribute, + Category::Attribute, "min_val", "in_out_t", - }; /*Minimum clip value */ + }; /*Minimum clip value shape=-*/ const Argument max_val = { - Category::ScalarAttribute, + Category::Attribute, "max_val", "in_out_t", - }; /*Maximum clip value */ + }; /*Maximum clip value shape=-*/ + const Argument nan_mode = { + Category::Attribute, + "nan_mode", + "nan_propagation_mode_t", + }; /* PROPAGATE or IGNORE. Set to PROPAGATE by default. This attribute affects the floating-point NaN propagation + approach. This attribute is ignored by non floating-point types. shape=-*/ const Argument output = {Category::Output, "output", "in_out_t", {0, MAX_RANK}}; /*Output tensor of same type and shape as input shape=shape*/ const std::vector arguments = { &input, &min_val, &max_val, + &nan_mode, &output, }; const std::vector typesupports = { @@ -620,6 +598,7 @@ void ValidateOperator_CLAMP(const regor::Operation *op, const Context &context) }; ValidateArguments(op, arguments, typesupports, context); ErrorIfCheck_xod9coigx1x2(op, context); + ErrorIfCheck_15y4an3ceern5(op, context); ErrorIfCheck_10u6py7exa66n(op, context); LevelCheck_1flzmpv6hubzc(op, context); } @@ -659,8 +638,7 @@ void ValidateOperator_ADD(const regor::Operation *op, const Context &context) const Argument input1 = {Category::Input, "input1", "in_out_t", {0, MAX_RANK}}; /*Input tensor shape=shape1*/ const Argument input2 = {Category::Input, "input2", "in_out_t", {0, MAX_RANK}}; /*Input tensor with the same rank as input1 shape=shape2*/ - const Argument output = {Category::Output, "output", "in_out_t", {0, MAX_RANK}}; /*Output tensor with broadcast - shape if necessary shape=shape*/ + const Argument output = 
{Category::Output, "output", "in_out_t", {0, MAX_RANK}}; /*Output tensor shape=shape*/ const std::vector arguments = { &input1, &input2, @@ -670,12 +648,8 @@ void ValidateOperator_ADD(const regor::Operation *op, const Context &context) { {"in_out_t", "i32_t"}, }, // signed 32 - { - {"in_out_t", "shape_t"}, - }, // shape }; ValidateArguments(op, arguments, typesupports, context); - ErrorIfCheck_3tu2mqt96ickt(op, context); ErrorIfCheck_1hynqeiugz9lt(op, context); ErrorIfCheck_1yism57if6v2z(op, context); ErrorIfCheck_3k5ug2w7gxc7r(op, context); @@ -688,12 +662,11 @@ void ValidateOperator_ARITHMETIC_RIGHT_SHIFT(const regor::Operation *op, const C const Argument input2 = {Category::Input, "input2", "in_out_t", {0, MAX_RANK}}; /*Input tensor with the same rank as input1 shape=shape2*/ const Argument round = { - Category::ScalarAttribute, + Category::Attribute, "round", "bool_t", - }; /*If true then the shift is rounded */ - const Argument output = {Category::Output, "output", "in_out_t", {0, MAX_RANK}}; /*Output tensor with broadcast - shape if necessary shape=shape*/ + }; /*If true then the shift is rounded shape=-*/ + const Argument output = {Category::Output, "output", "in_out_t", {0, MAX_RANK}}; /*Output tensor shape=shape*/ const std::vector arguments = { &input1, &input2, @@ -716,7 +689,7 @@ void ValidateOperator_ARITHMETIC_RIGHT_SHIFT(const regor::Operation *op, const C ErrorIfCheck_1yism57if6v2z(op, context); ErrorIfCheck_3k5ug2w7gxc7r(op, context); LevelCheck_1flzmpv6hubzc(op, context); - RequireCheck_7uc4ey0qoi0f(op, context); + RequireCheck_3gogyrefl20gp(op, context); } void ValidateOperator_BITWISE_AND(const regor::Operation *op, const Context &context) @@ -724,8 +697,7 @@ void ValidateOperator_BITWISE_AND(const regor::Operation *op, const Context &con const Argument input1 = {Category::Input, "input1", "in_out_t", {0, MAX_RANK}}; /*Input tensor shape=shape1*/ const Argument input2 = {Category::Input, "input2", "in_out_t", {0, MAX_RANK}}; /*Input tensor with 
the same rank as input1 shape=shape2*/ - const Argument output = {Category::Output, "output", "in_out_t", {0, MAX_RANK}}; /*Output tensor with broadcast - shape if necessary shape=shape*/ + const Argument output = {Category::Output, "output", "in_out_t", {0, MAX_RANK}}; /*Output tensor shape=shape*/ const std::vector arguments = { &input1, &input2, @@ -754,8 +726,7 @@ void ValidateOperator_BITWISE_OR(const regor::Operation *op, const Context &cont const Argument input1 = {Category::Input, "input1", "in_out_t", {0, MAX_RANK}}; /*Input tensor shape=shape1*/ const Argument input2 = {Category::Input, "input2", "in_out_t", {0, MAX_RANK}}; /*Input tensor with the same rank as input1 shape=shape2*/ - const Argument output = {Category::Output, "output", "in_out_t", {0, MAX_RANK}}; /*Output tensor with broadcast - shape if necessary shape=shape*/ + const Argument output = {Category::Output, "output", "in_out_t", {0, MAX_RANK}}; /*Output tensor shape=shape*/ const std::vector arguments = { &input1, &input2, @@ -784,8 +755,7 @@ void ValidateOperator_BITWISE_XOR(const regor::Operation *op, const Context &con const Argument input1 = {Category::Input, "input1", "in_out_t", {0, MAX_RANK}}; /*Input tensor shape=shape1*/ const Argument input2 = {Category::Input, "input2", "in_out_t", {0, MAX_RANK}}; /*Input tensor with the same rank as input1 shape=shape2*/ - const Argument output = {Category::Output, "output", "in_out_t", {0, MAX_RANK}}; /*Output tensor with broadcast - shape if necessary shape=shape*/ + const Argument output = {Category::Output, "output", "in_out_t", {0, MAX_RANK}}; /*Output tensor shape=shape*/ const std::vector arguments = { &input1, &input2, @@ -814,8 +784,7 @@ void ValidateOperator_INTDIV(const regor::Operation *op, const Context &context) const Argument input1 = {Category::Input, "input1", "in_out_t", {0, MAX_RANK}}; /*Input tensor shape=shape1*/ const Argument input2 = {Category::Input, "input2", "in_out_t", {0, MAX_RANK}}; /*Input tensor with the same rank 
as input1 shape=shape2*/ - const Argument output = {Category::Output, "output", "in_out_t", {0, MAX_RANK}}; /*Output tensor with broadcast - shape if necessary shape=shape*/ + const Argument output = {Category::Output, "output", "in_out_t", {0, MAX_RANK}}; /*Output tensor shape=shape*/ const std::vector arguments = { &input1, &input2, @@ -825,18 +794,13 @@ void ValidateOperator_INTDIV(const regor::Operation *op, const Context &context) { {"in_out_t", "i32_t"}, }, // signed 32 - { - {"in_out_t", "shape_t"}, - }, // shape }; ValidateArguments(op, arguments, typesupports, context); - ErrorIfCheck_3tu2mqt96ickt(op, context); ErrorIfCheck_1hynqeiugz9lt(op, context); ErrorIfCheck_1yism57if6v2z(op, context); ErrorIfCheck_3k5ug2w7gxc7r(op, context); LevelCheck_1flzmpv6hubzc(op, context); RequireCheck_35z4hcgn21c8p(op, context); - RequireCheck_32ckjbsfiesgu(op, context); } void ValidateOperator_LOGICAL_AND(const regor::Operation *op, const Context &context) @@ -844,8 +808,7 @@ void ValidateOperator_LOGICAL_AND(const regor::Operation *op, const Context &con const Argument input1 = {Category::Input, "input1", "in_out_t", {0, MAX_RANK}}; /*Input tensor shape=shape1*/ const Argument input2 = {Category::Input, "input2", "in_out_t", {0, MAX_RANK}}; /*Input tensor with the same rank as input1 shape=shape2*/ - const Argument output = {Category::Output, "output", "in_out_t", {0, MAX_RANK}}; /*Output tensor with broadcast - shape if necessary shape=shape*/ + const Argument output = {Category::Output, "output", "in_out_t", {0, MAX_RANK}}; /*Output tensor shape=shape*/ const std::vector arguments = { &input1, &input2, @@ -854,7 +817,7 @@ void ValidateOperator_LOGICAL_AND(const regor::Operation *op, const Context &con const std::vector typesupports = { { {"in_out_t", "bool_t"}, - }, // boolean + }, // Boolean }; ValidateArguments(op, arguments, typesupports, context); ErrorIfCheck_1hynqeiugz9lt(op, context); @@ -868,8 +831,7 @@ void ValidateOperator_LOGICAL_LEFT_SHIFT(const 
regor::Operation *op, const Conte const Argument input1 = {Category::Input, "input1", "in_out_t", {0, MAX_RANK}}; /*Input tensor shape=shape1*/ const Argument input2 = {Category::Input, "input2", "in_out_t", {0, MAX_RANK}}; /*Input tensor with the same rank as input1 shape=shape2*/ - const Argument output = {Category::Output, "output", "in_out_t", {0, MAX_RANK}}; /*Output tensor with broadcast - shape if necessary shape=shape*/ + const Argument output = {Category::Output, "output", "in_out_t", {0, MAX_RANK}}; /*Output tensor shape=shape*/ const std::vector arguments = { &input1, &input2, @@ -891,7 +853,7 @@ void ValidateOperator_LOGICAL_LEFT_SHIFT(const regor::Operation *op, const Conte ErrorIfCheck_1yism57if6v2z(op, context); ErrorIfCheck_3k5ug2w7gxc7r(op, context); LevelCheck_1flzmpv6hubzc(op, context); - RequireCheck_3k2pr9vozq62t(op, context); + RequireCheck_3gogyrefl20gp(op, context); } void ValidateOperator_LOGICAL_RIGHT_SHIFT(const regor::Operation *op, const Context &context) @@ -899,8 +861,7 @@ void ValidateOperator_LOGICAL_RIGHT_SHIFT(const regor::Operation *op, const Cont const Argument input1 = {Category::Input, "input1", "in_out_t", {0, MAX_RANK}}; /*Input tensor shape=shape1*/ const Argument input2 = {Category::Input, "input2", "in_out_t", {0, MAX_RANK}}; /*Input tensor with the same rank as input1 shape=shape2*/ - const Argument output = {Category::Output, "output", "in_out_t", {0, MAX_RANK}}; /*Output tensor with broadcast - shape if necessary shape=shape*/ + const Argument output = {Category::Output, "output", "in_out_t", {0, MAX_RANK}}; /*Output tensor shape=shape*/ const std::vector arguments = { &input1, &input2, @@ -922,7 +883,7 @@ void ValidateOperator_LOGICAL_RIGHT_SHIFT(const regor::Operation *op, const Cont ErrorIfCheck_1yism57if6v2z(op, context); ErrorIfCheck_3k5ug2w7gxc7r(op, context); LevelCheck_1flzmpv6hubzc(op, context); - RequireCheck_1h6xoevynk8a0(op, context); + RequireCheck_3gogyrefl20gp(op, context); } void 
ValidateOperator_LOGICAL_OR(const regor::Operation *op, const Context &context) @@ -930,8 +891,7 @@ void ValidateOperator_LOGICAL_OR(const regor::Operation *op, const Context &cont const Argument input1 = {Category::Input, "input1", "in_out_t", {0, MAX_RANK}}; /*Input tensor shape=shape1*/ const Argument input2 = {Category::Input, "input2", "in_out_t", {0, MAX_RANK}}; /*Input tensor with the same rank as input1 shape=shape2*/ - const Argument output = {Category::Output, "output", "in_out_t", {0, MAX_RANK}}; /*Output tensor with broadcast - shape if necessary shape=shape*/ + const Argument output = {Category::Output, "output", "in_out_t", {0, MAX_RANK}}; /*Output tensor shape=shape*/ const std::vector arguments = { &input1, &input2, @@ -940,7 +900,7 @@ void ValidateOperator_LOGICAL_OR(const regor::Operation *op, const Context &cont const std::vector typesupports = { { {"in_out_t", "bool_t"}, - }, // boolean + }, // Boolean }; ValidateArguments(op, arguments, typesupports, context); ErrorIfCheck_1hynqeiugz9lt(op, context); @@ -954,8 +914,7 @@ void ValidateOperator_LOGICAL_XOR(const regor::Operation *op, const Context &con const Argument input1 = {Category::Input, "input1", "in_out_t", {0, MAX_RANK}}; /*Input tensor shape=shape1*/ const Argument input2 = {Category::Input, "input2", "in_out_t", {0, MAX_RANK}}; /*Input tensor with the same rank as input1 shape=shape2*/ - const Argument output = {Category::Output, "output", "in_out_t", {0, MAX_RANK}}; /*Output tensor with broadcast - shape if necessary shape=shape*/ + const Argument output = {Category::Output, "output", "in_out_t", {0, MAX_RANK}}; /*Output tensor shape=shape*/ const std::vector arguments = { &input1, &input2, @@ -964,7 +923,7 @@ void ValidateOperator_LOGICAL_XOR(const regor::Operation *op, const Context &con const std::vector typesupports = { { {"in_out_t", "bool_t"}, - }, // boolean + }, // Boolean }; ValidateArguments(op, arguments, typesupports, context); ErrorIfCheck_1hynqeiugz9lt(op, context); @@ 
-978,11 +937,17 @@ void ValidateOperator_MAXIMUM(const regor::Operation *op, const Context &context const Argument input1 = {Category::Input, "input1", "in_out_t", {0, MAX_RANK}}; /*Input tensor shape=shape1*/ const Argument input2 = {Category::Input, "input2", "in_out_t", {0, MAX_RANK}}; /*Input tensor with the same rank as input1 shape=shape2*/ - const Argument output = {Category::Output, "output", "in_out_t", {0, MAX_RANK}}; /*Output tensor with broadcast - shape if necessary shape=shape*/ + const Argument nan_mode = { + Category::Attribute, + "nan_mode", + "nan_propagation_mode_t", + }; /* PROPAGATE or IGNORE. Set to PROPAGATE by default. This attribute affects the floating-point NaN propagation + approach. This attribute is ignored by non floating-point types. shape=-*/ + const Argument output = {Category::Output, "output", "in_out_t", {0, MAX_RANK}}; /*Output tensor shape=shape*/ const std::vector arguments = { &input1, &input2, + &nan_mode, &output, }; const std::vector typesupports = { @@ -1002,11 +967,17 @@ void ValidateOperator_MINIMUM(const regor::Operation *op, const Context &context const Argument input1 = {Category::Input, "input1", "in_out_t", {0, MAX_RANK}}; /*Input tensor shape=shape1*/ const Argument input2 = {Category::Input, "input2", "in_out_t", {0, MAX_RANK}}; /*Input tensor with the same rank as input1 shape=shape2*/ - const Argument output = {Category::Output, "output", "in_out_t", {0, MAX_RANK}}; /*Output tensor with broadcast - shape if necessary shape=shape*/ + const Argument nan_mode = { + Category::Attribute, + "nan_mode", + "nan_propagation_mode_t", + }; /* PROPAGATE or IGNORE. Set to PROPAGATE by default. This attribute affects the floating-point NaN propagation + approach. This attribute is ignored by non floating-point types. 
shape=-*/ + const Argument output = {Category::Output, "output", "in_out_t", {0, MAX_RANK}}; /*Output tensor shape=shape*/ const std::vector arguments = { &input1, &input2, + &nan_mode, &output, }; const std::vector typesupports = { @@ -1026,13 +997,9 @@ void ValidateOperator_MUL(const regor::Operation *op, const Context &context) const Argument input1 = {Category::Input, "input1", "in_t", {0, MAX_RANK}}; /*Input tensor shape=shape1*/ const Argument input2 = {Category::Input, "input2", "in_t", {0, MAX_RANK}}; /*Input tensor with the same rank as input1 shape=shape2*/ - const Argument shift = { - Category::ScalarAttribute, - "shift", - "i8_t", - }; /*Result right shift (i32_t data type only) */ - const Argument output = {Category::Output, "output", "out_t", {0, MAX_RANK}}; /*Output tensor with broadcast shape - if necessary shape=shape*/ + const Argument shift = {Category::Input, "shift", "i8_t", {1, 1}}; /*Result right shift (used only when in_t is + i32_t) shape=[1]*/ + const Argument output = {Category::Output, "output", "out_t", {0, MAX_RANK}}; /*Output tensor shape=shape*/ const std::vector arguments = { &input1, &input2, @@ -1052,20 +1019,15 @@ void ValidateOperator_MUL(const regor::Operation *op, const Context &context) {"in_t", "i32_t"}, {"out_t", "i32_t"}, }, // signed 32 - { - {"in_t", "shape_t"}, - {"out_t", "shape_t"}, - }, // shape }; ValidateArguments(op, arguments, typesupports, context); - ErrorIfCheck_3tu2mqt96ickt(op, context); ErrorIfCheck_1hynqeiugz9lt(op, context); ErrorIfCheck_1yism57if6v2z(op, context); ErrorIfCheck_3k5ug2w7gxc7r(op, context); LevelCheck_1flzmpv6hubzc(op, context); RequireCheck_2f51h19mqfhr8(op, context); - RequireCheck_1oaur42wgph0t(op, context); - RequireCheck_3dbpm758kyex1(op, context); + RequireCheck_3jqx5d6a2c85r(op, context); + RequireCheck_1b64l72fvni7o(op, context); } void ValidateOperator_POW(const regor::Operation *op, const Context &context) @@ -1073,8 +1035,7 @@ void ValidateOperator_POW(const regor::Operation *op, 
const Context &context) const Argument input1 = {Category::Input, "input1", "in_out_t", {0, MAX_RANK}}; /*Input tensor shape=shape1*/ const Argument input2 = {Category::Input, "input2", "in_out_t", {0, MAX_RANK}}; /*Input tensor with the same rank as input1 shape=shape2*/ - const Argument output = {Category::Output, "output", "in_out_t", {0, MAX_RANK}}; /*Output tensor with broadcast - shape if necessary shape=shape*/ + const Argument output = {Category::Output, "output", "in_out_t", {0, MAX_RANK}}; /*Output tensor shape=shape*/ const std::vector arguments = { &input1, &input2, @@ -1086,6 +1047,10 @@ void ValidateOperator_POW(const regor::Operation *op, const Context &context) ErrorIfCheck_1yism57if6v2z(op, context); ErrorIfCheck_3k5ug2w7gxc7r(op, context); LevelCheck_1flzmpv6hubzc(op, context); + RequireCheck_3otz8rylb4eh1(op, context); + RequireCheck_2p74g4god707n(op, context); + RequireCheck_61j2lms4vo0v(op, context); + RequireCheck_3nkub9jwwaf4h(op, context); } void ValidateOperator_SUB(const regor::Operation *op, const Context &context) @@ -1093,8 +1058,7 @@ void ValidateOperator_SUB(const regor::Operation *op, const Context &context) const Argument input1 = {Category::Input, "input1", "in_out_t", {0, MAX_RANK}}; /*Input tensor shape=shape1*/ const Argument input2 = {Category::Input, "input2", "in_out_t", {0, MAX_RANK}}; /*Input tensor with the same rank as input1 shape=shape2*/ - const Argument output = {Category::Output, "output", "in_out_t", {0, MAX_RANK}}; /*Output tensor with broadcast - shape if necessary shape=shape*/ + const Argument output = {Category::Output, "output", "in_out_t", {0, MAX_RANK}}; /*Output tensor shape=shape*/ const std::vector arguments = { &input1, &input2, @@ -1104,12 +1068,8 @@ void ValidateOperator_SUB(const regor::Operation *op, const Context &context) { {"in_out_t", "i32_t"}, }, // signed 32 - { - {"in_out_t", "shape_t"}, - }, // shape }; ValidateArguments(op, arguments, typesupports, context); - ErrorIfCheck_3tu2mqt96ickt(op, 
context); ErrorIfCheck_1hynqeiugz9lt(op, context); ErrorIfCheck_1yism57if6v2z(op, context); ErrorIfCheck_3k5ug2w7gxc7r(op, context); @@ -1118,11 +1078,11 @@ void ValidateOperator_SUB(const regor::Operation *op, const Context &context) void ValidateOperator_TABLE(const regor::Operation *op, const Context &context) { - const Argument input = {Category::Input, "input", "in_t", {0, MAX_RANK}}; /*Input tensor shape=shape*/ - const Argument table = {Category::Attribute, "table", "table_t", {1, 1}}; /*Lookup table tensor shape=[TABLE_SIZE]*/ + const Argument input1 = {Category::Input, "input1", "in_t", {0, MAX_RANK}}; /*Input tensor shape=shape*/ + const Argument table = {Category::Input, "table", "table_t", {1, 1}}; /*Lookup table tensor shape=[TABLE_SIZE]*/ const Argument output = {Category::Output, "output", "out_t", {0, MAX_RANK}}; /*Output tensor shape=shape*/ const std::vector arguments = { - &input, + &input1, &table, &output, }; @@ -1141,7 +1101,7 @@ void ValidateOperator_TABLE(const regor::Operation *op, const Context &context) }, // signed 16 }; ValidateArguments(op, arguments, typesupports, context); - ErrorIfCheck_10u6py7exa66n(op, context); + ErrorIfCheck_396rg8p65j58r(op, context); LevelCheck_1flzmpv6hubzc(op, context); RequireCheck_3o6eotvyt76cz(op, context); } @@ -1299,16 +1259,10 @@ void ValidateOperator_LOGICAL_NOT(const regor::Operation *op, const Context &con void ValidateOperator_NEGATE(const regor::Operation *op, const Context &context) { const Argument input1 = {Category::Input, "input1", "in_out_t", {0, MAX_RANK}}; /*Input tensor shape=shape*/ - const Argument input1_zp = { - Category::ScalarAttribute, - "input1_zp", - "in_out_t", - }; /*Input 1 zero point. Must be zero for non-int8 types. */ - const Argument output_zp = { - Category::ScalarAttribute, - "output_zp", - "in_out_t", - }; /*Output zero point. Must be zero for non-int8 types. */ + const Argument input1_zp = {Category::Input, "input1_zp", "in_out_t", {1, 1}}; /*Input 1 zero point. 
Must be zero + for non-int8 types. shape=[1]*/ + const Argument output_zp = {Category::Input, "output_zp", "in_out_t", {1, 1}}; /*Output zero point. Must be zero for + non-int8 types. shape=[1]*/ const Argument output = {Category::Output, "output", "in_out_t", {0, MAX_RANK}}; /*Output tensor of same type, size as the input tensor shape=shape*/ @@ -1333,8 +1287,8 @@ void ValidateOperator_NEGATE(const regor::Operation *op, const Context &context) }, // signed 32 }; ValidateArguments(op, arguments, typesupports, context); - ErrorIfCheck_1advtk54oueo2(op, context); - ErrorIfCheck_3nav30dsmv6gd(op, context); + ErrorIfCheck_3l2ksvk26m07h(op, context); + ErrorIfCheck_1ga3gcg4zkrkv(op, context); ErrorIfCheck_396rg8p65j58r(op, context); LevelCheck_1flzmpv6hubzc(op, context); } @@ -1379,9 +1333,7 @@ void ValidateOperator_SELECT(const regor::Operation *op, const Context &context) const Argument input3 = {Category::Input, "input3", "in_out_t", {0, MAX_RANK}}; /*Input value tensor if input1 is False shape=shape3*/ const Argument output = {Category::Output, "output", "in_out_t", {0, MAX_RANK}}; /*Output tensor of same type as - input2 and input3, with - broadcast shape if necessary - shape=shape*/ + input2 and input3 shape=shape*/ const std::vector arguments = { &input1, &input2, @@ -1415,8 +1367,7 @@ void ValidateOperator_EQUAL(const regor::Operation *op, const Context &context) const Argument input1 = {Category::Input, "input1", "in_t", {0, MAX_RANK}}; /*Input tensor shape=shape1*/ const Argument input2 = {Category::Input, "input2", "in_t", {0, MAX_RANK}}; /*Input tensor with the same rank as input1 shape=shape2*/ - const Argument output = {Category::Output, "output", "out_t", {0, MAX_RANK}}; /*Output tensor with broadcast shape - if necessary shape=shape*/ + const Argument output = {Category::Output, "output", "out_t", {0, MAX_RANK}}; /*Output tensor shape=shape*/ const std::vector arguments = { &input1, &input2, @@ -1440,8 +1391,7 @@ void ValidateOperator_GREATER(const 
regor::Operation *op, const Context &context const Argument input1 = {Category::Input, "input1", "in_t", {0, MAX_RANK}}; /*Input tensor shape=shape1*/ const Argument input2 = {Category::Input, "input2", "in_t", {0, MAX_RANK}}; /*Input tensor with the same rank as input1 shape=shape2*/ - const Argument output = {Category::Output, "output", "out_t", {0, MAX_RANK}}; /*Output tensor with broadcast shape - if necessary shape=shape*/ + const Argument output = {Category::Output, "output", "out_t", {0, MAX_RANK}}; /*Output tensor shape=shape*/ const std::vector arguments = { &input1, &input2, @@ -1465,8 +1415,7 @@ void ValidateOperator_GREATER_EQUAL(const regor::Operation *op, const Context &c const Argument input1 = {Category::Input, "input1", "in_t", {0, MAX_RANK}}; /*Input tensor shape=shape1*/ const Argument input2 = {Category::Input, "input2", "in_t", {0, MAX_RANK}}; /*Input tensor with the same rank as input1 shape=shape2*/ - const Argument output = {Category::Output, "output", "out_t", {0, MAX_RANK}}; /*Output tensor with broadcast shape - if necessary shape=shape*/ + const Argument output = {Category::Output, "output", "out_t", {0, MAX_RANK}}; /*Output tensor shape=shape*/ const std::vector arguments = { &input1, &input2, @@ -1489,10 +1438,10 @@ void ValidateOperator_REDUCE_ALL(const regor::Operation *op, const Context &cont { const Argument input = {Category::Input, "input", "in_out_t", {1, MAX_RANK}}; /*Input tensor shape=shape1*/ const Argument axis = { - Category::ScalarAttribute, + Category::Attribute, "axis", "i32_t", - }; /*Axis to reduce, in range from 0 to rank(shape1)-1 */ + }; /*Axis to reduce, in range from 0 to rank(shape1)-1 shape=-*/ const Argument output = {Category::Output, "output", "in_out_t", {1, MAX_RANK}}; /*Output tensor. Same rank as the input tensor. 
shape=shape*/ const std::vector arguments = { @@ -1503,7 +1452,7 @@ void ValidateOperator_REDUCE_ALL(const regor::Operation *op, const Context &cont const std::vector typesupports = { { {"in_out_t", "bool_t"}, - }, // boolean + }, // Boolean }; ValidateArguments(op, arguments, typesupports, context); ErrorIfCheck_3tg4p2a5te0jy(op, context); @@ -1514,10 +1463,10 @@ void ValidateOperator_REDUCE_ANY(const regor::Operation *op, const Context &cont { const Argument input = {Category::Input, "input", "in_out_t", {1, MAX_RANK}}; /*Input tensor shape=shape1*/ const Argument axis = { - Category::ScalarAttribute, + Category::Attribute, "axis", "i32_t", - }; /*Axis to reduce, in range from 0 to rank(shape1)-1 */ + }; /*Axis to reduce, in range from 0 to rank(shape1)-1 shape=-*/ const Argument output = {Category::Output, "output", "in_out_t", {1, MAX_RANK}}; /*Output tensor. Same rank as the input tensor. shape=shape*/ const std::vector arguments = { @@ -1528,7 +1477,7 @@ void ValidateOperator_REDUCE_ANY(const regor::Operation *op, const Context &cont const std::vector typesupports = { { {"in_out_t", "bool_t"}, - }, // boolean + }, // Boolean }; ValidateArguments(op, arguments, typesupports, context); ErrorIfCheck_3tg4p2a5te0jy(op, context); @@ -1539,15 +1488,22 @@ void ValidateOperator_REDUCE_MAX(const regor::Operation *op, const Context &cont { const Argument input = {Category::Input, "input", "in_out_t", {1, MAX_RANK}}; /*Input tensor shape=shape1*/ const Argument axis = { - Category::ScalarAttribute, + Category::Attribute, "axis", "i32_t", - }; /*Axis to reduce, in range from 0 to rank(shape1)-1 */ + }; /*Axis to reduce, in range from 0 to rank(shape1)-1 shape=-*/ + const Argument nan_mode = { + Category::Attribute, + "nan_mode", + "nan_propagation_mode_t", + }; /* PROPAGATE or IGNORE. Set to PROPAGATE by default. This attribute affects the floating-point NaN propagation + approach. This attribute is ignored by non floating-point types. 
shape=-*/ const Argument output = {Category::Output, "output", "in_out_t", {1, MAX_RANK}}; /*Output tensor. Same rank as the input tensor. shape=shape*/ const std::vector arguments = { &input, &axis, + &nan_mode, &output, }; const std::vector typesupports = { @@ -1570,15 +1526,22 @@ void ValidateOperator_REDUCE_MIN(const regor::Operation *op, const Context &cont { const Argument input = {Category::Input, "input", "in_out_t", {1, MAX_RANK}}; /*Input tensor shape=shape1*/ const Argument axis = { - Category::ScalarAttribute, + Category::Attribute, "axis", "i32_t", - }; /*Axis to reduce, in range from 0 to rank(shape1)-1 */ + }; /*Axis to reduce, in range from 0 to rank(shape1)-1 shape=-*/ + const Argument nan_mode = { + Category::Attribute, + "nan_mode", + "nan_propagation_mode_t", + }; /* PROPAGATE or IGNORE. Set to PROPAGATE by default. This attribute affects the floating-point NaN propagation + approach. This attribute is ignored by non floating-point types. shape=-*/ const Argument output = {Category::Output, "output", "in_out_t", {1, MAX_RANK}}; /*Output tensor. Same rank as the input tensor. shape=shape*/ const std::vector arguments = { &input, &axis, + &nan_mode, &output, }; const std::vector typesupports = { @@ -1601,10 +1564,10 @@ void ValidateOperator_REDUCE_PRODUCT(const regor::Operation *op, const Context & { const Argument input = {Category::Input, "input", "in_out_t", {1, MAX_RANK}}; /*Input tensor shape=shape1*/ const Argument axis = { - Category::ScalarAttribute, + Category::Attribute, "axis", "i32_t", - }; /*Axis to reduce, in range from 0 to rank(shape1)-1 */ + }; /*Axis to reduce, in range from 0 to rank(shape1)-1 shape=-*/ const Argument output = {Category::Output, "output", "in_out_t", {1, MAX_RANK}}; /*Output tensor. Same rank as the input tensor. 
shape=shape*/ const std::vector arguments = { @@ -1620,13 +1583,12 @@ void ValidateOperator_REDUCE_PRODUCT(const regor::Operation *op, const Context & void ValidateOperator_REDUCE_SUM(const regor::Operation *op, const Context &context) { - const Argument input = {Category::Input, "input", "in_out_t", {1, MAX_RANK}}; /*Input tensor with rank from 1 to 4 - shape=shape1*/ + const Argument input = {Category::Input, "input", "in_out_t", {1, MAX_RANK}}; /*Input tensor shape=shape1*/ const Argument axis = { - Category::ScalarAttribute, + Category::Attribute, "axis", "i32_t", - }; /*Axis to reduce, in range from 0 to rank(shape1)-1 */ + }; /*Axis to reduce, in range from 0 to rank(shape1)-1 shape=-*/ const Argument output = {Category::Output, "output", "in_out_t", {1, MAX_RANK}}; /*Output tensor. Same rank as the input tensor. shape=shape*/ const std::vector arguments = { @@ -1637,6 +1599,7 @@ void ValidateOperator_REDUCE_SUM(const regor::Operation *op, const Context &cont const std::vector typesupports = { { {"in_out_t", "i32_t"}, + {"acc_t", "i32_t"}, }, // signed 32 }; ValidateArguments(op, arguments, typesupports, context); @@ -1646,14 +1609,14 @@ void ValidateOperator_REDUCE_SUM(const regor::Operation *op, const Context &cont void ValidateOperator_CONCAT(const regor::Operation *op, const Context &context) { - const Argument input1 = {Category::Input, "input1", "in_out_t", {0, MAX_RANK}}; /*List of input tensors. All inputs + const Argument input1 = {Category::Input, "input1", "in_out_t", {1, MAX_RANK}}; /*List of input tensors. 
All inputs must have the same rank and data type shape=shapes1*/ const Argument axis = { - Category::ScalarAttribute, + Category::Attribute, "axis", "i32_t", - }; /*Axis along which concatenation is to occur, in range from 0 to rank(shape)-1 */ + }; /*Axis along which concatenation is to occur, in range from 0 to rank(shape)-1 shape=-*/ const Argument output = {Category::Output, "output", "in_out_t", {1, MAX_RANK}}; /*Output tensor shape=shape*/ const std::vector arguments = { &input1, @@ -1663,7 +1626,7 @@ void ValidateOperator_CONCAT(const regor::Operation *op, const Context &context) const std::vector typesupports = { { {"in_out_t", "bool_t"}, - }, // boolean + }, // Boolean { {"in_out_t", "i8_t"}, }, // signed 8 @@ -1673,30 +1636,29 @@ void ValidateOperator_CONCAT(const regor::Operation *op, const Context &context) { {"in_out_t", "i32_t"}, }, // signed 32 - { - {"in_out_t", "shape_t"}, - }, // shape }; ValidateArguments(op, arguments, typesupports, context); + ErrorIfCheck_2d3qdl1f70i6y(op, context); ErrorIfCheck_5y7ov1oeymoa(op, context); - ErrorIfCheck_oln8qpyh6lba(op, context); - ErrorIfCheck_3thipxl768n8b(op, context); - ErrorIfCheck_16s99hvsej4fo(op, context); - ErrorIfCheck_3bzibvkt1zqng(op, context); + ErrorIfCheck_1aloht2b77zby(op, context); + ErrorIfCheck_f1kt9a6h7s2p(op, context); + ErrorIfCheck_302z1f8mq8lg7(op, context); + LevelCheck_3tcyujqdy8gol(op, context); LevelCheck_1flzmpv6hubzc(op, context); } void ValidateOperator_PAD(const regor::Operation *op, const Context &context) { const Argument input1 = {Category::Input, "input1", "in_out_t", {1, MAX_RANK}}; /*Input tensor shape=shape1*/ - const Argument padding = {Category::Input, "padding", "shape_t", {2, 2}}; /*Number of pad elements at the start and - end of each dimension - shape=[rank(shape1),2]*/ - const Argument pad_const = { - Category::ScalarAttribute, - "pad_const", - "in_out_t", - }; /*Constant value to be used as padding */ + const Argument padding = {Category::Input, "padding", 
"shape_t", {1, 1}}; /*Number of pad elements at the start and + end of each dimension. The values in + padding are interpreted as start, end of + each dimension. As an example for rank 2, + the values would be interpreted as + [start_dim0, end_dim0, start_dim1, + end_dim1]. shape=[2*rank(shape1)]*/ + const Argument pad_const = {Category::Input, "pad_const", "in_out_t", {1, 1}}; /*The value to be used as padding. + shape=[1]*/ const Argument output = {Category::Output, "output", "in_out_t", {1, MAX_RANK}}; /*Output tensor of same type as the input tensor shape=shape*/ const std::vector arguments = { @@ -1708,7 +1670,7 @@ void ValidateOperator_PAD(const regor::Operation *op, const Context &context) const std::vector typesupports = { { {"in_out_t", "bool_t"}, - }, // boolean + }, // Boolean { {"in_out_t", "i8_t"}, }, // signed 8 @@ -1721,17 +1683,17 @@ void ValidateOperator_PAD(const regor::Operation *op, const Context &context) }; ValidateArguments(op, arguments, typesupports, context); ErrorIfCheck_14z7y0qe9lwps(op, context); - ErrorIfCheck_2rfef32dgp3be(op, context); - ErrorIfCheck_2sfcgak3rj1vs(op, context); + ErrorIfCheck_3dvn5k3273lwz(op, context); + ErrorIfCheck_34zvbtwx1r18j(op, context); LevelCheck_1flzmpv6hubzc(op, context); } void ValidateOperator_RESHAPE(const regor::Operation *op, const Context &context) { - const Argument input1 = {Category::Input, "input1", "in_out_t", {1, MAX_RANK}}; /*Input tensor shape=shape1*/ - const Argument shape = {Category::Attribute, "shape", "shape_t", {1, 1}}; /*1D shape tensor giving the new shape. - shape=[rank(shape)]*/ - const Argument output = {Category::Output, "output", "in_out_t", {1, MAX_RANK}}; /*Output tensor of same type, size + const Argument input1 = {Category::Input, "input1", "in_out_t", {0, MAX_RANK}}; /*Input tensor shape=shape1*/ + const Argument shape = {Category::Input, "shape", "shape_t", {1, 1}}; /*shape_t giving the new shape. 
+ shape=[rank(shape)]*/ + const Argument output = {Category::Output, "output", "in_out_t", {0, MAX_RANK}}; /*Output tensor of same type, size as the input tensor shape=shape*/ const std::vector arguments = { @@ -1742,7 +1704,7 @@ void ValidateOperator_RESHAPE(const regor::Operation *op, const Context &context const std::vector typesupports = { { {"in_out_t", "bool_t"}, - }, // boolean + }, // Boolean { {"in_out_t", "i8_t"}, }, // signed 8 @@ -1761,23 +1723,23 @@ void ValidateOperator_RESHAPE(const regor::Operation *op, const Context &context void ValidateOperator_REVERSE(const regor::Operation *op, const Context &context) { - const Argument input = {Category::Input, "input", "in_out_t", {1, MAX_RANK}}; /*Input tensor shape=shape*/ + const Argument input1 = {Category::Input, "input1", "in_out_t", {1, MAX_RANK}}; /*Input tensor shape=shape*/ const Argument axis = { - Category::ScalarAttribute, + Category::Attribute, "axis", "i32_t", - }; /*Axis to reverse, in range from 0 to rank(shape)-1 */ + }; /*Axis to reverse, in range from 0 to rank(shape)-1 shape=-*/ const Argument output = {Category::Output, "output", "in_out_t", {1, MAX_RANK}}; /*Output tensor. 
Same shape as input tensor shape=shape*/ const std::vector arguments = { - &input, + &input1, &axis, &output, }; const std::vector typesupports = { { {"in_out_t", "bool_t"}, - }, // boolean + }, // Boolean { {"in_out_t", "i8_t"}, }, // signed 8 @@ -1787,26 +1749,22 @@ void ValidateOperator_REVERSE(const regor::Operation *op, const Context &context { {"in_out_t", "i32_t"}, }, // signed 32 - { - {"in_out_t", "shape_t"}, - }, // shape }; ValidateArguments(op, arguments, typesupports, context); ErrorIfCheck_3hthyoock2ew5(op, context); - ErrorIfCheck_10u6py7exa66n(op, context); + ErrorIfCheck_396rg8p65j58r(op, context); LevelCheck_1flzmpv6hubzc(op, context); } void ValidateOperator_SLICE(const regor::Operation *op, const Context &context) { const Argument input1 = {Category::Input, "input1", "in_out_t", {1, MAX_RANK}}; /*Input tensor shape=shape1*/ - const Argument start = {Category::Attribute, "start", "index_t", {1, 1}}; /*List of integer coordinates, of length - equal to the rank of input1. Start - coordinate for slicing. - shape=[rank(shape1)]*/ - const Argument size = {Category::Attribute, "size", "index_t", {1, 1}}; /*List of integer size values, of length - equal to the rank of input1. Size of the - input to be used. shape=[rank(shape1)]*/ + const Argument start = {Category::Input, "start", "shape_t", {1, 1}}; /*List of integer coordinates, of length equal + to the rank of input1. Start coordinate for + slicing. shape=[rank(shape1)]*/ + const Argument size = {Category::Input, "size", "shape_t", {1, 1}}; /*List of integer size values, of length equal + to the rank of input1. Size of the input to be + used. 
shape=[rank(shape1)]*/ const Argument output = {Category::Output, "output", "in_out_t", {1, MAX_RANK}}; /*Output tensor of same type as the input tensor shape=shape*/ const std::vector arguments = { @@ -1818,7 +1776,7 @@ void ValidateOperator_SLICE(const regor::Operation *op, const Context &context) const std::vector typesupports = { { {"in_out_t", "bool_t"}, - }, // boolean + }, // Boolean { {"in_out_t", "i8_t"}, }, // signed 8 @@ -1857,7 +1815,7 @@ void ValidateOperator_TILE(const regor::Operation *op, const Context &context) const std::vector typesupports = { { {"in_out_t", "bool_t"}, - }, // boolean + }, // Boolean { {"in_out_t", "i8_t"}, }, // signed 8 @@ -1892,7 +1850,7 @@ void ValidateOperator_TRANSPOSE(const regor::Operation *op, const Context &conte const std::vector typesupports = { { {"in_out_t", "bool_t"}, - }, // boolean + }, // Boolean { {"in_out_t", "i8_t"}, }, // signed 8 @@ -1925,12 +1883,15 @@ void ValidateOperator_GATHER(const regor::Operation *op, const Context &context) }; const std::vector typesupports = { { + {"index_t", "i32_t"}, {"in_out_t", "i8_t"}, }, // signed 8 { + {"index_t", "i32_t"}, {"in_out_t", "i16_t"}, }, // signed 16 { + {"index_t", "i32_t"}, {"in_out_t", "i32_t"}, }, // signed 32 }; @@ -1955,12 +1916,15 @@ void ValidateOperator_SCATTER(const regor::Operation *op, const Context &context }; const std::vector typesupports = { { + {"index_t", "i32_t"}, {"in_out_t", "i8_t"}, }, // signed 8 { + {"index_t", "i32_t"}, {"in_out_t", "i16_t"}, }, // signed 16 { + {"index_t", "i32_t"}, {"in_out_t", "i32_t"}, }, // signed 32 }; @@ -1983,10 +1947,10 @@ void ValidateOperator_RESIZE(const regor::Operation *op, const Context &context) const Argument offset = {Category::Input, "offset", "shape_t", {1, 1}}; /*[offset_y, offset_x] shape=[2]*/ const Argument border = {Category::Input, "border", "shape_t", {1, 1}}; /*[border_y, border_x] shape=[2]*/ const Argument mode = { - Category::ScalarAttribute, + Category::Attribute, "mode", "resize_mode_t", 
- }; /*BILINEAR or NEAREST */ + }; /*BILINEAR or NEAREST shape=-*/ const Argument output = {Category::Output, "output", "out_t", {4, 4}}; /*Output tensor shape=[N,OH,OW,C]*/ const std::vector arguments = { &input, @@ -2100,60 +2064,58 @@ void ValidateOperator_CAST(const regor::Operation *op, const Context &context) void ValidateOperator_RESCALE(const regor::Operation *op, const Context &context) { const Argument input = {Category::Input, "input", "in_t", {0, MAX_RANK}}; /*Input tensor shape=shape*/ - const Argument output = {Category::Output, "output", "out_t", {0, MAX_RANK}}; /*Output tensor with the same shape as - input shape=shape*/ - const Argument input_zp = { - Category::ScalarAttribute, - "input_zp", - "in_t", - }; /*Input tensor zero point. int8/uint8 can have zero point within their valid range. uint16 zero point must be - either 0 or 32768. All other types must have zero point equal to 0. */ - const Argument output_zp = { - Category::ScalarAttribute, - "output_zp", - "out_t", - }; /*Output tensor zero point.int8/uint8 can have zero point within their valid range. uint16 zero point must be - either 0 or 32768. All other types must have zero point equal to 0. */ - const Argument multiplier = {Category::Attribute, "multiplier", "mul_t", {1, 1}}; /*Scaling multiplier array - shape=[NC]*/ - const Argument shift = {Category::Attribute, "shift", "i8_t", {1, 1}}; /*Scaling shift array shape=[NC]*/ + const Argument multiplier = {Category::Input, "multiplier", "mul_t", {1, 1}}; /*Scaling multiplier array + shape=[NC]*/ + const Argument shift = {Category::Input, "shift", "i8_t", {1, 1}}; /*Scaling shift array shape=[NC]*/ + const Argument input_zp = {Category::Input, "input_zp", "in_t", {1, 1}}; /*Input tensor zero point. int8/uint8 can + have zero point within their valid range. + uint16 zero point must be either 0 or + 32768. All other types must have zero + point equal to 0. 
shape=[1]*/ + const Argument output_zp = {Category::Input, "output_zp", "out_t", {1, 1}}; /*Output tensor zero point.int8/uint8 + can have zero point within their valid + range. uint16 zero point must be either + 0 or 32768. All other types must have + zero point equal to 0. shape=[1]*/ const Argument scale32 = { - Category::ScalarAttribute, + Category::Attribute, "scale32", "bool_t", - }; /*if (scale32) mul_t=i32_t else mul_t=i16_t */ - const Argument double_round = { - Category::ScalarAttribute, - "double_round", - "bool_t", - }; /*Select double round mode */ + }; /*if (scale32) mul_t=i32_t else mul_t=i16_t shape=-*/ + const Argument rounding_mode = { + Category::Attribute, + "rounding_mode", + "rounding_mode_t", + }; /*Select rounding mode shape=-*/ const Argument per_channel = { - Category::ScalarAttribute, + Category::Attribute, "per_channel", "bool_t", - }; /*if (per_channel) NC=shape[rank(shape)-1] else NC=1 */ + }; /*if (per_channel) NC=shape[rank(shape)-1] else NC=1 shape=-*/ const Argument input_unsigned = { - Category::ScalarAttribute, + Category::Attribute, "input_unsigned", "bool_t", - }; /*If True, treat the input values as unsigned. */ + }; /*If True, treat the input values as unsigned. shape=-*/ const Argument output_unsigned = { - Category::ScalarAttribute, + Category::Attribute, "output_unsigned", "bool_t", - }; /*If True, treat the output values as unsigned. */ + }; /*If True, treat the output values as unsigned. 
shape=-*/ + const Argument output = {Category::Output, "output", "out_t", {0, MAX_RANK}}; /*Output tensor with the same shape as + input shape=shape*/ const std::vector arguments = { &input, - &output, - &input_zp, - &output_zp, &multiplier, &shift, + &input_zp, + &output_zp, &scale32, - &double_round, + &rounding_mode, &per_channel, &input_unsigned, &output_unsigned, + &output, }; const std::vector typesupports = { { @@ -2206,24 +2168,29 @@ void ValidateOperator_RESCALE(const regor::Operation *op, const Context &context }, // 48-bit to 32-bit }; ValidateArguments(op, arguments, typesupports, context); - ErrorIfCheck_1wbutqm1lq6qy(op, context); - ErrorIfCheck_2x883ovw61v55(op, context); - ErrorIfCheck_7yfu5xo1ii36(op, context); - ErrorIfCheck_3kc0n1wjhehqz(op, context); - ErrorIfCheck_3rzfyy6qi1bly(op, context); - ErrorIfCheck_3ms1pbkpa2td9(op, context); - ErrorIfCheck_23cyq2l8quj8p(op, context); - ErrorIfCheck_13bcaagzywlqq(op, context); - ErrorIfCheck_31ty7f0kcbfxg(op, context); + ErrorIfCheck_2a4sjfbd544h5(op, context); + ErrorIfCheck_32ylwe00j5q2l(op, context); + ErrorIfCheck_3uwlzew8kfq5w(op, context); + ErrorIfCheck_1sxf726x838dv(op, context); + ErrorIfCheck_2fl3he9sci345(op, context); + ErrorIfCheck_1acxf2776vdap(op, context); + ErrorIfCheck_2ntycki2dof18(op, context); + ErrorIfCheck_1yv98jo1xcmke(op, context); + ErrorIfCheck_bkdiivlz937z(op, context); + ErrorIfCheck_242iuwska81dr(op, context); + ErrorIfCheck_2vooovn86b8fd(op, context); + ErrorIfCheck_107z2k4den74o(op, context); + ErrorIfCheck_38712gnuluf0u(op, context); + ErrorIfCheck_4alci0dog4gp(op, context); ErrorIfCheck_10u6py7exa66n(op, context); + ErrorIfCheck_31ty7f0kcbfxg(op, context); LevelCheck_1flzmpv6hubzc(op, context); } void ValidateOperator_CONST(const regor::Operation *op, const Context &context) { const Argument values = {Category::Attribute, "values", "out_t", {0, MAX_RANK}}; /*Constant values shape=shape*/ - const Argument output = {Category::Output, "output", "out_t", {0, MAX_RANK}}; 
/*Output tensor of the same type, size - as the input tensor shape=shape*/ + const Argument output = {Category::Output, "output", "out_t", {0, MAX_RANK}}; /*Output tensor shape=shape*/ const std::vector arguments = { &values, &output, @@ -2247,9 +2214,6 @@ void ValidateOperator_CONST(const regor::Operation *op, const Context &context) { {"out_t", "i48_t"}, }, // 48-bit - { - {"out_t", "shape_t"}, - }, // shape }; ValidateArguments(op, arguments, typesupports, context); ErrorIfCheck_3oet4aggtv528(op, context); @@ -2269,6 +2233,9 @@ void ValidateOperator_IDENTITY(const regor::Operation *op, const Context &contex { {"in_out_t", "bool_t"}, }, // Boolean + { + {"in_out_t", "i4_t"}, + }, // 4-bit { {"in_out_t", "i8_t"}, }, // 8-bit @@ -2278,6 +2245,9 @@ void ValidateOperator_IDENTITY(const regor::Operation *op, const Context &contex { {"in_out_t", "i32_t"}, }, // 32-bit + { + {"in_out_t", "i48_t"}, + }, // 48-bit }; ValidateArguments(op, arguments, typesupports, context); ErrorIfCheck_396rg8p65j58r(op, context); @@ -2288,64 +2258,66 @@ void ValidateOperator_CUSTOM(const regor::Operation *op, const Context &context) const Argument input_list = { Category::Input, "input_list", - "-", + "tensor_list_t", }; /*List of input tensors shape=-*/ - const Argument operatorName = { + const Argument operator_name = { Category::Attribute, - "operator", - "-", + "operator_name", + "String", }; /*String which tells the backend which custom operator is being called shape=-*/ - const Argument domain = { + const Argument domain_name = { Category::Attribute, - "domain", - "-", - }; /*String idenifier which can help avoid name collisions on the operator field. Different implementations of a + "domain_name", + "String", + }; /*String identifier which can help avoid name collisions on the operator field. Different implementations of a given operator would be in different domains. Implementations can choose which domains they want to support. 
shape=-*/ const Argument implementation_attrs = { Category::Attribute, "implementation_attrs", - "-", + "String", }; /*String value containing implementation specific attributes which apply to the operation shape=-*/ const Argument output_list = { Category::Output, "output_list", - "-", + "tensor_list_t", }; /*List of output tensors shape=-*/ const std::vector arguments = { &input_list, - &operatorName, - &domain, + &operator_name, + &domain_name, &implementation_attrs, &output_list, }; const std::vector typesupports = {}; ValidateArguments(op, arguments, typesupports, context); + LevelCheck_3ufj7d9b3dpok(op, context); + LevelCheck_2b1mift7kqw7v(op, context); } void ValidateOperator_COND_IF(const regor::Operation *op, const Context &context) { - const Argument condition = {Category::Input, "condition", "bool_t", {1, MAX_RANK}}; /*Input condition as a size 1 + const Argument condition = {Category::Input, "condition", "bool_t", {0, MAX_RANK}}; /*Input condition as a size 1 tensor shape=shape*/ const Argument input_list = { Category::Input, "input_list", - "-", + "tensor_list_t", }; /*List of input tensors shape=-*/ const Argument then_graph = { Category::Attribute, "then_graph", - "-", + "tosa_graph_t", }; /*TOSA graph to execute if condition is true shape=-*/ const Argument else_graph = { Category::Attribute, "else_graph", - "-", + "tosa_graph_t", }; /*TOSA graph to execute if condition is false shape=-*/ const Argument output_list = { Category::Output, "output_list", - "-", + "tensor_list_t", }; /*List of output tensors shape=-*/ const std::vector arguments = { &condition, @@ -2362,6 +2334,9 @@ void ValidateOperator_COND_IF(const regor::Operation *op, const Context &context ErrorIfCheck_n7biu53x2n6k(op, context); ErrorIfCheck_2fd4dk1zw032u(op, context); ErrorIfCheck_omgw2xdm6irr(op, context); + LevelCheck_1flzmpv6hubzc(op, context); + LevelCheck_3ufj7d9b3dpok(op, context); + LevelCheck_2b1mift7kqw7v(op, context); } void ValidateOperator_WHILE_LOOP(const 
regor::Operation *op, const Context &context) @@ -2369,22 +2344,22 @@ void ValidateOperator_WHILE_LOOP(const regor::Operation *op, const Context &cont const Argument input_list = { Category::Input, "input_list", - "-", + "tensor_list_t", }; /*List of input tensors shape=-*/ const Argument cond_graph = { Category::Attribute, "cond_graph", - "-", + "tosa_graph_t", }; /*TOSA graph to evaluate the condition shape=-*/ const Argument body_graph = { Category::Attribute, "body_graph", - "-", + "tosa_graph_t", }; /*TOSA graph to execute the loop body shape=-*/ const Argument output_list = { Category::Output, "output_list", - "-", + "tensor_list_t", }; /*List of output tensors shape=-*/ const std::vector arguments = { &input_list, @@ -2395,12 +2370,14 @@ void ValidateOperator_WHILE_LOOP(const regor::Operation *op, const Context &cont const std::vector typesupports = {}; ValidateArguments(op, arguments, typesupports, context); ErrorIfCheck_15kl5g5u1jrhq(op, context); - ErrorIfCheck_18hgmc3pexnw4(op, context); + ErrorIfCheck_2jyu87hs8upt4(op, context); ErrorIfCheck_12uu5ff3t3lv8(op, context); ErrorIfCheck_3puzf7van5acf(op, context); ErrorIfCheck_8tihij7a5ep0(op, context); ErrorIfCheck_3lu68v2531bjz(op, context); ErrorIfCheck_1fzl0zyxyd88z(op, context); + LevelCheck_3ufj7d9b3dpok(op, context); + LevelCheck_2b1mift7kqw7v(op, context); } } // namespace @@ -2409,7 +2386,7 @@ namespace tosa namespace validator { -void ValidateOperator_Version_0_80_0_Profile_BI(const GraphApi::GraphOperation *graphOp, const Context &context) +void ValidateOperator_Version_1_0_0_draft_Profile_PRO_INT(const GraphApi::GraphOperation *graphOp, const Context &context) { const auto *op = static_cast(graphOp); switch ( op->Type() ) @@ -2429,9 +2406,6 @@ void ValidateOperator_Version_0_80_0_Profile_BI(const GraphApi::GraphOperation * case regor::OpType::DepthwiseConv2D: ValidateOperator_DEPTHWISE_CONV2D(op, context); break; - case regor::OpType::FullyConnected: - ValidateOperator_FULLY_CONNECTED(op, 
context); - break; case regor::OpType::MatMul: ValidateOperator_MATMUL(op, context); break; -- GitLab