From 8b69a3f4bd2d31b31ee0ed7bd22e83a925098581 Mon Sep 17 00:00:00 2001
From: Mauricio Briceno
Date: Sat, 18 Jan 2025 15:35:22 +0100
Subject: [PATCH] MLBEDSW-10293: TFLite buffer offset support

- Regenerated the TFLite schema with the mutable API
- TFLite reader: implemented loading of buffers placed at the end of the
  file, as described in the schema
- Updated vela.py to read the input via mmap
- TFLite writer: implemented writing of buffers at the end of the file,
  as described in the schema

Change-Id: I169a5f0e512f1b038393145495ec7040be783969
Signed-off-by: Mauricio Briceno
---
 ethosu/regor/bindings/python/py_regor.cpp       |   8 +-
 .../regor/compiler/faststorage_allocator.cpp    |   2 +-
 ethosu/regor/test/CMakeLists.txt                |   1 +
 ethosu/regor/test/test_tflite_fb.cpp            | 136 +++
 .../regor/tflite/tflite_model_semantics.cpp     |   3 +-
 ethosu/regor/tflite/tflite_reader.cpp           |  18 +-
 ethosu/regor/tflite/tflite_reader.hpp           |   4 +-
 .../regor/tflite/tflite_schema_generated.hpp    | 883 +++++++++++++++++-
 ethosu/regor/tflite/tflite_writer.cpp           | 176 +++-
 ethosu/regor/tflite/tflite_writer.hpp           | 134 ++-
 ethosu/vela/vela.py                             |  12 +-
 11 files changed, 1328 insertions(+), 49 deletions(-)
 create mode 100644 ethosu/regor/test/test_tflite_fb.cpp

diff --git a/ethosu/regor/bindings/python/py_regor.cpp b/ethosu/regor/bindings/python/py_regor.cpp
index 4c3b34c9..c7a2df4d 100644
--- a/ethosu/regor/bindings/python/py_regor.cpp
+++ b/ethosu/regor/bindings/python/py_regor.cpp
@@ -215,11 +215,11 @@ public:
         }
     }

-    py::object PyCompile(py::bytes &input, const std::string &fmt)
+    py::object PyCompile(const py::buffer &input, const std::string &fmt)
     {
         // Extract input buffer and size of input buffer
-        py::buffer_info info(py::buffer(input).request());
-        const void *in_data = reinterpret_cast<const void *>(info.ptr);
+        py::buffer_info info(input.request());
+        const void *in_data = static_cast<const void *>(info.ptr);
         size_t in_size = size_t(std::max(info.size, 0));

         // Compile the input buffer and return a subclass of PyRegorCompiledModel
@@ -574,7 +574,7 @@ PYBIND11_MODULE(regor, m)
     m.def(
         "compile",
-        [](const std::string &arch, py::bytes &input, const std::string &fmt, const std::string &sysconfig,
+        [](const std::string &arch, py::buffer input, const std::string &fmt, const std::string &sysconfig,
             const std::string &options = "", bool verbose = false) -> py::object
         {
             PyRegor pyr(arch, verbose);
diff --git a/ethosu/regor/compiler/faststorage_allocator.cpp b/ethosu/regor/compiler/faststorage_allocator.cpp
index c304e3fe..6e3fd685 100644
--- a/ethosu/regor/compiler/faststorage_allocator.cpp
+++ b/ethosu/regor/compiler/faststorage_allocator.cpp
@@ -194,7 +194,7 @@ void FastStorageAllocator::AllocateFeatureMaps(const std::vector
             timeIndex);
         }
     }
-    assert(std::is_sorted(cpuTimeIndices.begin(), cpuTimeIndices.end()));
+    assert(std::is_sorted(cpuTimeIndices.cbegin(), cpuTimeIndices.cend()));

     // Evict live ranges that cross a CPU operator
     std::vector npuOnlyLrs;
diff --git a/ethosu/regor/test/CMakeLists.txt b/ethosu/regor/test/CMakeLists.txt
index 2c679531..54f4680a 100644
--- a/ethosu/regor/test/CMakeLists.txt
+++ b/ethosu/regor/test/CMakeLists.txt
@@ -60,6 +60,7 @@ add_catch_test(
     test_operation_utils.cpp
     test_graphir_optimiser.cpp
     test_fast_storage_allocator.cpp
+    test_tflite_fb.cpp
     DEPS
     test_common
 )
diff --git a/ethosu/regor/test/test_tflite_fb.cpp b/ethosu/regor/test/test_tflite_fb.cpp
new file mode 100644
index 00000000..8fa0c6aa
--- /dev/null
+++ b/ethosu/regor/test/test_tflite_fb.cpp
@@ -0,0 +1,136 @@
+//
+// SPDX-FileCopyrightText: Copyright 2025 Arm Limited and/or its affiliates
+//
+// SPDX-License-Identifier: Apache-2.0
+//
+// Licensed under the Apache License, Version 2.0 (the License); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an AS IS BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "common/common.hpp"
+
+#include "architecture/ethosu85/ethos_u85.hpp"
+#include "tflite/tflite_reader.hpp"
+#include "tflite/tflite_writer.hpp"
+#include "util.hpp"
+
+#include
+#include
+
+#include "regor.h"
+
+using namespace regor;
+
+
+TEST_CASE("test_tflite_fb - load/store")
+{
+    // Create arch
+    auto arch = CreateArchDefault();
+    std::string err = "noerror";
+    arch->CheckConfiguration(err);
+    REQUIRE(err == "noerror");
+
+    SECTION("Buffer offset")
+    {
+        // Get a simple passthrough model
+        size_t psSize, psOffset;
+        const void *tfliteOp;
+        const void *tfliteModel;
+        const auto passthrough = [&](size_t &size, size_t &offset, const void *&model, const void *&op)
+        {
+            flatbuffers::FlatBufferBuilder builder;
+            std::vector<flatbuffers::Offset<tflite::OperatorCode>> codes;
+            std::vector<flatbuffers::Offset<tflite::Operator>> serialised_operations;
+            std::vector<flatbuffers::Offset<tflite::SubGraph>> serialised_subgraphs;
+            serialised_operations.push_back(tflite::CreateOperator(
+                builder, 0, 0, 0, tflite::BuiltinOptions::AddOptions, tflite::CreateAddOptions(builder).Union()));
+            serialised_subgraphs.push_back(tflite::CreateSubGraphDirect(builder, nullptr, nullptr, nullptr, &serialised_operations));
+            codes.push_back(tflite::CreateOperatorCodeDirect(builder, 0, nullptr, 1, tflite::BuiltinOperator::ADD));
+            const auto ps = tflite::CreateModelDirect(builder, 3, &codes, &serialised_subgraphs);
+            tflite::FinishModelBuffer(builder, ps);
+            const uint8_t *base = builder.ReleaseRaw(size, offset);
+
+            const tflite::Model *m = tflite::GetModel(&base[offset]);
+            assert(m->operator_codes());
+            auto tflite_subgraphs = m->subgraphs();
+            assert(tflite_subgraphs->size() == 1);
+            auto tflite_operators = (*tflite_subgraphs)[0]->operators();
+            assert(tflite_operators->size() == 1);
+            op = (*tflite_operators)[0];
+            model = m;
+
+            return std::unique_ptr(base);
+        }(psSize, psOffset, tfliteModel, tfliteOp);
+
+        // The same model in graph flavor
+        const auto graphs = [&]()
+        {
+            std::vector> ops;
+            std::vector rawOps;
+            auto cifmStorageShape = Shape(1, 1, 1, 128);
+            std::vector cifmData(cifmStorageShape.Elements(), 0);
+            for ( size_t i = 0; i < cifmData.size(); i++ )
+            {
+                cifmData[i] = i;
+            }
+            auto cifm = CreateTensor("CIFM", cifmStorageShape, DataType::Int8, std::move(cifmData));
+            auto ifm = CreateTensor("IFM", cifmStorageShape.WithWidth(10), DataType::Int8);
+            auto ofm = CreateTensor("OFM", cifmStorageShape.WithWidth(10), DataType::Int8);
+            auto op = CreateOperation(OpType::Add, TensorUsage::IFM, ifm, TensorUsage::IFM1, cifm, TensorUsage::OFM, ofm);
+            op->SetPassthrough(tfliteOp);
+            rawOps.push_back(op.get());
+            ops.push_back(std::move(op));
+
+            // Create graph with ops
+            std::vector> ret;
+            auto gr = CreateGraph(ops);
+            gr->SetScheduledOrder(std::move(rawOps));
+            gr->SetPassthrough(tfliteModel);
+            ret.push_back(std::move(gr));
+
+            return ret;
+        }();
+
+        // These integers are output by the call to TfLiteWriter::Serialise below
+        int64_t output_buffer_offset = 0;
+        size_t output_buffer_size = 0;
+        for ( size_t i = 0; i < 2; i++ )
+        {
+            // First iteration : FlatBuffer will have a 2GB limit
+            // Second iteration : FlatBuffer will have a previous_size-1 limit to ensure offset buffers are used
+            TfLiteWriter writer(output_buffer_size > 0 ? output_buffer_size - 1 : size_t{1U << 31});
+
+            auto fb = writer.Serialise(graphs, {{}}, output_buffer_offset, output_buffer_size);
+
+            TfLiteReader reader;
+            std::vector> readerGraphs;
+
+            reader.LoadGraphs(&fb[output_buffer_offset], output_buffer_size, readerGraphs, nullptr, arch->Constraints());
+
+            REQUIRE(readerGraphs.size() == 1);
+            std::vector operations;
+            readerGraphs[0]->GetAllOperations(operations);
+            REQUIRE(operations.size() == 1);
+            const auto *cten = operations[0]->IFM(1);
+            REQUIRE(cten->Name() == "CIFM");
+            REQUIRE(cten->IsConstant());
+            auto v = cten->View();
+            REQUIRE(v.Elements() == 128);
+            BufferReader tensorReader = v.Values();
+            int8_t j = 0;
+            for ( const auto &e : tensorReader )
+            {
+                REQUIRE(e == j++);
+            }
+        }
+    }
+}
diff --git a/ethosu/regor/tflite/tflite_model_semantics.cpp b/ethosu/regor/tflite/tflite_model_semantics.cpp
index fdc351f3..08c9a992 100644
--- a/ethosu/regor/tflite/tflite_model_semantics.cpp
+++ b/ethosu/regor/tflite/tflite_model_semantics.cpp
@@ -246,7 +246,8 @@ void ConstraintEmptyConstTensors(const Model &m_model)
         auto tensor = tensors[BoundsCheckedIndex(input, tensors)];
         auto buffer = buffers[BoundsCheckedIndex(tensor->buffer(), buffers)];
         // Buffer 0 is a special buffer that is used for empty tensors
-        if ( tensor->buffer() != 0 && (!buffer->data() || buffer->data()->size() == 0) )
+        if ( (tensor->buffer() > 0 && (!buffer->data() || buffer->data()->size() == 0) && buffer->offset() <= 1) ||
+             (buffer->offset() > 1 && buffer->size() == 0) )
         {
             std::string constraint = "Constant tensors must not have empty buffers";
             std::string extra = "Found Constant Tensor with empty buffer";
diff --git a/ethosu/regor/tflite/tflite_reader.cpp b/ethosu/regor/tflite/tflite_reader.cpp
index ba362cc6..50d96324 100644
--- a/ethosu/regor/tflite/tflite_reader.cpp
+++ b/ethosu/regor/tflite/tflite_reader.cpp
@@ -38,6 +38,7 @@
 #include "tflite_schema_generated.hpp"

 #include
+#include
 #include
 #include
@@ -131,8 +132,8 @@ const tflite::Model *TfLiteReader::LoadModel(const void *input, size_t size)
     return tflite::GetModel(buffer);
 }

-void TfLiteReader::LoadGraphs(const tflite::Model *model, std::vector> &graphs,
-    OptimiserDatabase *optDb, IArchitectureConstraints *constraints)
+void TfLiteReader::LoadGraphs(const uint8_t *input, const tflite::Model *model,
+    std::vector> &graphs, OptimiserDatabase *optDb, IArchitectureConstraints *constraints)
 {
     assert(model);
@@ -164,9 +165,14 @@ void TfLiteReader::LoadGraphs(const tflite::Model *model, std::vector
-        if ( tflite_buffer->data() )
+        if ( tflite_buffer->offset() > 1 )
+        {
+            const uint8_t *data = &input[tflite_buffer->offset()];
+            buffers.push_back(std::make_shared(tflite_buffer->size(), data, true));
+        }
+        else if ( tflite_buffer->data() )
         {
-            uint8_t *data = const_cast(tflite_buffer->data()->data());
+            const uint8_t *data = tflite_buffer->data()->data();
             buffers.push_back(std::make_shared(tflite_buffer->data()->size(), data, true));
         }
         else
@@ -305,7 +311,7 @@ void TfLiteReader::LoadGraphs(const tflite::Model *model, std::vector
 void TfLiteReader::LoadGraphs(const void *input, size_t size, std::vector> &graphs,
     OptimiserDatabase *optDb, IArchitectureConstraints *constraints)
 {
-    LoadGraphs(LoadModel(input, size), graphs, optDb, constraints);
+    LoadGraphs(reinterpret_cast<const uint8_t *>(input), LoadModel(input, size), graphs, optDb, constraints);
 }

 std::shared_ptr TfLiteReader::ParseTensor(const tflite::Tensor *tflite_tensor,
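
Note on the buffer-offset convention the reader change above relies on: in the TFLite schema, a Buffer carries offset/size fields in addition to the inline data vector, and offset values 0 and 1 are reserved sentinels, so only offset > 1 means the payload is stored out-of-band at that byte offset from the start of the file. That is why LoadGraphs now receives the file base pointer alongside the parsed tflite::Model. The helper below is a minimal illustrative sketch of that resolution rule, not code from this patch; the name ResolveBufferData is made up, and it assumes the generated schema header (tflite_schema_generated.hpp) is in scope.

#include <cstdint>
#include <utility>

// Resolve a buffer's payload as {pointer, byte count}, or {nullptr, 0} for an empty buffer.
static std::pair<const uint8_t *, size_t> ResolveBufferData(const uint8_t *fileBase, const tflite::Buffer *buf)
{
    if ( buf->offset() > 1 )
    {
        // Data appended after the FlatBuffer, addressed from the start of the file
        return {fileBase + buf->offset(), size_t(buf->size())};
    }
    if ( buf->data() )
    {
        // Classic inline data vector stored inside the FlatBuffer
        return {buf->data()->data(), buf->data()->size()};
    }
    return {nullptr, 0};
}

With such a helper the inline and out-of-band cases can be handled uniformly, which is essentially what the new branch in TfLiteReader::LoadGraphs does.
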
diff --git a/ethosu/regor/tflite/tflite_reader.hpp b/ethosu/regor/tflite/tflite_reader.hpp index 54bb8ace..66b6fb32 100644 --- a/ethosu/regor/tflite/tflite_reader.hpp +++ b/ethosu/regor/tflite/tflite_reader.hpp @@ -38,12 +38,12 @@ class TfLiteReader public: TfLiteReader() {} - static void LoadGraphs(const tflite::Model *model, std::vector> &graphs, - OptimiserDatabase *optDb, IArchitectureConstraints *constraints); // From model static void LoadGraphs(const void *input, size_t size, std::vector> &graphs, OptimiserDatabase *optDb, IArchitectureConstraints *constraints); // From buffer private: + static void LoadGraphs(const uint8_t *input, const tflite::Model *model, std::vector> &graphs, + OptimiserDatabase *optDb, IArchitectureConstraints *constraints); // From model static const tflite::Model *LoadModel(const void *input, size_t size); static std::shared_ptr ParseTensor(const tflite::Tensor *tflite_tensor, const std::shared_ptr &buffer, std::unordered_map &tensorQuantization); diff --git a/ethosu/regor/tflite/tflite_schema_generated.hpp b/ethosu/regor/tflite/tflite_schema_generated.hpp index a5c8ab35..f7f7b28f 100644 --- a/ethosu/regor/tflite/tflite_schema_generated.hpp +++ b/ethosu/regor/tflite/tflite_schema_generated.hpp @@ -3,7 +3,7 @@ // To reproduce: // flatc version 23.12.23 // schema.fbs @v2.17.1 -// flatc --cpp --scoped-enums --reflect-names schema.fbs +// flatc --cpp --scoped-enums --reflect-names --gen-mutable schema.fbs // sed -i 's/ARG_MAX/ARGMAX/g' tflite_schema_generated.hpp @@ -3248,6 +3248,9 @@ struct CustomQuantization FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table const ::flatbuffers::Vector *custom() const { return GetPointer *>(VT_CUSTOM); } + ::flatbuffers::Vector *mutable_custom() { + return GetPointer<::flatbuffers::Vector *>(VT_CUSTOM); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_CUSTOM) && @@ -3309,15 +3312,27 @@ struct QuantizationParameters FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::T const ::flatbuffers::Vector *min() const { return GetPointer *>(VT_MIN); } + ::flatbuffers::Vector *mutable_min() { + return GetPointer<::flatbuffers::Vector *>(VT_MIN); + } const ::flatbuffers::Vector *max() const { return GetPointer *>(VT_MAX); } + ::flatbuffers::Vector *mutable_max() { + return GetPointer<::flatbuffers::Vector *>(VT_MAX); + } const ::flatbuffers::Vector *scale() const { return GetPointer *>(VT_SCALE); } + ::flatbuffers::Vector *mutable_scale() { + return GetPointer<::flatbuffers::Vector *>(VT_SCALE); + } const ::flatbuffers::Vector *zero_point() const { return GetPointer *>(VT_ZERO_POINT); } + ::flatbuffers::Vector *mutable_zero_point() { + return GetPointer<::flatbuffers::Vector *>(VT_ZERO_POINT); + } tflite::QuantizationDetails details_type() const { return static_cast(GetField(VT_DETAILS_TYPE, 0)); } @@ -3328,9 +3343,15 @@ struct QuantizationParameters FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::T const tflite::CustomQuantization *details_as_CustomQuantization() const { return details_type() == tflite::QuantizationDetails::CustomQuantization ? 
static_cast(details()) : nullptr; } + void *mutable_details() { + return GetPointer(VT_DETAILS); + } int32_t quantized_dimension() const { return GetField(VT_QUANTIZED_DIMENSION, 0); } + bool mutate_quantized_dimension(int32_t _quantized_dimension = 0) { + return SetField(VT_QUANTIZED_DIMENSION, _quantized_dimension, 0); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_MIN) && @@ -3444,6 +3465,9 @@ struct Int32Vector FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { const ::flatbuffers::Vector *values() const { return GetPointer *>(VT_VALUES); } + ::flatbuffers::Vector *mutable_values() { + return GetPointer<::flatbuffers::Vector *>(VT_VALUES); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_VALUES) && @@ -3498,6 +3522,9 @@ struct Uint16Vector FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { const ::flatbuffers::Vector *values() const { return GetPointer *>(VT_VALUES); } + ::flatbuffers::Vector *mutable_values() { + return GetPointer<::flatbuffers::Vector *>(VT_VALUES); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_VALUES) && @@ -3553,6 +3580,9 @@ struct Uint8Vector FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { const ::flatbuffers::Vector *values() const { return GetPointer *>(VT_VALUES); } + ::flatbuffers::Vector *mutable_values() { + return GetPointer<::flatbuffers::Vector *>(VT_VALUES); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_VALUES) && @@ -3613,9 +3643,15 @@ struct DimensionMetadata FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table tflite::DimensionType format() const { return static_cast(GetField(VT_FORMAT, 0)); } + bool mutate_format(tflite::DimensionType _format = static_cast(0)) { + return SetField(VT_FORMAT, static_cast(_format), 0); + } int32_t dense_size() const { return GetField(VT_DENSE_SIZE, 0); } + bool mutate_dense_size(int32_t _dense_size = 0) { + return SetField(VT_DENSE_SIZE, _dense_size, 0); + } tflite::SparseIndexVector array_segments_type() const { return static_cast(GetField(VT_ARRAY_SEGMENTS_TYPE, 0)); } @@ -3632,6 +3668,9 @@ struct DimensionMetadata FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table const tflite::Uint8Vector *array_segments_as_Uint8Vector() const { return array_segments_type() == tflite::SparseIndexVector::Uint8Vector ? static_cast(array_segments()) : nullptr; } + void *mutable_array_segments() { + return GetPointer(VT_ARRAY_SEGMENTS); + } tflite::SparseIndexVector array_indices_type() const { return static_cast(GetField(VT_ARRAY_INDICES_TYPE, 0)); } @@ -3648,6 +3687,9 @@ struct DimensionMetadata FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table const tflite::Uint8Vector *array_indices_as_Uint8Vector() const { return array_indices_type() == tflite::SparseIndexVector::Uint8Vector ? 
static_cast(array_indices()) : nullptr; } + void *mutable_array_indices() { + return GetPointer(VT_ARRAY_INDICES); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_FORMAT, 1) && @@ -3750,12 +3792,21 @@ struct SparsityParameters FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table const ::flatbuffers::Vector *traversal_order() const { return GetPointer *>(VT_TRAVERSAL_ORDER); } + ::flatbuffers::Vector *mutable_traversal_order() { + return GetPointer<::flatbuffers::Vector *>(VT_TRAVERSAL_ORDER); + } const ::flatbuffers::Vector *block_map() const { return GetPointer *>(VT_BLOCK_MAP); } + ::flatbuffers::Vector *mutable_block_map() { + return GetPointer<::flatbuffers::Vector *>(VT_BLOCK_MAP); + } const ::flatbuffers::Vector<::flatbuffers::Offset> *dim_metadata() const { return GetPointer> *>(VT_DIM_METADATA); } + ::flatbuffers::Vector<::flatbuffers::Offset> *mutable_dim_metadata() { + return GetPointer<::flatbuffers::Vector<::flatbuffers::Offset> *>(VT_DIM_METADATA); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_TRAVERSAL_ORDER) && @@ -3833,12 +3884,21 @@ struct VariantSubType FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { const ::flatbuffers::Vector *shape() const { return GetPointer *>(VT_SHAPE); } + ::flatbuffers::Vector *mutable_shape() { + return GetPointer<::flatbuffers::Vector *>(VT_SHAPE); + } tflite::TensorType type() const { return static_cast(GetField(VT_TYPE, 0)); } + bool mutate_type(tflite::TensorType _type = static_cast(0)) { + return SetField(VT_TYPE, static_cast(_type), 0); + } bool has_rank() const { return GetField(VT_HAS_RANK, 0) != 0; } + bool mutate_has_rank(bool _has_rank = 0) { + return SetField(VT_HAS_RANK, static_cast(_has_rank), 0); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_SHAPE) && @@ -3918,33 +3978,63 @@ struct Tensor FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { const ::flatbuffers::Vector *shape() const { return GetPointer *>(VT_SHAPE); } + ::flatbuffers::Vector *mutable_shape() { + return GetPointer<::flatbuffers::Vector *>(VT_SHAPE); + } tflite::TensorType type() const { return static_cast(GetField(VT_TYPE, 0)); } + bool mutate_type(tflite::TensorType _type = static_cast(0)) { + return SetField(VT_TYPE, static_cast(_type), 0); + } uint32_t buffer() const { return GetField(VT_BUFFER, 0); } + bool mutate_buffer(uint32_t _buffer = 0) { + return SetField(VT_BUFFER, _buffer, 0); + } const ::flatbuffers::String *name() const { return GetPointer(VT_NAME); } + ::flatbuffers::String *mutable_name() { + return GetPointer<::flatbuffers::String *>(VT_NAME); + } const tflite::QuantizationParameters *quantization() const { return GetPointer(VT_QUANTIZATION); } + tflite::QuantizationParameters *mutable_quantization() { + return GetPointer(VT_QUANTIZATION); + } bool is_variable() const { return GetField(VT_IS_VARIABLE, 0) != 0; } + bool mutate_is_variable(bool _is_variable = 0) { + return SetField(VT_IS_VARIABLE, static_cast(_is_variable), 0); + } const tflite::SparsityParameters *sparsity() const { return GetPointer(VT_SPARSITY); } + tflite::SparsityParameters *mutable_sparsity() { + return GetPointer(VT_SPARSITY); + } const ::flatbuffers::Vector *shape_signature() const { return GetPointer *>(VT_SHAPE_SIGNATURE); } + ::flatbuffers::Vector *mutable_shape_signature() { + return GetPointer<::flatbuffers::Vector 
*>(VT_SHAPE_SIGNATURE); + } bool has_rank() const { return GetField(VT_HAS_RANK, 0) != 0; } + bool mutate_has_rank(bool _has_rank = 0) { + return SetField(VT_HAS_RANK, static_cast(_has_rank), 0); + } const ::flatbuffers::Vector<::flatbuffers::Offset> *variant_tensors() const { return GetPointer> *>(VT_VARIANT_TENSORS); } + ::flatbuffers::Vector<::flatbuffers::Offset> *mutable_variant_tensors() { + return GetPointer<::flatbuffers::Vector<::flatbuffers::Offset> *>(VT_VARIANT_TENSORS); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_SHAPE) && @@ -4085,21 +4175,39 @@ struct StablehloGatherOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::T const ::flatbuffers::Vector *offset_dims() const { return GetPointer *>(VT_OFFSET_DIMS); } + ::flatbuffers::Vector *mutable_offset_dims() { + return GetPointer<::flatbuffers::Vector *>(VT_OFFSET_DIMS); + } const ::flatbuffers::Vector *collapsed_slice_dims() const { return GetPointer *>(VT_COLLAPSED_SLICE_DIMS); } + ::flatbuffers::Vector *mutable_collapsed_slice_dims() { + return GetPointer<::flatbuffers::Vector *>(VT_COLLAPSED_SLICE_DIMS); + } const ::flatbuffers::Vector *start_index_map() const { return GetPointer *>(VT_START_INDEX_MAP); } + ::flatbuffers::Vector *mutable_start_index_map() { + return GetPointer<::flatbuffers::Vector *>(VT_START_INDEX_MAP); + } int64_t index_vector_dim() const { return GetField(VT_INDEX_VECTOR_DIM, 0); } + bool mutate_index_vector_dim(int64_t _index_vector_dim = 0) { + return SetField(VT_INDEX_VECTOR_DIM, _index_vector_dim, 0); + } const ::flatbuffers::Vector *slice_sizes() const { return GetPointer *>(VT_SLICE_SIZES); } + ::flatbuffers::Vector *mutable_slice_sizes() { + return GetPointer<::flatbuffers::Vector *>(VT_SLICE_SIZES); + } bool indices_are_sorted() const { return GetField(VT_INDICES_ARE_SORTED, 0) != 0; } + bool mutate_indices_are_sorted(bool _indices_are_sorted = 0) { + return SetField(VT_INDICES_ARE_SORTED, static_cast(_indices_are_sorted), 0); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_OFFSET_DIMS) && @@ -4200,6 +4308,9 @@ struct StablehloTransposeOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers const ::flatbuffers::Vector *permutation() const { return GetPointer *>(VT_PERMUTATION); } + ::flatbuffers::Vector *mutable_permutation() { + return GetPointer<::flatbuffers::Vector *>(VT_PERMUTATION); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_PERMUTATION) && @@ -4258,18 +4369,33 @@ struct StablehloDotGeneralOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffer const ::flatbuffers::Vector *lhs_batching_dimensions() const { return GetPointer *>(VT_LHS_BATCHING_DIMENSIONS); } + ::flatbuffers::Vector *mutable_lhs_batching_dimensions() { + return GetPointer<::flatbuffers::Vector *>(VT_LHS_BATCHING_DIMENSIONS); + } const ::flatbuffers::Vector *rhs_batching_dimensions() const { return GetPointer *>(VT_RHS_BATCHING_DIMENSIONS); } + ::flatbuffers::Vector *mutable_rhs_batching_dimensions() { + return GetPointer<::flatbuffers::Vector *>(VT_RHS_BATCHING_DIMENSIONS); + } const ::flatbuffers::Vector *lhs_contracting_dimensions() const { return GetPointer *>(VT_LHS_CONTRACTING_DIMENSIONS); } + ::flatbuffers::Vector *mutable_lhs_contracting_dimensions() { + return GetPointer<::flatbuffers::Vector *>(VT_LHS_CONTRACTING_DIMENSIONS); + } const ::flatbuffers::Vector *rhs_contracting_dimensions() 
const { return GetPointer *>(VT_RHS_CONTRACTING_DIMENSIONS); } + ::flatbuffers::Vector *mutable_rhs_contracting_dimensions() { + return GetPointer<::flatbuffers::Vector *>(VT_RHS_CONTRACTING_DIMENSIONS); + } const ::flatbuffers::Vector *precision_config() const { return GetPointer *>(VT_PRECISION_CONFIG); } + ::flatbuffers::Vector *mutable_precision_config() { + return GetPointer<::flatbuffers::Vector *>(VT_PRECISION_CONFIG); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_LHS_BATCHING_DIMENSIONS) && @@ -4369,21 +4495,39 @@ struct StablehloReduceWindowOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuff const ::flatbuffers::Vector *window_dimensions() const { return GetPointer *>(VT_WINDOW_DIMENSIONS); } + ::flatbuffers::Vector *mutable_window_dimensions() { + return GetPointer<::flatbuffers::Vector *>(VT_WINDOW_DIMENSIONS); + } const ::flatbuffers::Vector *window_strides() const { return GetPointer *>(VT_WINDOW_STRIDES); } + ::flatbuffers::Vector *mutable_window_strides() { + return GetPointer<::flatbuffers::Vector *>(VT_WINDOW_STRIDES); + } const ::flatbuffers::Vector *base_dilations() const { return GetPointer *>(VT_BASE_DILATIONS); } + ::flatbuffers::Vector *mutable_base_dilations() { + return GetPointer<::flatbuffers::Vector *>(VT_BASE_DILATIONS); + } const ::flatbuffers::Vector *window_dilations() const { return GetPointer *>(VT_WINDOW_DILATIONS); } + ::flatbuffers::Vector *mutable_window_dilations() { + return GetPointer<::flatbuffers::Vector *>(VT_WINDOW_DILATIONS); + } const ::flatbuffers::Vector *padding() const { return GetPointer *>(VT_PADDING); } + ::flatbuffers::Vector *mutable_padding() { + return GetPointer<::flatbuffers::Vector *>(VT_PADDING); + } int32_t body_subgraph_index() const { return GetField(VT_BODY_SUBGRAPH_INDEX, 0); } + bool mutate_body_subgraph_index(int32_t _body_subgraph_index = 0) { + return SetField(VT_BODY_SUBGRAPH_INDEX, _body_subgraph_index, 0); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_WINDOW_DIMENSIONS) && @@ -4487,9 +4631,15 @@ struct StablehloWhileOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Ta int32_t cond_subgraph_index() const { return GetField(VT_COND_SUBGRAPH_INDEX, 0); } + bool mutate_cond_subgraph_index(int32_t _cond_subgraph_index = 0) { + return SetField(VT_COND_SUBGRAPH_INDEX, _cond_subgraph_index, 0); + } int32_t body_subgraph_index() const { return GetField(VT_BODY_SUBGRAPH_INDEX, 0); } + bool mutate_body_subgraph_index(int32_t _body_subgraph_index = 0) { + return SetField(VT_BODY_SUBGRAPH_INDEX, _body_subgraph_index, 0); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_COND_SUBGRAPH_INDEX, 4) && @@ -4542,12 +4692,21 @@ struct StablehloSortOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Tab int64_t dimension() const { return GetField(VT_DIMENSION, 0); } + bool mutate_dimension(int64_t _dimension = 0) { + return SetField(VT_DIMENSION, _dimension, 0); + } bool is_stable() const { return GetField(VT_IS_STABLE, 0) != 0; } + bool mutate_is_stable(bool _is_stable = 0) { + return SetField(VT_IS_STABLE, static_cast(_is_stable), 0); + } int32_t comparator_subgraph_index() const { return GetField(VT_COMPARATOR_SUBGRAPH_INDEX, 0); } + bool mutate_comparator_subgraph_index(int32_t _comparator_subgraph_index = 0) { + return SetField(VT_COMPARATOR_SUBGRAPH_INDEX, _comparator_subgraph_index, 0); + } bool 
Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_DIMENSION, 8) && @@ -4604,6 +4763,9 @@ struct StablehloConcatenateOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffe int64_t dimension() const { return GetField(VT_DIMENSION, 0); } + bool mutate_dimension(int64_t _dimension = 0) { + return SetField(VT_DIMENSION, _dimension, 0); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_DIMENSION, 8) && @@ -4648,6 +4810,9 @@ struct StablehloBroadcastInDimOptions FLATBUFFERS_FINAL_CLASS : private ::flatbu const ::flatbuffers::Vector *broadcast_dimensions() const { return GetPointer *>(VT_BROADCAST_DIMENSIONS); } + ::flatbuffers::Vector *mutable_broadcast_dimensions() { + return GetPointer<::flatbuffers::Vector *>(VT_BROADCAST_DIMENSIONS); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_BROADCAST_DIMENSIONS) && @@ -4703,9 +4868,15 @@ struct StablehloCompareOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers:: tflite::StablehloComparisonDirection comparison_direction() const { return static_cast(GetField(VT_COMPARISON_DIRECTION, 0)); } + bool mutate_comparison_direction(tflite::StablehloComparisonDirection _comparison_direction = static_cast(0)) { + return SetField(VT_COMPARISON_DIRECTION, static_cast(_comparison_direction), 0); + } tflite::StablehloComparisonType compare_type() const { return static_cast(GetField(VT_COMPARE_TYPE, 0)); } + bool mutate_compare_type(tflite::StablehloComparisonType _compare_type = static_cast(0)) { + return SetField(VT_COMPARE_TYPE, static_cast(_compare_type), 0); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_COMPARISON_DIRECTION, 4) && @@ -4756,6 +4927,9 @@ struct StablehloDynamicSliceOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuff const ::flatbuffers::Vector *slice_sizes() const { return GetPointer *>(VT_SLICE_SIZES); } + ::flatbuffers::Vector *mutable_slice_sizes() { + return GetPointer<::flatbuffers::Vector *>(VT_SLICE_SIZES); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_SLICE_SIZES) && @@ -4812,12 +4986,21 @@ struct StablehloPadOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Tabl const ::flatbuffers::Vector *edge_padding_low() const { return GetPointer *>(VT_EDGE_PADDING_LOW); } + ::flatbuffers::Vector *mutable_edge_padding_low() { + return GetPointer<::flatbuffers::Vector *>(VT_EDGE_PADDING_LOW); + } const ::flatbuffers::Vector *edge_padding_high() const { return GetPointer *>(VT_EDGE_PADDING_HIGH); } + ::flatbuffers::Vector *mutable_edge_padding_high() { + return GetPointer<::flatbuffers::Vector *>(VT_EDGE_PADDING_HIGH); + } const ::flatbuffers::Vector *interior_padding() const { return GetPointer *>(VT_INTERIOR_PADDING); } + ::flatbuffers::Vector *mutable_interior_padding() { + return GetPointer<::flatbuffers::Vector *>(VT_INTERIOR_PADDING); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_EDGE_PADDING_LOW) && @@ -4892,6 +5075,9 @@ struct StablehloIotaOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Tab int64_t iota_dimension() const { return GetField(VT_IOTA_DIMENSION, 0); } + bool mutate_iota_dimension(int64_t _iota_dimension = 0) { + return SetField(VT_IOTA_DIMENSION, _iota_dimension, 0); + } bool 
Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_IOTA_DIMENSION, 8) && @@ -4941,21 +5127,39 @@ struct StablehloCustomCallOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffer const ::flatbuffers::String *call_target_name() const { return GetPointer(VT_CALL_TARGET_NAME); } + ::flatbuffers::String *mutable_call_target_name() { + return GetPointer<::flatbuffers::String *>(VT_CALL_TARGET_NAME); + } bool has_side_effect() const { return GetField(VT_HAS_SIDE_EFFECT, 0) != 0; } + bool mutate_has_side_effect(bool _has_side_effect = 0) { + return SetField(VT_HAS_SIDE_EFFECT, static_cast(_has_side_effect), 0); + } const ::flatbuffers::String *backend_config() const { return GetPointer(VT_BACKEND_CONFIG); } + ::flatbuffers::String *mutable_backend_config() { + return GetPointer<::flatbuffers::String *>(VT_BACKEND_CONFIG); + } int32_t api_version() const { return GetField(VT_API_VERSION, 0); } + bool mutate_api_version(int32_t _api_version = 0) { + return SetField(VT_API_VERSION, _api_version, 0); + } const ::flatbuffers::Vector *called_computations() const { return GetPointer *>(VT_CALLED_COMPUTATIONS); } + ::flatbuffers::Vector *mutable_called_computations() { + return GetPointer<::flatbuffers::Vector *>(VT_CALLED_COMPUTATIONS); + } const ::flatbuffers::Vector *custom_attributes() const { return GetPointer *>(VT_CUSTOM_ATTRIBUTES); } + ::flatbuffers::Vector *mutable_custom_attributes() { + return GetPointer<::flatbuffers::Vector *>(VT_CUSTOM_ATTRIBUTES); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_CALL_TARGET_NAME) && @@ -5057,9 +5261,15 @@ struct StablehloReduceOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::T const ::flatbuffers::Vector *dimensions() const { return GetPointer *>(VT_DIMENSIONS); } + ::flatbuffers::Vector *mutable_dimensions() { + return GetPointer<::flatbuffers::Vector *>(VT_DIMENSIONS); + } int32_t body_subgraph_index() const { return GetField(VT_BODY_SUBGRAPH_INDEX, 0); } + bool mutate_body_subgraph_index(int32_t _body_subgraph_index = 0) { + return SetField(VT_BODY_SUBGRAPH_INDEX, _body_subgraph_index, 0); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_DIMENSIONS) && @@ -5124,12 +5334,21 @@ struct StablehloSliceOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Ta const ::flatbuffers::Vector *start_indices() const { return GetPointer *>(VT_START_INDICES); } + ::flatbuffers::Vector *mutable_start_indices() { + return GetPointer<::flatbuffers::Vector *>(VT_START_INDICES); + } const ::flatbuffers::Vector *limit_indices() const { return GetPointer *>(VT_LIMIT_INDICES); } + ::flatbuffers::Vector *mutable_limit_indices() { + return GetPointer<::flatbuffers::Vector *>(VT_LIMIT_INDICES); + } const ::flatbuffers::Vector *strides() const { return GetPointer *>(VT_STRIDES); } + ::flatbuffers::Vector *mutable_strides() { + return GetPointer<::flatbuffers::Vector *>(VT_STRIDES); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_START_INDICES) && @@ -5220,54 +5439,105 @@ struct StablehloConvolutionOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffe const ::flatbuffers::Vector *window_strides() const { return GetPointer *>(VT_WINDOW_STRIDES); } + ::flatbuffers::Vector *mutable_window_strides() { + return GetPointer<::flatbuffers::Vector *>(VT_WINDOW_STRIDES); + } const ::flatbuffers::Vector 
*padding() const { return GetPointer *>(VT_PADDING); } + ::flatbuffers::Vector *mutable_padding() { + return GetPointer<::flatbuffers::Vector *>(VT_PADDING); + } const ::flatbuffers::Vector *lhs_dilation() const { return GetPointer *>(VT_LHS_DILATION); } + ::flatbuffers::Vector *mutable_lhs_dilation() { + return GetPointer<::flatbuffers::Vector *>(VT_LHS_DILATION); + } const ::flatbuffers::Vector *rhs_dilation() const { return GetPointer *>(VT_RHS_DILATION); } + ::flatbuffers::Vector *mutable_rhs_dilation() { + return GetPointer<::flatbuffers::Vector *>(VT_RHS_DILATION); + } const ::flatbuffers::Vector *window_reversal() const { return GetPointer *>(VT_WINDOW_REVERSAL); } + ::flatbuffers::Vector *mutable_window_reversal() { + return GetPointer<::flatbuffers::Vector *>(VT_WINDOW_REVERSAL); + } int64_t input_batch_dimension() const { return GetField(VT_INPUT_BATCH_DIMENSION, 0); } + bool mutate_input_batch_dimension(int64_t _input_batch_dimension = 0) { + return SetField(VT_INPUT_BATCH_DIMENSION, _input_batch_dimension, 0); + } int64_t input_feature_dimension() const { return GetField(VT_INPUT_FEATURE_DIMENSION, 0); } + bool mutate_input_feature_dimension(int64_t _input_feature_dimension = 0) { + return SetField(VT_INPUT_FEATURE_DIMENSION, _input_feature_dimension, 0); + } const ::flatbuffers::Vector *input_spatial_dimensions() const { return GetPointer *>(VT_INPUT_SPATIAL_DIMENSIONS); } + ::flatbuffers::Vector *mutable_input_spatial_dimensions() { + return GetPointer<::flatbuffers::Vector *>(VT_INPUT_SPATIAL_DIMENSIONS); + } int64_t kernel_input_feature_dimension() const { return GetField(VT_KERNEL_INPUT_FEATURE_DIMENSION, 0); } + bool mutate_kernel_input_feature_dimension(int64_t _kernel_input_feature_dimension = 0) { + return SetField(VT_KERNEL_INPUT_FEATURE_DIMENSION, _kernel_input_feature_dimension, 0); + } int64_t kernel_output_feature_dimension() const { return GetField(VT_KERNEL_OUTPUT_FEATURE_DIMENSION, 0); } + bool mutate_kernel_output_feature_dimension(int64_t _kernel_output_feature_dimension = 0) { + return SetField(VT_KERNEL_OUTPUT_FEATURE_DIMENSION, _kernel_output_feature_dimension, 0); + } const ::flatbuffers::Vector *kernel_spatial_dimensions() const { return GetPointer *>(VT_KERNEL_SPATIAL_DIMENSIONS); } + ::flatbuffers::Vector *mutable_kernel_spatial_dimensions() { + return GetPointer<::flatbuffers::Vector *>(VT_KERNEL_SPATIAL_DIMENSIONS); + } int64_t output_batch_dimension() const { return GetField(VT_OUTPUT_BATCH_DIMENSION, 0); } + bool mutate_output_batch_dimension(int64_t _output_batch_dimension = 0) { + return SetField(VT_OUTPUT_BATCH_DIMENSION, _output_batch_dimension, 0); + } int64_t output_feature_dimension() const { return GetField(VT_OUTPUT_FEATURE_DIMENSION, 0); } + bool mutate_output_feature_dimension(int64_t _output_feature_dimension = 0) { + return SetField(VT_OUTPUT_FEATURE_DIMENSION, _output_feature_dimension, 0); + } const ::flatbuffers::Vector *output_spatial_dimensions() const { return GetPointer *>(VT_OUTPUT_SPATIAL_DIMENSIONS); } + ::flatbuffers::Vector *mutable_output_spatial_dimensions() { + return GetPointer<::flatbuffers::Vector *>(VT_OUTPUT_SPATIAL_DIMENSIONS); + } int64_t feature_group_count() const { return GetField(VT_FEATURE_GROUP_COUNT, 0); } + bool mutate_feature_group_count(int64_t _feature_group_count = 0) { + return SetField(VT_FEATURE_GROUP_COUNT, _feature_group_count, 0); + } int64_t batch_group_count() const { return GetField(VT_BATCH_GROUP_COUNT, 0); } + bool mutate_batch_group_count(int64_t _batch_group_count = 0) { + return 
SetField(VT_BATCH_GROUP_COUNT, _batch_group_count, 0); + } const ::flatbuffers::Vector *precision_config() const { return GetPointer *>(VT_PRECISION_CONFIG); } + ::flatbuffers::Vector *mutable_precision_config() { + return GetPointer<::flatbuffers::Vector *>(VT_PRECISION_CONFIG); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_WINDOW_STRIDES) && @@ -5472,24 +5742,45 @@ struct StablehloScatterOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers:: bool indices_are_sorted() const { return GetField(VT_INDICES_ARE_SORTED, 0) != 0; } + bool mutate_indices_are_sorted(bool _indices_are_sorted = 0) { + return SetField(VT_INDICES_ARE_SORTED, static_cast(_indices_are_sorted), 0); + } const ::flatbuffers::Vector *update_window_dims() const { return GetPointer *>(VT_UPDATE_WINDOW_DIMS); } + ::flatbuffers::Vector *mutable_update_window_dims() { + return GetPointer<::flatbuffers::Vector *>(VT_UPDATE_WINDOW_DIMS); + } const ::flatbuffers::Vector *inserted_window_dims() const { return GetPointer *>(VT_INSERTED_WINDOW_DIMS); } + ::flatbuffers::Vector *mutable_inserted_window_dims() { + return GetPointer<::flatbuffers::Vector *>(VT_INSERTED_WINDOW_DIMS); + } const ::flatbuffers::Vector *scatter_dims_to_operand_dims() const { return GetPointer *>(VT_SCATTER_DIMS_TO_OPERAND_DIMS); } + ::flatbuffers::Vector *mutable_scatter_dims_to_operand_dims() { + return GetPointer<::flatbuffers::Vector *>(VT_SCATTER_DIMS_TO_OPERAND_DIMS); + } int64_t index_vector_dim() const { return GetField(VT_INDEX_VECTOR_DIM, 0); } + bool mutate_index_vector_dim(int64_t _index_vector_dim = 0) { + return SetField(VT_INDEX_VECTOR_DIM, _index_vector_dim, 0); + } bool unique_indices() const { return GetField(VT_UNIQUE_INDICES, 0) != 0; } + bool mutate_unique_indices(bool _unique_indices = 0) { + return SetField(VT_UNIQUE_INDICES, static_cast(_unique_indices), 0); + } int32_t update_computation_subgraph_index() const { return GetField(VT_UPDATE_COMPUTATION_SUBGRAPH_INDEX, 0); } + bool mutate_update_computation_subgraph_index(int32_t _update_computation_subgraph_index = 0) { + return SetField(VT_UPDATE_COMPUTATION_SUBGRAPH_INDEX, _update_computation_subgraph_index, 0); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_INDICES_ARE_SORTED, 1) && @@ -5596,6 +5887,9 @@ struct StablehloRngBitGeneratorOptions FLATBUFFERS_FINAL_CLASS : private ::flatb tflite::RngAlgorithm algorithm() const { return static_cast(GetField(VT_ALGORITHM, 0)); } + bool mutate_algorithm(tflite::RngAlgorithm _algorithm = static_cast(0)) { + return SetField(VT_ALGORITHM, static_cast(_algorithm), 0); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_ALGORITHM, 1) && @@ -5646,24 +5940,45 @@ struct Conv2DOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { tflite::Padding padding() const { return static_cast(GetField(VT_PADDING, 0)); } + bool mutate_padding(tflite::Padding _padding = static_cast(0)) { + return SetField(VT_PADDING, static_cast(_padding), 0); + } int32_t stride_w() const { return GetField(VT_STRIDE_W, 0); } + bool mutate_stride_w(int32_t _stride_w = 0) { + return SetField(VT_STRIDE_W, _stride_w, 0); + } int32_t stride_h() const { return GetField(VT_STRIDE_H, 0); } + bool mutate_stride_h(int32_t _stride_h = 0) { + return SetField(VT_STRIDE_H, _stride_h, 0); + } tflite::ActivationFunctionType fused_activation_function() const { return 
static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); } + bool mutate_fused_activation_function(tflite::ActivationFunctionType _fused_activation_function = static_cast(0)) { + return SetField(VT_FUSED_ACTIVATION_FUNCTION, static_cast(_fused_activation_function), 0); + } int32_t dilation_w_factor() const { return GetField(VT_DILATION_W_FACTOR, 1); } + bool mutate_dilation_w_factor(int32_t _dilation_w_factor = 1) { + return SetField(VT_DILATION_W_FACTOR, _dilation_w_factor, 1); + } int32_t dilation_h_factor() const { return GetField(VT_DILATION_H_FACTOR, 1); } + bool mutate_dilation_h_factor(int32_t _dilation_h_factor = 1) { + return SetField(VT_DILATION_H_FACTOR, _dilation_h_factor, 1); + } tflite::TensorType quantized_bias_type() const { return static_cast(GetField(VT_QUANTIZED_BIAS_TYPE, 0)); } + bool mutate_quantized_bias_type(tflite::TensorType _quantized_bias_type = static_cast(0)) { + return SetField(VT_QUANTIZED_BIAS_TYPE, static_cast(_quantized_bias_type), 0); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_PADDING, 1) && @@ -5751,27 +6066,51 @@ struct Conv3DOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { tflite::Padding padding() const { return static_cast(GetField(VT_PADDING, 0)); } + bool mutate_padding(tflite::Padding _padding = static_cast(0)) { + return SetField(VT_PADDING, static_cast(_padding), 0); + } int32_t stride_d() const { return GetField(VT_STRIDE_D, 0); } + bool mutate_stride_d(int32_t _stride_d = 0) { + return SetField(VT_STRIDE_D, _stride_d, 0); + } int32_t stride_w() const { return GetField(VT_STRIDE_W, 0); } + bool mutate_stride_w(int32_t _stride_w = 0) { + return SetField(VT_STRIDE_W, _stride_w, 0); + } int32_t stride_h() const { return GetField(VT_STRIDE_H, 0); } + bool mutate_stride_h(int32_t _stride_h = 0) { + return SetField(VT_STRIDE_H, _stride_h, 0); + } tflite::ActivationFunctionType fused_activation_function() const { return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); } + bool mutate_fused_activation_function(tflite::ActivationFunctionType _fused_activation_function = static_cast(0)) { + return SetField(VT_FUSED_ACTIVATION_FUNCTION, static_cast(_fused_activation_function), 0); + } int32_t dilation_d_factor() const { return GetField(VT_DILATION_D_FACTOR, 1); } + bool mutate_dilation_d_factor(int32_t _dilation_d_factor = 1) { + return SetField(VT_DILATION_D_FACTOR, _dilation_d_factor, 1); + } int32_t dilation_w_factor() const { return GetField(VT_DILATION_W_FACTOR, 1); } + bool mutate_dilation_w_factor(int32_t _dilation_w_factor = 1) { + return SetField(VT_DILATION_W_FACTOR, _dilation_w_factor, 1); + } int32_t dilation_h_factor() const { return GetField(VT_DILATION_H_FACTOR, 1); } + bool mutate_dilation_h_factor(int32_t _dilation_h_factor = 1) { + return SetField(VT_DILATION_H_FACTOR, _dilation_h_factor, 1); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_PADDING, 1) && @@ -5863,21 +6202,39 @@ struct Pool2DOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { tflite::Padding padding() const { return static_cast(GetField(VT_PADDING, 0)); } + bool mutate_padding(tflite::Padding _padding = static_cast(0)) { + return SetField(VT_PADDING, static_cast(_padding), 0); + } int32_t stride_w() const { return GetField(VT_STRIDE_W, 0); } + bool mutate_stride_w(int32_t _stride_w = 0) { + return SetField(VT_STRIDE_W, _stride_w, 0); + } int32_t stride_h() const { return 
GetField(VT_STRIDE_H, 0); } + bool mutate_stride_h(int32_t _stride_h = 0) { + return SetField(VT_STRIDE_H, _stride_h, 0); + } int32_t filter_width() const { return GetField(VT_FILTER_WIDTH, 0); } + bool mutate_filter_width(int32_t _filter_width = 0) { + return SetField(VT_FILTER_WIDTH, _filter_width, 0); + } int32_t filter_height() const { return GetField(VT_FILTER_HEIGHT, 0); } + bool mutate_filter_height(int32_t _filter_height = 0) { + return SetField(VT_FILTER_HEIGHT, _filter_height, 0); + } tflite::ActivationFunctionType fused_activation_function() const { return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); } + bool mutate_fused_activation_function(tflite::ActivationFunctionType _fused_activation_function = static_cast(0)) { + return SetField(VT_FUSED_ACTIVATION_FUNCTION, static_cast(_fused_activation_function), 0); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_PADDING, 1) && @@ -5958,24 +6315,45 @@ struct DepthwiseConv2DOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::T tflite::Padding padding() const { return static_cast(GetField(VT_PADDING, 0)); } + bool mutate_padding(tflite::Padding _padding = static_cast(0)) { + return SetField(VT_PADDING, static_cast(_padding), 0); + } int32_t stride_w() const { return GetField(VT_STRIDE_W, 0); } + bool mutate_stride_w(int32_t _stride_w = 0) { + return SetField(VT_STRIDE_W, _stride_w, 0); + } int32_t stride_h() const { return GetField(VT_STRIDE_H, 0); } + bool mutate_stride_h(int32_t _stride_h = 0) { + return SetField(VT_STRIDE_H, _stride_h, 0); + } int32_t depth_multiplier() const { return GetField(VT_DEPTH_MULTIPLIER, 0); } + bool mutate_depth_multiplier(int32_t _depth_multiplier = 0) { + return SetField(VT_DEPTH_MULTIPLIER, _depth_multiplier, 0); + } tflite::ActivationFunctionType fused_activation_function() const { return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); } + bool mutate_fused_activation_function(tflite::ActivationFunctionType _fused_activation_function = static_cast(0)) { + return SetField(VT_FUSED_ACTIVATION_FUNCTION, static_cast(_fused_activation_function), 0); + } int32_t dilation_w_factor() const { return GetField(VT_DILATION_W_FACTOR, 1); } + bool mutate_dilation_w_factor(int32_t _dilation_w_factor = 1) { + return SetField(VT_DILATION_W_FACTOR, _dilation_w_factor, 1); + } int32_t dilation_h_factor() const { return GetField(VT_DILATION_H_FACTOR, 1); } + bool mutate_dilation_h_factor(int32_t _dilation_h_factor = 1) { + return SetField(VT_DILATION_H_FACTOR, _dilation_h_factor, 1); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_PADDING, 1) && @@ -6058,12 +6436,21 @@ struct ConcatEmbeddingsOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers:: int32_t num_channels() const { return GetField(VT_NUM_CHANNELS, 0); } + bool mutate_num_channels(int32_t _num_channels = 0) { + return SetField(VT_NUM_CHANNELS, _num_channels, 0); + } const ::flatbuffers::Vector *num_columns_per_channel() const { return GetPointer *>(VT_NUM_COLUMNS_PER_CHANNEL); } + ::flatbuffers::Vector *mutable_num_columns_per_channel() { + return GetPointer<::flatbuffers::Vector *>(VT_NUM_COLUMNS_PER_CHANNEL); + } const ::flatbuffers::Vector *embedding_dim_per_channel() const { return GetPointer *>(VT_EMBEDDING_DIM_PER_CHANNEL); } + ::flatbuffers::Vector *mutable_embedding_dim_per_channel() { + return GetPointer<::flatbuffers::Vector *>(VT_EMBEDDING_DIM_PER_CHANNEL); + } bool 
Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_NUM_CHANNELS, 4) && @@ -6136,6 +6523,9 @@ struct LSHProjectionOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Tab tflite::LSHProjectionType type() const { return static_cast(GetField(VT_TYPE, 0)); } + bool mutate_type(tflite::LSHProjectionType _type = static_cast(0)) { + return SetField(VT_TYPE, static_cast(_type), 0); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_TYPE, 1) && @@ -6182,12 +6572,21 @@ struct SVDFOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { int32_t rank() const { return GetField(VT_RANK, 0); } + bool mutate_rank(int32_t _rank = 0) { + return SetField(VT_RANK, _rank, 0); + } tflite::ActivationFunctionType fused_activation_function() const { return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); } + bool mutate_fused_activation_function(tflite::ActivationFunctionType _fused_activation_function = static_cast(0)) { + return SetField(VT_FUSED_ACTIVATION_FUNCTION, static_cast(_fused_activation_function), 0); + } bool asymmetric_quantize_inputs() const { return GetField(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0; } + bool mutate_asymmetric_quantize_inputs(bool _asymmetric_quantize_inputs = 0) { + return SetField(VT_ASYMMETRIC_QUANTIZE_INPUTS, static_cast(_asymmetric_quantize_inputs), 0); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_RANK, 4) && @@ -6245,9 +6644,15 @@ struct RNNOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { tflite::ActivationFunctionType fused_activation_function() const { return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); } + bool mutate_fused_activation_function(tflite::ActivationFunctionType _fused_activation_function = static_cast(0)) { + return SetField(VT_FUSED_ACTIVATION_FUNCTION, static_cast(_fused_activation_function), 0); + } bool asymmetric_quantize_inputs() const { return GetField(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0; } + bool mutate_asymmetric_quantize_inputs(bool _asymmetric_quantize_inputs = 0) { + return SetField(VT_ASYMMETRIC_QUANTIZE_INPUTS, static_cast(_asymmetric_quantize_inputs), 0); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION, 1) && @@ -6300,12 +6705,21 @@ struct SequenceRNNOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table bool time_major() const { return GetField(VT_TIME_MAJOR, 0) != 0; } + bool mutate_time_major(bool _time_major = 0) { + return SetField(VT_TIME_MAJOR, static_cast(_time_major), 0); + } tflite::ActivationFunctionType fused_activation_function() const { return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); } + bool mutate_fused_activation_function(tflite::ActivationFunctionType _fused_activation_function = static_cast(0)) { + return SetField(VT_FUSED_ACTIVATION_FUNCTION, static_cast(_fused_activation_function), 0); + } bool asymmetric_quantize_inputs() const { return GetField(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0; } + bool mutate_asymmetric_quantize_inputs(bool _asymmetric_quantize_inputs = 0) { + return SetField(VT_ASYMMETRIC_QUANTIZE_INPUTS, static_cast(_asymmetric_quantize_inputs), 0); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_TIME_MAJOR, 1) && @@ -6365,15 +6779,27 @@ struct 
BidirectionalSequenceRNNOptions FLATBUFFERS_FINAL_CLASS : private ::flatb bool time_major() const { return GetField(VT_TIME_MAJOR, 0) != 0; } + bool mutate_time_major(bool _time_major = 0) { + return SetField(VT_TIME_MAJOR, static_cast(_time_major), 0); + } tflite::ActivationFunctionType fused_activation_function() const { return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); } + bool mutate_fused_activation_function(tflite::ActivationFunctionType _fused_activation_function = static_cast(0)) { + return SetField(VT_FUSED_ACTIVATION_FUNCTION, static_cast(_fused_activation_function), 0); + } bool merge_outputs() const { return GetField(VT_MERGE_OUTPUTS, 0) != 0; } + bool mutate_merge_outputs(bool _merge_outputs = 0) { + return SetField(VT_MERGE_OUTPUTS, static_cast(_merge_outputs), 0); + } bool asymmetric_quantize_inputs() const { return GetField(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0; } + bool mutate_asymmetric_quantize_inputs(bool _asymmetric_quantize_inputs = 0) { + return SetField(VT_ASYMMETRIC_QUANTIZE_INPUTS, static_cast(_asymmetric_quantize_inputs), 0); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_TIME_MAJOR, 1) && @@ -6440,18 +6866,33 @@ struct FullyConnectedOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Ta tflite::ActivationFunctionType fused_activation_function() const { return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); } + bool mutate_fused_activation_function(tflite::ActivationFunctionType _fused_activation_function = static_cast(0)) { + return SetField(VT_FUSED_ACTIVATION_FUNCTION, static_cast(_fused_activation_function), 0); + } tflite::FullyConnectedOptionsWeightsFormat weights_format() const { return static_cast(GetField(VT_WEIGHTS_FORMAT, 0)); } + bool mutate_weights_format(tflite::FullyConnectedOptionsWeightsFormat _weights_format = static_cast(0)) { + return SetField(VT_WEIGHTS_FORMAT, static_cast(_weights_format), 0); + } bool keep_num_dims() const { return GetField(VT_KEEP_NUM_DIMS, 0) != 0; } + bool mutate_keep_num_dims(bool _keep_num_dims = 0) { + return SetField(VT_KEEP_NUM_DIMS, static_cast(_keep_num_dims), 0); + } bool asymmetric_quantize_inputs() const { return GetField(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0; } + bool mutate_asymmetric_quantize_inputs(bool _asymmetric_quantize_inputs = 0) { + return SetField(VT_ASYMMETRIC_QUANTIZE_INPUTS, static_cast(_asymmetric_quantize_inputs), 0); + } tflite::TensorType quantized_bias_type() const { return static_cast(GetField(VT_QUANTIZED_BIAS_TYPE, 0)); } + bool mutate_quantized_bias_type(tflite::TensorType _quantized_bias_type = static_cast(0)) { + return SetField(VT_QUANTIZED_BIAS_TYPE, static_cast(_quantized_bias_type), 0); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION, 1) && @@ -6520,6 +6961,9 @@ struct SoftmaxOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { float beta() const { return GetField(VT_BETA, 0.0f); } + bool mutate_beta(float _beta = 0.0f) { + return SetField(VT_BETA, _beta, 0.0f); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_BETA, 4) && @@ -6565,9 +7009,15 @@ struct ConcatenationOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Tab int32_t axis() const { return GetField(VT_AXIS, 0); } + bool mutate_axis(int32_t _axis = 0) { + return SetField(VT_AXIS, _axis, 0); + } tflite::ActivationFunctionType 
fused_activation_function() const { return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); } + bool mutate_fused_activation_function(tflite::ActivationFunctionType _fused_activation_function = static_cast(0)) { + return SetField(VT_FUSED_ACTIVATION_FUNCTION, static_cast(_fused_activation_function), 0); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_AXIS, 4) && @@ -6619,9 +7069,15 @@ struct AddOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { tflite::ActivationFunctionType fused_activation_function() const { return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); } + bool mutate_fused_activation_function(tflite::ActivationFunctionType _fused_activation_function = static_cast(0)) { + return SetField(VT_FUSED_ACTIVATION_FUNCTION, static_cast(_fused_activation_function), 0); + } bool pot_scale_int16() const { return GetField(VT_POT_SCALE_INT16, 1) != 0; } + bool mutate_pot_scale_int16(bool _pot_scale_int16 = 1) { + return SetField(VT_POT_SCALE_INT16, static_cast(_pot_scale_int16), 1); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION, 1) && @@ -6672,6 +7128,9 @@ struct MulOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { tflite::ActivationFunctionType fused_activation_function() const { return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); } + bool mutate_fused_activation_function(tflite::ActivationFunctionType _fused_activation_function = static_cast(0)) { + return SetField(VT_FUSED_ACTIVATION_FUNCTION, static_cast(_fused_activation_function), 0); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION, 1) && @@ -6716,6 +7175,9 @@ struct L2NormOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { tflite::ActivationFunctionType fused_activation_function() const { return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); } + bool mutate_fused_activation_function(tflite::ActivationFunctionType _fused_activation_function = static_cast(0)) { + return SetField(VT_FUSED_ACTIVATION_FUNCTION, static_cast(_fused_activation_function), 0); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION, 1) && @@ -6763,15 +7225,27 @@ struct LocalResponseNormalizationOptions FLATBUFFERS_FINAL_CLASS : private ::fla int32_t radius() const { return GetField(VT_RADIUS, 0); } + bool mutate_radius(int32_t _radius = 0) { + return SetField(VT_RADIUS, _radius, 0); + } float bias() const { return GetField(VT_BIAS, 0.0f); } + bool mutate_bias(float _bias = 0.0f) { + return SetField(VT_BIAS, _bias, 0.0f); + } float alpha() const { return GetField(VT_ALPHA, 0.0f); } + bool mutate_alpha(float _alpha = 0.0f) { + return SetField(VT_ALPHA, _alpha, 0.0f); + } float beta() const { return GetField(VT_BETA, 0.0f); } + bool mutate_beta(float _beta = 0.0f) { + return SetField(VT_BETA, _beta, 0.0f); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_RADIUS, 4) && @@ -6838,18 +7312,33 @@ struct LSTMOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { tflite::ActivationFunctionType fused_activation_function() const { return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); } + bool 
mutate_fused_activation_function(tflite::ActivationFunctionType _fused_activation_function = static_cast(0)) { + return SetField(VT_FUSED_ACTIVATION_FUNCTION, static_cast(_fused_activation_function), 0); + } float cell_clip() const { return GetField(VT_CELL_CLIP, 0.0f); } + bool mutate_cell_clip(float _cell_clip = 0.0f) { + return SetField(VT_CELL_CLIP, _cell_clip, 0.0f); + } float proj_clip() const { return GetField(VT_PROJ_CLIP, 0.0f); } + bool mutate_proj_clip(float _proj_clip = 0.0f) { + return SetField(VT_PROJ_CLIP, _proj_clip, 0.0f); + } tflite::LSTMKernelType kernel_type() const { return static_cast(GetField(VT_KERNEL_TYPE, 0)); } + bool mutate_kernel_type(tflite::LSTMKernelType _kernel_type = static_cast(0)) { + return SetField(VT_KERNEL_TYPE, static_cast(_kernel_type), 0); + } bool asymmetric_quantize_inputs() const { return GetField(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0; } + bool mutate_asymmetric_quantize_inputs(bool _asymmetric_quantize_inputs = 0) { + return SetField(VT_ASYMMETRIC_QUANTIZE_INPUTS, static_cast(_asymmetric_quantize_inputs), 0); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION, 1) && @@ -6923,21 +7412,39 @@ struct UnidirectionalSequenceLSTMOptions FLATBUFFERS_FINAL_CLASS : private ::fla tflite::ActivationFunctionType fused_activation_function() const { return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); } + bool mutate_fused_activation_function(tflite::ActivationFunctionType _fused_activation_function = static_cast(0)) { + return SetField(VT_FUSED_ACTIVATION_FUNCTION, static_cast(_fused_activation_function), 0); + } float cell_clip() const { return GetField(VT_CELL_CLIP, 0.0f); } + bool mutate_cell_clip(float _cell_clip = 0.0f) { + return SetField(VT_CELL_CLIP, _cell_clip, 0.0f); + } float proj_clip() const { return GetField(VT_PROJ_CLIP, 0.0f); } + bool mutate_proj_clip(float _proj_clip = 0.0f) { + return SetField(VT_PROJ_CLIP, _proj_clip, 0.0f); + } bool time_major() const { return GetField(VT_TIME_MAJOR, 0) != 0; } + bool mutate_time_major(bool _time_major = 0) { + return SetField(VT_TIME_MAJOR, static_cast(_time_major), 0); + } bool asymmetric_quantize_inputs() const { return GetField(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0; } + bool mutate_asymmetric_quantize_inputs(bool _asymmetric_quantize_inputs = 0) { + return SetField(VT_ASYMMETRIC_QUANTIZE_INPUTS, static_cast(_asymmetric_quantize_inputs), 0); + } bool diagonal_recurrent_tensors() const { return GetField(VT_DIAGONAL_RECURRENT_TENSORS, 0) != 0; } + bool mutate_diagonal_recurrent_tensors(bool _diagonal_recurrent_tensors = 0) { + return SetField(VT_DIAGONAL_RECURRENT_TENSORS, static_cast(_diagonal_recurrent_tensors), 0); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION, 1) && @@ -7017,21 +7524,39 @@ struct BidirectionalSequenceLSTMOptions FLATBUFFERS_FINAL_CLASS : private ::flat tflite::ActivationFunctionType fused_activation_function() const { return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); } + bool mutate_fused_activation_function(tflite::ActivationFunctionType _fused_activation_function = static_cast(0)) { + return SetField(VT_FUSED_ACTIVATION_FUNCTION, static_cast(_fused_activation_function), 0); + } float cell_clip() const { return GetField(VT_CELL_CLIP, 0.0f); } + bool mutate_cell_clip(float _cell_clip = 0.0f) { + return SetField(VT_CELL_CLIP, _cell_clip, 0.0f); + } float 
proj_clip() const { return GetField(VT_PROJ_CLIP, 0.0f); } + bool mutate_proj_clip(float _proj_clip = 0.0f) { + return SetField(VT_PROJ_CLIP, _proj_clip, 0.0f); + } bool merge_outputs() const { return GetField(VT_MERGE_OUTPUTS, 0) != 0; } + bool mutate_merge_outputs(bool _merge_outputs = 0) { + return SetField(VT_MERGE_OUTPUTS, static_cast(_merge_outputs), 0); + } bool time_major() const { return GetField(VT_TIME_MAJOR, 1) != 0; } + bool mutate_time_major(bool _time_major = 1) { + return SetField(VT_TIME_MAJOR, static_cast(_time_major), 1); + } bool asymmetric_quantize_inputs() const { return GetField(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0; } + bool mutate_asymmetric_quantize_inputs(bool _asymmetric_quantize_inputs = 0) { + return SetField(VT_ASYMMETRIC_QUANTIZE_INPUTS, static_cast(_asymmetric_quantize_inputs), 0); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION, 1) && @@ -7107,9 +7632,15 @@ struct ResizeBilinearOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Ta bool align_corners() const { return GetField(VT_ALIGN_CORNERS, 0) != 0; } + bool mutate_align_corners(bool _align_corners = 0) { + return SetField(VT_ALIGN_CORNERS, static_cast(_align_corners), 0); + } bool half_pixel_centers() const { return GetField(VT_HALF_PIXEL_CENTERS, 0) != 0; } + bool mutate_half_pixel_centers(bool _half_pixel_centers = 0) { + return SetField(VT_HALF_PIXEL_CENTERS, static_cast(_half_pixel_centers), 0); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_ALIGN_CORNERS, 1) && @@ -7161,9 +7692,15 @@ struct ResizeNearestNeighborOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuff bool align_corners() const { return GetField(VT_ALIGN_CORNERS, 0) != 0; } + bool mutate_align_corners(bool _align_corners = 0) { + return SetField(VT_ALIGN_CORNERS, static_cast(_align_corners), 0); + } bool half_pixel_centers() const { return GetField(VT_HALF_PIXEL_CENTERS, 0) != 0; } + bool mutate_half_pixel_centers(bool _half_pixel_centers = 0) { + return SetField(VT_HALF_PIXEL_CENTERS, static_cast(_half_pixel_centers), 0); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_ALIGN_CORNERS, 1) && @@ -7214,6 +7751,9 @@ struct CallOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { uint32_t subgraph() const { return GetField(VT_SUBGRAPH, 0); } + bool mutate_subgraph(uint32_t _subgraph = 0) { + return SetField(VT_SUBGRAPH, _subgraph, 0); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_SUBGRAPH, 4) && @@ -7322,6 +7862,9 @@ struct ReshapeOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { const ::flatbuffers::Vector *new_shape() const { return GetPointer *>(VT_NEW_SHAPE); } + ::flatbuffers::Vector *mutable_new_shape() { + return GetPointer<::flatbuffers::Vector *>(VT_NEW_SHAPE); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_NEW_SHAPE) && @@ -7442,12 +7985,21 @@ struct SkipGramOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { int32_t ngram_size() const { return GetField(VT_NGRAM_SIZE, 0); } + bool mutate_ngram_size(int32_t _ngram_size = 0) { + return SetField(VT_NGRAM_SIZE, _ngram_size, 0); + } int32_t max_skip_size() const { return GetField(VT_MAX_SKIP_SIZE, 0); } + bool mutate_max_skip_size(int32_t 
_max_skip_size = 0) { + return SetField(VT_MAX_SKIP_SIZE, _max_skip_size, 0); + } bool include_all_ngrams() const { return GetField(VT_INCLUDE_ALL_NGRAMS, 0) != 0; } + bool mutate_include_all_ngrams(bool _include_all_ngrams = 0) { + return SetField(VT_INCLUDE_ALL_NGRAMS, static_cast(_include_all_ngrams), 0); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_NGRAM_SIZE, 4) && @@ -7504,6 +8056,9 @@ struct SpaceToDepthOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Tabl int32_t block_size() const { return GetField(VT_BLOCK_SIZE, 0); } + bool mutate_block_size(int32_t _block_size = 0) { + return SetField(VT_BLOCK_SIZE, _block_size, 0); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_BLOCK_SIZE, 4) && @@ -7548,6 +8103,9 @@ struct DepthToSpaceOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Tabl int32_t block_size() const { return GetField(VT_BLOCK_SIZE, 0); } + bool mutate_block_size(int32_t _block_size = 0) { + return SetField(VT_BLOCK_SIZE, _block_size, 0); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_BLOCK_SIZE, 4) && @@ -7593,9 +8151,15 @@ struct SubOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { tflite::ActivationFunctionType fused_activation_function() const { return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); } + bool mutate_fused_activation_function(tflite::ActivationFunctionType _fused_activation_function = static_cast(0)) { + return SetField(VT_FUSED_ACTIVATION_FUNCTION, static_cast(_fused_activation_function), 0); + } bool pot_scale_int16() const { return GetField(VT_POT_SCALE_INT16, 1) != 0; } + bool mutate_pot_scale_int16(bool _pot_scale_int16 = 1) { + return SetField(VT_POT_SCALE_INT16, static_cast(_pot_scale_int16), 1); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION, 1) && @@ -7646,6 +8210,9 @@ struct DivOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { tflite::ActivationFunctionType fused_activation_function() const { return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); } + bool mutate_fused_activation_function(tflite::ActivationFunctionType _fused_activation_function = static_cast(0)) { + return SetField(VT_FUSED_ACTIVATION_FUNCTION, static_cast(_fused_activation_function), 0); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION, 1) && @@ -7722,6 +8289,9 @@ struct EmbeddingLookupSparseOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuff tflite::CombinerType combiner() const { return static_cast(GetField(VT_COMBINER, 0)); } + bool mutate_combiner(tflite::CombinerType _combiner = static_cast(0)) { + return SetField(VT_COMBINER, static_cast(_combiner), 0); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_COMBINER, 1) && @@ -7767,9 +8337,15 @@ struct GatherOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { int32_t axis() const { return GetField(VT_AXIS, 0); } + bool mutate_axis(int32_t _axis = 0) { + return SetField(VT_AXIS, _axis, 0); + } int32_t batch_dims() const { return GetField(VT_BATCH_DIMS, 0); } + bool mutate_batch_dims(int32_t _batch_dims = 0) { + return SetField(VT_BATCH_DIMS, _batch_dims, 0); + } bool 
Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_AXIS, 4) && @@ -7916,6 +8492,9 @@ struct ReducerOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { bool keep_dims() const { return GetField(VT_KEEP_DIMS, 0) != 0; } + bool mutate_keep_dims(bool _keep_dims = 0) { + return SetField(VT_KEEP_DIMS, static_cast(_keep_dims), 0); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_KEEP_DIMS, 1) && @@ -7960,6 +8539,9 @@ struct SqueezeOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { const ::flatbuffers::Vector *squeeze_dims() const { return GetPointer *>(VT_SQUEEZE_DIMS); } + ::flatbuffers::Vector *mutable_squeeze_dims() { + return GetPointer<::flatbuffers::Vector *>(VT_SQUEEZE_DIMS); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_SQUEEZE_DIMS) && @@ -8014,6 +8596,9 @@ struct SplitOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { int32_t num_splits() const { return GetField(VT_NUM_SPLITS, 0); } + bool mutate_num_splits(int32_t _num_splits = 0) { + return SetField(VT_NUM_SPLITS, _num_splits, 0); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_NUM_SPLITS, 4) && @@ -8058,6 +8643,9 @@ struct SplitVOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { int32_t num_splits() const { return GetField(VT_NUM_SPLITS, 0); } + bool mutate_num_splits(int32_t _num_splits = 0) { + return SetField(VT_NUM_SPLITS, _num_splits, 0); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_NUM_SPLITS, 4) && @@ -8107,21 +8695,39 @@ struct StridedSliceOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Tabl int32_t begin_mask() const { return GetField(VT_BEGIN_MASK, 0); } + bool mutate_begin_mask(int32_t _begin_mask = 0) { + return SetField(VT_BEGIN_MASK, _begin_mask, 0); + } int32_t end_mask() const { return GetField(VT_END_MASK, 0); } + bool mutate_end_mask(int32_t _end_mask = 0) { + return SetField(VT_END_MASK, _end_mask, 0); + } int32_t ellipsis_mask() const { return GetField(VT_ELLIPSIS_MASK, 0); } + bool mutate_ellipsis_mask(int32_t _ellipsis_mask = 0) { + return SetField(VT_ELLIPSIS_MASK, _ellipsis_mask, 0); + } int32_t new_axis_mask() const { return GetField(VT_NEW_AXIS_MASK, 0); } + bool mutate_new_axis_mask(int32_t _new_axis_mask = 0) { + return SetField(VT_NEW_AXIS_MASK, _new_axis_mask, 0); + } int32_t shrink_axis_mask() const { return GetField(VT_SHRINK_AXIS_MASK, 0); } + bool mutate_shrink_axis_mask(int32_t _shrink_axis_mask = 0) { + return SetField(VT_SHRINK_AXIS_MASK, _shrink_axis_mask, 0); + } bool offset() const { return GetField(VT_OFFSET, 0) != 0; } + bool mutate_offset(bool _offset = 0) { + return SetField(VT_OFFSET, static_cast(_offset), 0); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_BEGIN_MASK, 4) && @@ -8229,9 +8835,15 @@ struct CastOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { tflite::TensorType in_data_type() const { return static_cast(GetField(VT_IN_DATA_TYPE, 0)); } + bool mutate_in_data_type(tflite::TensorType _in_data_type = static_cast(0)) { + return SetField(VT_IN_DATA_TYPE, static_cast(_in_data_type), 0); + } tflite::TensorType out_data_type() const { return static_cast(GetField(VT_OUT_DATA_TYPE, 0)); 
} + bool mutate_out_data_type(tflite::TensorType _out_data_type = static_cast(0)) { + return SetField(VT_OUT_DATA_TYPE, static_cast(_out_data_type), 0); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_IN_DATA_TYPE, 1) && @@ -8378,6 +8990,9 @@ struct ArgMaxOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { tflite::TensorType output_type() const { return static_cast(GetField(VT_OUTPUT_TYPE, 0)); } + bool mutate_output_type(tflite::TensorType _output_type = static_cast(0)) { + return SetField(VT_OUTPUT_TYPE, static_cast(_output_type), 0); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_OUTPUT_TYPE, 1) && @@ -8422,6 +9037,9 @@ struct ArgMinOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { tflite::TensorType output_type() const { return static_cast(GetField(VT_OUTPUT_TYPE, 0)); } + bool mutate_output_type(tflite::TensorType _output_type = static_cast(0)) { + return SetField(VT_OUTPUT_TYPE, static_cast(_output_type), 0); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_OUTPUT_TYPE, 1) && @@ -8694,18 +9312,33 @@ struct TransposeConvOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Tab tflite::Padding padding() const { return static_cast(GetField(VT_PADDING, 0)); } + bool mutate_padding(tflite::Padding _padding = static_cast(0)) { + return SetField(VT_PADDING, static_cast(_padding), 0); + } int32_t stride_w() const { return GetField(VT_STRIDE_W, 0); } + bool mutate_stride_w(int32_t _stride_w = 0) { + return SetField(VT_STRIDE_W, _stride_w, 0); + } int32_t stride_h() const { return GetField(VT_STRIDE_H, 0); } + bool mutate_stride_h(int32_t _stride_h = 0) { + return SetField(VT_STRIDE_H, _stride_h, 0); + } tflite::ActivationFunctionType fused_activation_function() const { return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); } + bool mutate_fused_activation_function(tflite::ActivationFunctionType _fused_activation_function = static_cast(0)) { + return SetField(VT_FUSED_ACTIVATION_FUNCTION, static_cast(_fused_activation_function), 0); + } tflite::TensorType quantized_bias_type() const { return static_cast(GetField(VT_QUANTIZED_BIAS_TYPE, 0)); } + bool mutate_quantized_bias_type(tflite::TensorType _quantized_bias_type = static_cast(0)) { + return SetField(VT_QUANTIZED_BIAS_TYPE, static_cast(_quantized_bias_type), 0); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_PADDING, 1) && @@ -8806,6 +9439,9 @@ struct SparseToDenseOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Tab bool validate_indices() const { return GetField(VT_VALIDATE_INDICES, 0) != 0; } + bool mutate_validate_indices(bool _validate_indices = 0) { + return SetField(VT_VALIDATE_INDICES, static_cast(_validate_indices), 0); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_VALIDATE_INDICES, 1) && @@ -8914,6 +9550,9 @@ struct ShapeOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { tflite::TensorType out_type() const { return static_cast(GetField(VT_OUT_TYPE, 0)); } + bool mutate_out_type(tflite::TensorType _out_type = static_cast(0)) { + return SetField(VT_OUT_TYPE, static_cast(_out_type), 0); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_OUT_TYPE, 
1) && @@ -9025,15 +9664,27 @@ struct FakeQuantOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { float min() const { return GetField(VT_MIN, 0.0f); } + bool mutate_min(float _min = 0.0f) { + return SetField(VT_MIN, _min, 0.0f); + } float max() const { return GetField(VT_MAX, 0.0f); } + bool mutate_max(float _max = 0.0f) { + return SetField(VT_MAX, _max, 0.0f); + } int32_t num_bits() const { return GetField(VT_NUM_BITS, 0); } + bool mutate_num_bits(int32_t _num_bits = 0) { + return SetField(VT_NUM_BITS, _num_bits, 0); + } bool narrow_range() const { return GetField(VT_NARROW_RANGE, 0) != 0; } + bool mutate_narrow_range(bool _narrow_range = 0) { + return SetField(VT_NARROW_RANGE, static_cast(_narrow_range), 0); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_MIN, 4) && @@ -9097,9 +9748,15 @@ struct PackOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { int32_t values_count() const { return GetField(VT_VALUES_COUNT, 0); } + bool mutate_values_count(int32_t _values_count = 0) { + return SetField(VT_VALUES_COUNT, _values_count, 0); + } int32_t axis() const { return GetField(VT_AXIS, 0); } + bool mutate_axis(int32_t _axis = 0) { + return SetField(VT_AXIS, _axis, 0); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_VALUES_COUNT, 4) && @@ -9182,6 +9839,9 @@ struct OneHotOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { int32_t axis() const { return GetField(VT_AXIS, 0); } + bool mutate_axis(int32_t _axis = 0) { + return SetField(VT_AXIS, _axis, 0); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_AXIS, 4) && @@ -9355,9 +10015,15 @@ struct UnpackOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { int32_t num() const { return GetField(VT_NUM, 0); } + bool mutate_num(int32_t _num = 0) { + return SetField(VT_NUM, _num, 0); + } int32_t axis() const { return GetField(VT_AXIS, 0); } + bool mutate_axis(int32_t _axis = 0) { + return SetField(VT_AXIS, _axis, 0); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_NUM, 4) && @@ -9600,6 +10266,9 @@ struct LeakyReluOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { float alpha() const { return GetField(VT_ALPHA, 0.0f); } + bool mutate_alpha(float _alpha = 0.0f) { + return SetField(VT_ALPHA, _alpha, 0.0f); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_ALPHA, 4) && @@ -9676,6 +10345,9 @@ struct MirrorPadOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { tflite::MirrorPadMode mode() const { return static_cast(GetField(VT_MODE, 0)); } + bool mutate_mode(tflite::MirrorPadMode _mode = static_cast(0)) { + return SetField(VT_MODE, static_cast(_mode), 0); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_MODE, 1) && @@ -9720,6 +10392,9 @@ struct UniqueOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { tflite::TensorType idx_out_type() const { return static_cast(GetField(VT_IDX_OUT_TYPE, 2)); } + bool mutate_idx_out_type(tflite::TensorType _idx_out_type = static_cast(2)) { + return SetField(VT_IDX_OUT_TYPE, static_cast(_idx_out_type), 2); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && 
VerifyField(verifier, VT_IDX_OUT_TYPE, 1) && @@ -9893,9 +10568,15 @@ struct ReverseSequenceOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::T int32_t seq_dim() const { return GetField(VT_SEQ_DIM, 0); } + bool mutate_seq_dim(int32_t _seq_dim = 0) { + return SetField(VT_SEQ_DIM, _seq_dim, 0); + } int32_t batch_dim() const { return GetField(VT_BATCH_DIM, 0); } + bool mutate_batch_dim(int32_t _batch_dim = 0) { + return SetField(VT_BATCH_DIM, _batch_dim, 0); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_SEQ_DIM, 4) && @@ -10043,9 +10724,15 @@ struct IfOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { int32_t then_subgraph_index() const { return GetField(VT_THEN_SUBGRAPH_INDEX, 0); } + bool mutate_then_subgraph_index(int32_t _then_subgraph_index = 0) { + return SetField(VT_THEN_SUBGRAPH_INDEX, _then_subgraph_index, 0); + } int32_t else_subgraph_index() const { return GetField(VT_ELSE_SUBGRAPH_INDEX, 0); } + bool mutate_else_subgraph_index(int32_t _else_subgraph_index = 0) { + return SetField(VT_ELSE_SUBGRAPH_INDEX, _else_subgraph_index, 0); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_THEN_SUBGRAPH_INDEX, 4) && @@ -10096,6 +10783,9 @@ struct CallOnceOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { int32_t init_subgraph_index() const { return GetField(VT_INIT_SUBGRAPH_INDEX, 0); } + bool mutate_init_subgraph_index(int32_t _init_subgraph_index = 0) { + return SetField(VT_INIT_SUBGRAPH_INDEX, _init_subgraph_index, 0); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_INIT_SUBGRAPH_INDEX, 4) && @@ -10141,9 +10831,15 @@ struct WhileOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { int32_t cond_subgraph_index() const { return GetField(VT_COND_SUBGRAPH_INDEX, 0); } + bool mutate_cond_subgraph_index(int32_t _cond_subgraph_index = 0) { + return SetField(VT_COND_SUBGRAPH_INDEX, _cond_subgraph_index, 0); + } int32_t body_subgraph_index() const { return GetField(VT_BODY_SUBGRAPH_INDEX, 0); } + bool mutate_body_subgraph_index(int32_t _body_subgraph_index = 0) { + return SetField(VT_BODY_SUBGRAPH_INDEX, _body_subgraph_index, 0); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_COND_SUBGRAPH_INDEX, 4) && @@ -10388,12 +11084,21 @@ struct BatchMatMulOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table bool adj_x() const { return GetField(VT_ADJ_X, 0) != 0; } + bool mutate_adj_x(bool _adj_x = 0) { + return SetField(VT_ADJ_X, static_cast(_adj_x), 0); + } bool adj_y() const { return GetField(VT_ADJ_Y, 0) != 0; } + bool mutate_adj_y(bool _adj_y = 0) { + return SetField(VT_ADJ_Y, static_cast(_adj_y), 0); + } bool asymmetric_quantize_inputs() const { return GetField(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0; } + bool mutate_asymmetric_quantize_inputs(bool _asymmetric_quantize_inputs = 0) { + return SetField(VT_ASYMMETRIC_QUANTIZE_INPUTS, static_cast(_asymmetric_quantize_inputs), 0); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_ADJ_X, 1) && @@ -10451,9 +11156,15 @@ struct CumsumOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { bool exclusive() const { return GetField(VT_EXCLUSIVE, 0) != 0; } + bool mutate_exclusive(bool _exclusive = 0) { + return SetField(VT_EXCLUSIVE, 
static_cast(_exclusive), 0); + } bool reverse() const { return GetField(VT_REVERSE, 0) != 0; } + bool mutate_reverse(bool _reverse = 0) { + return SetField(VT_REVERSE, static_cast(_reverse), 0); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_EXCLUSIVE, 1) && @@ -10570,12 +11281,21 @@ struct HashtableOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { int32_t table_id() const { return GetField(VT_TABLE_ID, 0); } + bool mutate_table_id(int32_t _table_id = 0) { + return SetField(VT_TABLE_ID, _table_id, 0); + } tflite::TensorType key_dtype() const { return static_cast(GetField(VT_KEY_DTYPE, 0)); } + bool mutate_key_dtype(tflite::TensorType _key_dtype = static_cast(0)) { + return SetField(VT_KEY_DTYPE, static_cast(_key_dtype), 0); + } tflite::TensorType value_dtype() const { return static_cast(GetField(VT_VALUE_DTYPE, 0)); } + bool mutate_value_dtype(tflite::TensorType _value_dtype = static_cast(0)) { + return SetField(VT_VALUE_DTYPE, static_cast(_value_dtype), 0); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_TABLE_ID, 4) && @@ -10729,9 +11449,15 @@ struct VarHandleOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { const ::flatbuffers::String *container() const { return GetPointer(VT_CONTAINER); } + ::flatbuffers::String *mutable_container() { + return GetPointer<::flatbuffers::String *>(VT_CONTAINER); + } const ::flatbuffers::String *shared_name() const { return GetPointer(VT_SHARED_NAME); } + ::flatbuffers::String *mutable_shared_name() { + return GetPointer<::flatbuffers::String *>(VT_SHARED_NAME); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_CONTAINER) && @@ -10861,9 +11587,15 @@ struct RandomOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { int64_t seed() const { return GetField(VT_SEED, 0); } + bool mutate_seed(int64_t _seed = 0) { + return SetField(VT_SEED, _seed, 0); + } int64_t seed2() const { return GetField(VT_SEED2, 0); } + bool mutate_seed2(int64_t _seed2 = 0) { + return SetField(VT_SEED2, _seed2, 0); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_SEED, 8) && @@ -10914,6 +11646,9 @@ struct BucketizeOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { const ::flatbuffers::Vector *boundaries() const { return GetPointer *>(VT_BOUNDARIES); } + ::flatbuffers::Vector *mutable_boundaries() { + return GetPointer<::flatbuffers::Vector *>(VT_BOUNDARIES); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_BOUNDARIES) && @@ -10968,6 +11703,9 @@ struct GeluOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { bool approximate() const { return GetField(VT_APPROXIMATE, 0) != 0; } + bool mutate_approximate(bool _approximate = 0) { + return SetField(VT_APPROXIMATE, static_cast(_approximate), 0); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_APPROXIMATE, 1) && @@ -11364,6 +12102,9 @@ struct ReduceWindowOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Tabl tflite::ReduceWindowFunction reduce_function() const { return static_cast(GetField(VT_REDUCE_FUNCTION, 0)); } + bool mutate_reduce_function(tflite::ReduceWindowFunction _reduce_function = static_cast(0)) { + return 
SetField(VT_REDUCE_FUNCTION, static_cast(_reduce_function), 0); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_REDUCE_FUNCTION, 4) && @@ -11411,15 +12152,27 @@ struct OperatorCode FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { int8_t deprecated_builtin_code() const { return GetField(VT_DEPRECATED_BUILTIN_CODE, 0); } + bool mutate_deprecated_builtin_code(int8_t _deprecated_builtin_code = 0) { + return SetField(VT_DEPRECATED_BUILTIN_CODE, _deprecated_builtin_code, 0); + } const ::flatbuffers::String *custom_code() const { return GetPointer(VT_CUSTOM_CODE); } + ::flatbuffers::String *mutable_custom_code() { + return GetPointer<::flatbuffers::String *>(VT_CUSTOM_CODE); + } int32_t version() const { return GetField(VT_VERSION, 1); } + bool mutate_version(int32_t _version = 1) { + return SetField(VT_VERSION, _version, 1); + } tflite::BuiltinOperator builtin_code() const { return static_cast(GetField(VT_BUILTIN_CODE, 0)); } + bool mutate_builtin_code(tflite::BuiltinOperator _builtin_code = static_cast(0)) { + return SetField(VT_BUILTIN_CODE, static_cast(_builtin_code), 0); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_DEPRECATED_BUILTIN_CODE, 1) && @@ -11502,18 +12255,33 @@ struct StableHLOCompositeOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers const ::flatbuffers::String *name() const { return GetPointer(VT_NAME); } + ::flatbuffers::String *mutable_name() { + return GetPointer<::flatbuffers::String *>(VT_NAME); + } int32_t decomposition_subgraph_index() const { return GetField(VT_DECOMPOSITION_SUBGRAPH_INDEX, 0); } + bool mutate_decomposition_subgraph_index(int32_t _decomposition_subgraph_index = 0) { + return SetField(VT_DECOMPOSITION_SUBGRAPH_INDEX, _decomposition_subgraph_index, 0); + } const ::flatbuffers::Vector *composite_attributes() const { return GetPointer *>(VT_COMPOSITE_ATTRIBUTES); } + ::flatbuffers::Vector *mutable_composite_attributes() { + return GetPointer<::flatbuffers::Vector *>(VT_COMPOSITE_ATTRIBUTES); + } tflite::CustomOptionsFormat composite_attributes_format() const { return static_cast(GetField(VT_COMPOSITE_ATTRIBUTES_FORMAT, 0)); } + bool mutate_composite_attributes_format(tflite::CustomOptionsFormat _composite_attributes_format = static_cast(0)) { + return SetField(VT_COMPOSITE_ATTRIBUTES_FORMAT, static_cast(_composite_attributes_format), 0); + } int32_t version() const { return GetField(VT_VERSION, 0); } + bool mutate_version(int32_t _version = 0) { + return SetField(VT_VERSION, _version, 0); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_NAME) && @@ -11614,12 +12382,21 @@ struct Operator FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { uint32_t opcode_index() const { return GetField(VT_OPCODE_INDEX, 0); } + bool mutate_opcode_index(uint32_t _opcode_index = 0) { + return SetField(VT_OPCODE_INDEX, _opcode_index, 0); + } const ::flatbuffers::Vector *inputs() const { return GetPointer *>(VT_INPUTS); } + ::flatbuffers::Vector *mutable_inputs() { + return GetPointer<::flatbuffers::Vector *>(VT_INPUTS); + } const ::flatbuffers::Vector *outputs() const { return GetPointer *>(VT_OUTPUTS); } + ::flatbuffers::Vector *mutable_outputs() { + return GetPointer<::flatbuffers::Vector *>(VT_OUTPUTS); + } tflite::BuiltinOptions builtin_options_type() const { return static_cast(GetField(VT_BUILTIN_OPTIONS_TYPE, 0)); } @@ -12005,24 +12782,45 
@@ struct Operator FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { const tflite::RightShiftOptions *builtin_options_as_RightShiftOptions() const { return builtin_options_type() == tflite::BuiltinOptions::RightShiftOptions ? static_cast(builtin_options()) : nullptr; } + void *mutable_builtin_options() { + return GetPointer(VT_BUILTIN_OPTIONS); + } const ::flatbuffers::Vector *custom_options() const { return GetPointer *>(VT_CUSTOM_OPTIONS); } + ::flatbuffers::Vector *mutable_custom_options() { + return GetPointer<::flatbuffers::Vector *>(VT_CUSTOM_OPTIONS); + } tflite::CustomOptionsFormat custom_options_format() const { return static_cast(GetField(VT_CUSTOM_OPTIONS_FORMAT, 0)); } + bool mutate_custom_options_format(tflite::CustomOptionsFormat _custom_options_format = static_cast(0)) { + return SetField(VT_CUSTOM_OPTIONS_FORMAT, static_cast(_custom_options_format), 0); + } const ::flatbuffers::Vector *mutating_variable_inputs() const { return GetPointer *>(VT_MUTATING_VARIABLE_INPUTS); } + ::flatbuffers::Vector *mutable_mutating_variable_inputs() { + return GetPointer<::flatbuffers::Vector *>(VT_MUTATING_VARIABLE_INPUTS); + } const ::flatbuffers::Vector *intermediates() const { return GetPointer *>(VT_INTERMEDIATES); } + ::flatbuffers::Vector *mutable_intermediates() { + return GetPointer<::flatbuffers::Vector *>(VT_INTERMEDIATES); + } uint64_t large_custom_options_offset() const { return GetField(VT_LARGE_CUSTOM_OPTIONS_OFFSET, 0); } + bool mutate_large_custom_options_offset(uint64_t _large_custom_options_offset = 0) { + return SetField(VT_LARGE_CUSTOM_OPTIONS_OFFSET, _large_custom_options_offset, 0); + } uint64_t large_custom_options_size() const { return GetField(VT_LARGE_CUSTOM_OPTIONS_SIZE, 0); } + bool mutate_large_custom_options_size(uint64_t _large_custom_options_size = 0) { + return SetField(VT_LARGE_CUSTOM_OPTIONS_SIZE, _large_custom_options_size, 0); + } tflite::BuiltinOptions2 builtin_options_2_type() const { return static_cast(GetField(VT_BUILTIN_OPTIONS_2_TYPE, 0)); } @@ -12093,6 +12891,9 @@ struct Operator FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { const tflite::StableHLOCompositeOptions *builtin_options_2_as_StableHLOCompositeOptions() const { return builtin_options_2_type() == tflite::BuiltinOptions2::StableHLOCompositeOptions ? 
static_cast(builtin_options_2()) : nullptr; } + void *mutable_builtin_options_2() { + return GetPointer(VT_BUILTIN_OPTIONS_2); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_OPCODE_INDEX, 4) && @@ -12845,18 +13646,33 @@ struct SubGraph FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { const ::flatbuffers::Vector<::flatbuffers::Offset> *tensors() const { return GetPointer> *>(VT_TENSORS); } + ::flatbuffers::Vector<::flatbuffers::Offset> *mutable_tensors() { + return GetPointer<::flatbuffers::Vector<::flatbuffers::Offset> *>(VT_TENSORS); + } const ::flatbuffers::Vector *inputs() const { return GetPointer *>(VT_INPUTS); } + ::flatbuffers::Vector *mutable_inputs() { + return GetPointer<::flatbuffers::Vector *>(VT_INPUTS); + } const ::flatbuffers::Vector *outputs() const { return GetPointer *>(VT_OUTPUTS); } + ::flatbuffers::Vector *mutable_outputs() { + return GetPointer<::flatbuffers::Vector *>(VT_OUTPUTS); + } const ::flatbuffers::Vector<::flatbuffers::Offset> *operators() const { return GetPointer> *>(VT_OPERATORS); } + ::flatbuffers::Vector<::flatbuffers::Offset> *mutable_operators() { + return GetPointer<::flatbuffers::Vector<::flatbuffers::Offset> *>(VT_OPERATORS); + } const ::flatbuffers::String *name() const { return GetPointer(VT_NAME); } + ::flatbuffers::String *mutable_name() { + return GetPointer<::flatbuffers::String *>(VT_NAME); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_TENSORS) && @@ -12955,12 +13771,21 @@ struct Buffer FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { const ::flatbuffers::Vector *data() const { return GetPointer *>(VT_DATA); } + ::flatbuffers::Vector *mutable_data() { + return GetPointer<::flatbuffers::Vector *>(VT_DATA); + } uint64_t offset() const { return GetField(VT_OFFSET, 0); } + bool mutate_offset(uint64_t _offset = 0) { + return SetField(VT_OFFSET, _offset, 0); + } uint64_t size() const { return GetField(VT_SIZE, 0); } + bool mutate_size(uint64_t _size = 0) { + return SetField(VT_SIZE, _size, 0); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_DATA) && @@ -13033,9 +13858,15 @@ struct Metadata FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { const ::flatbuffers::String *name() const { return GetPointer(VT_NAME); } + ::flatbuffers::String *mutable_name() { + return GetPointer<::flatbuffers::String *>(VT_NAME); + } uint32_t buffer() const { return GetField(VT_BUFFER, 0); } + bool mutate_buffer(uint32_t _buffer = 0) { + return SetField(VT_BUFFER, _buffer, 0); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_NAME) && @@ -13099,9 +13930,15 @@ struct TensorMap FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { const ::flatbuffers::String *name() const { return GetPointer(VT_NAME); } + ::flatbuffers::String *mutable_name() { + return GetPointer<::flatbuffers::String *>(VT_NAME); + } uint32_t tensor_index() const { return GetField(VT_TENSOR_INDEX, 0); } + bool mutate_tensor_index(uint32_t _tensor_index = 0) { + return SetField(VT_TENSOR_INDEX, _tensor_index, 0); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_NAME) && @@ -13167,15 +14004,27 @@ struct SignatureDef FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { const 
::flatbuffers::Vector<::flatbuffers::Offset> *inputs() const { return GetPointer> *>(VT_INPUTS); } + ::flatbuffers::Vector<::flatbuffers::Offset> *mutable_inputs() { + return GetPointer<::flatbuffers::Vector<::flatbuffers::Offset> *>(VT_INPUTS); + } const ::flatbuffers::Vector<::flatbuffers::Offset> *outputs() const { return GetPointer> *>(VT_OUTPUTS); } + ::flatbuffers::Vector<::flatbuffers::Offset> *mutable_outputs() { + return GetPointer<::flatbuffers::Vector<::flatbuffers::Offset> *>(VT_OUTPUTS); + } const ::flatbuffers::String *signature_key() const { return GetPointer(VT_SIGNATURE_KEY); } + ::flatbuffers::String *mutable_signature_key() { + return GetPointer<::flatbuffers::String *>(VT_SIGNATURE_KEY); + } uint32_t subgraph_index() const { return GetField(VT_SUBGRAPH_INDEX, 0); } + bool mutate_subgraph_index(uint32_t _subgraph_index = 0) { + return SetField(VT_SUBGRAPH_INDEX, _subgraph_index, 0); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_INPUTS) && @@ -13267,27 +14116,51 @@ struct Model FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { uint32_t version() const { return GetField(VT_VERSION, 0); } + bool mutate_version(uint32_t _version = 0) { + return SetField(VT_VERSION, _version, 0); + } const ::flatbuffers::Vector<::flatbuffers::Offset> *operator_codes() const { return GetPointer> *>(VT_OPERATOR_CODES); } + ::flatbuffers::Vector<::flatbuffers::Offset> *mutable_operator_codes() { + return GetPointer<::flatbuffers::Vector<::flatbuffers::Offset> *>(VT_OPERATOR_CODES); + } const ::flatbuffers::Vector<::flatbuffers::Offset> *subgraphs() const { return GetPointer> *>(VT_SUBGRAPHS); } + ::flatbuffers::Vector<::flatbuffers::Offset> *mutable_subgraphs() { + return GetPointer<::flatbuffers::Vector<::flatbuffers::Offset> *>(VT_SUBGRAPHS); + } const ::flatbuffers::String *description() const { return GetPointer(VT_DESCRIPTION); } + ::flatbuffers::String *mutable_description() { + return GetPointer<::flatbuffers::String *>(VT_DESCRIPTION); + } const ::flatbuffers::Vector<::flatbuffers::Offset> *buffers() const { return GetPointer> *>(VT_BUFFERS); } + ::flatbuffers::Vector<::flatbuffers::Offset> *mutable_buffers() { + return GetPointer<::flatbuffers::Vector<::flatbuffers::Offset> *>(VT_BUFFERS); + } const ::flatbuffers::Vector *metadata_buffer() const { return GetPointer *>(VT_METADATA_BUFFER); } + ::flatbuffers::Vector *mutable_metadata_buffer() { + return GetPointer<::flatbuffers::Vector *>(VT_METADATA_BUFFER); + } const ::flatbuffers::Vector<::flatbuffers::Offset> *metadata() const { return GetPointer> *>(VT_METADATA); } + ::flatbuffers::Vector<::flatbuffers::Offset> *mutable_metadata() { + return GetPointer<::flatbuffers::Vector<::flatbuffers::Offset> *>(VT_METADATA); + } const ::flatbuffers::Vector<::flatbuffers::Offset> *signature_defs() const { return GetPointer> *>(VT_SIGNATURE_DEFS); } + ::flatbuffers::Vector<::flatbuffers::Offset> *mutable_signature_defs() { + return GetPointer<::flatbuffers::Vector<::flatbuffers::Offset> *>(VT_SIGNATURE_DEFS); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_VERSION, 4) && @@ -17671,6 +18544,14 @@ inline const tflite::Model *GetSizePrefixedModel(const void *buf) { return ::flatbuffers::GetSizePrefixedRoot(buf); } +inline Model *GetMutableModel(void *buf) { + return ::flatbuffers::GetMutableRoot(buf); +} + +inline tflite::Model *GetMutableSizePrefixedModel(void *buf) { + return 
::flatbuffers::GetMutableSizePrefixedRoot(buf); +} + inline const char *ModelIdentifier() { return "TFL3"; } diff --git a/ethosu/regor/tflite/tflite_writer.cpp b/ethosu/regor/tflite/tflite_writer.cpp index f814f99f..ca707e7a 100644 --- a/ethosu/regor/tflite/tflite_writer.cpp +++ b/ethosu/regor/tflite/tflite_writer.cpp @@ -24,6 +24,8 @@ #include "flatbuffer_utils.hpp" #include "tflite_mapping.hpp" +#include +#include #include #include @@ -47,8 +49,57 @@ flatbuffers::Offset TfLiteWriter::Serialise(const std::vector> &graphs, const std::vector> &tensor_address_maps, int64_t &output_buffer_offset, size_t &output_buffer_size) +{ + std::unique_ptr ret; + bool retryWithBufferOffset = false; + + try + { + ret = SerialiseImpl(graphs, tensor_address_maps, output_buffer_offset, output_buffer_size); + } + catch ( const FlatBuffersSizeException & ) + { + retryWithBufferOffset = true; + } + + if ( retryWithBufferOffset ) + { + _opcodes.clear(); + _buffers.clear(); + _serialised_opcodes.clear(); + _serialised_subgraphs.clear(); + _serialised_buffers.clear(); + _tensors.clear(); + _serialised_operations.clear(); + _serialised_tensors.clear(); + _tensor_addresses.clear(); + _offset_buffers.clear(); + _flatbuffer.Clear(); + + _useBufferOffset = true; + ret = SerialiseImpl(graphs, tensor_address_maps, output_buffer_offset, output_buffer_size); + } + + return ret; +} + +void TfLiteWriter::CheckFlatBufferSize() +{ + if ( _flatbuffer.GetSize() >= _fbSizeCap ) + { + throw FlatBuffersSizeException(); + } +} + +std::unique_ptr TfLiteWriter::SerialiseImpl(const std::vector> &graphs, + const std::vector> &tensor_address_maps, int64_t &output_buffer_offset, size_t &output_buffer_size) { // The zeroth buffer is always present and always empty _buffers[BufferDesc()] = 0; @@ -58,7 +109,6 @@ std::unique_ptr TfLiteWriter::Serialise(const std::vector operations; std::set skip; for ( const auto &operation : graph->ScheduledOrder() ) @@ -223,15 +273,61 @@ std::unique_ptr TfLiteWriter::Serialise(const std::vector(base); + ResultBuffer ret(_flatbuffer); + + // Following the model, place offset tensor buffers at the end of the file + if ( _useBufferOffset ) + { + // Serialise buffers at the end of the file + auto offsetBufferOffset = SerialiseOffsetBuffers(ret); + + // Fixup indirect buffer offsets via the mutable API + FixupFbBuffers(ret.begin(), offsetBufferOffset); + } + + return ret.release(output_buffer_size, output_buffer_offset); +} + + +std::vector TfLiteWriter::SerialiseOffsetBuffers(ResultBuffer &res) +{ + // Reserve buffer + auto align = [](size_t sz) { return (sz + BUFFER_ALIGNMENT - 1) & ~(BUFFER_ALIGNMENT - 1); }; + + size_t newSize = res.pos() + BUFFER_ALIGNMENT; + for ( const auto &buf : _offset_buffers ) + { + newSize = align(newSize) + buf.size(); + } + res.reserve(newSize); + + std::vector offsetBufferOffset; + offsetBufferOffset.reserve(_offset_buffers.size()); + + for ( const auto &buf : _offset_buffers ) + { + res.align(BUFFER_ALIGNMENT); + offsetBufferOffset.push_back(res.push(buf.data(), buf.size())); + } + return offsetBufferOffset; +} + + +void TfLiteWriter::FixupFbBuffers(uint8_t *model, const std::vector &offsetBufferOffset) +{ + auto tflite_buffers = tflite::GetMutableModel(model)->mutable_buffers(); + assert(tflite_buffers); + assert(tflite_buffers->size() == (offsetBufferOffset.size() + 1)); + assert(_offset_buffers.size() == offsetBufferOffset.size()); + for ( size_t i = 0; i < offsetBufferOffset.size(); i++ ) + { + auto tflite_buffer = tflite_buffers->GetMutableObject(flatbuffers::uoffset_t(i + 
1)); + tflite_buffer->mutate_offset(offsetBufferOffset[i]); + tflite_buffer->mutate_size(_offset_buffers[i].size()); + } } @@ -367,17 +463,7 @@ flatbuffers::Offset TfLiteWriter::SerialiseTensor(const Tensor * { buffer_index = int(_serialised_buffers.size()); _buffers[descriptor] = buffer_index; - if ( tensor->Type() == DataType::Int48 ) - { // Translate values - const auto values = tensor->View().Values(); - const auto v = std::vector(values.begin(), values.end()); - const int size(v.size() * sizeof(int64_t)); - _serialised_buffers.emplace_back(SerialiseBuffer(reinterpret_cast(v.data()), size)); - } - else - { - _serialised_buffers.emplace_back(SerialiseBuffer(buffer)); - } + SerialiseTensorBuffer(tensor); } else // Buffer has already been serialised - just reference it { @@ -813,20 +899,62 @@ flatbuffers::Offset TfLiteWriter::SerialiseTensorAddresses(int const auto buffer_base = reinterpret_cast(_tensor_addresses.data()); const auto buffer_size = _tensor_addresses.size() * (sizeof(int32_t) / sizeof(uint8_t)); - _serialised_buffers.push_back(SerialiseBuffer(buffer_base, int(buffer_size))); + _serialised_buffers.push_back(SerialiseBuffer(buffer_base, buffer_size)); + if ( _useBufferOffset ) + { + _offset_buffers.emplace_back(buffer_base, buffer_size); + } return tflite::CreateMetadataDirect(_flatbuffer, "OfflineMemoryAllocation", uint32_t(buffer_index)); } +void TfLiteWriter::SerialiseTensorBuffer(const Tensor *tensor) +{ + if ( tensor->Type() == DataType::Int48 ) + { // Translate values + const auto values = tensor->View().Values(); + auto v = std::make_unique>(values.begin(), values.end()); + const auto size = v->size() * sizeof(int64_t); + _serialised_buffers.emplace_back(SerialiseBuffer(reinterpret_cast(v->data()), size)); + if ( _useBufferOffset ) + { + _offset_buffers.emplace_back(std::move(v)); + } + } + else + { + const auto buffer = tensor->View().Buffer(); + _serialised_buffers.emplace_back(SerialiseBuffer(buffer)); + if ( _useBufferOffset ) + { + _offset_buffers.emplace_back(buffer); + } + } +} + flatbuffers::Offset TfLiteWriter::SerialiseBuffer(const Buffer *buffer) { return SerialiseBuffer(buffer->Data(), buffer->Size()); } -flatbuffers::Offset TfLiteWriter::SerialiseBuffer(const uint8_t *data, int size) +flatbuffers::Offset TfLiteWriter::SerialiseBuffer(const uint8_t *data, size_t size) { - _flatbuffer.ForceVectorAlignment(size, 1, 16); // 16-byte alignment - return tflite::CreateBuffer(_flatbuffer, _flatbuffer.CreateVector(data, size)); + flatbuffers::Offset ret; + + _flatbuffer.ForceVectorAlignment(size, sizeof(uint8_t), BUFFER_ALIGNMENT); // 16-byte alignment + if ( _useBufferOffset ) + { + _flatbuffer.ForceDefaults(true); + ret = tflite::CreateBuffer(_flatbuffer); + _flatbuffer.ForceDefaults(false); + } + else + { + ret = tflite::CreateBuffer(_flatbuffer, _flatbuffer.CreateVector(data, size)); + } + CheckFlatBufferSize(); + + return ret; } } // namespace regor diff --git a/ethosu/regor/tflite/tflite_writer.hpp b/ethosu/regor/tflite/tflite_writer.hpp index 6d74dcba..f0e9fd0c 100644 --- a/ethosu/regor/tflite/tflite_writer.hpp +++ b/ethosu/regor/tflite/tflite_writer.hpp @@ -1,5 +1,5 @@ // -// SPDX-FileCopyrightText: Copyright 2021-2024 Arm Limited and/or its affiliates +// SPDX-FileCopyrightText: Copyright 2021-2025 Arm Limited and/or its affiliates // SPDX-FileCopyrightText: Copyright 2024 Meta Platforms, Inc. and affiliates. 
// // SPDX-License-Identifier: Apache-2.0 @@ -39,9 +39,9 @@ namespace regor class TfLiteWriter { public: - TfLiteWriter() + TfLiteWriter(size_t fbSizeCap = size_t{1U << 31}) : + _flatbuffer(flatbuffers::FlatBufferBuilder()), _fbSizeCap(fbSizeCap) { - _flatbuffer = flatbuffers::FlatBufferBuilder(); // TODO: Determine sensible starting size (default is 1KB) } std::unique_ptr Serialise(const std::vector> &graphs, @@ -49,6 +49,10 @@ public: int64_t &output_buffer_offset, size_t &output_buffer_size); private: + std::unique_ptr SerialiseImpl(const std::vector> &graphs, + const std::vector> &tensor_address_maps, + int64_t &output_buffer_offset, size_t &output_buffer_size); + struct BufferDesc { const uint8_t *data = nullptr; @@ -110,9 +114,131 @@ private: flatbuffers::Offset SerialiseTensor(const Tensor *tensor, const Graph &graph); flatbuffers::Offset SerialiseOptions(const Operation *operation, OpType type); flatbuffers::Offset SerialiseTensorAddresses(int subgraphs); + void SerialiseTensorBuffer(const Tensor *tensor); + + class ResultBuffer + { + std::unique_ptr _buf; + size_t _reserved = 0; + size_t _offset = 0; + size_t _wr = 0; + + public: + ResultBuffer(flatbuffers::FlatBufferBuilder &fb) + { + // Can convert to unique_ptr because std::default_delete() is equivalent to + // flatbuffer::DefaultAllocator::deallocate() + // - i.e. they both do `delete base` + size_t size, offset; + auto ptr = fb.ReleaseRaw(size, offset); + + _buf.reset(ptr); + _reserved = size; + _offset = offset; + _wr = size; + } + + uint8_t *begin() { return &_buf[_offset]; } + + size_t reserved() const { return _reserved; } + + void reserve(size_t size) + { + if ( reserved() >= size ) return; + + auto buf = std::make_unique(size); + std::copy_n(begin(), _reserved - _offset, &buf[0]); + _buf.reset(buf.release()); + _reserved = size; + _offset = 0; + } + + size_t push(const uint8_t *buf, size_t size) + { + reserve(_wr + size); + + auto wr = _wr; + std::copy_n(buf, size, &_buf[_wr]); + _wr += size; + return wr; + } + + size_t pos() const { return _wr; } + + void align(size_t alignment) + { + _wr = (_wr + alignment - 1) & ~(alignment - 1); + reserve(_wr); + } + + std::unique_ptr release(size_t &size, int64_t &offset) + { + size = _wr - _offset; + offset = int64_t(_offset); + + _reserved = 0; + _offset = 0; + _wr = 0; + return std::unique_ptr(_buf.release()); + } + }; + + std::vector SerialiseOffsetBuffers(ResultBuffer &res); + void FixupFbBuffers(uint8_t *model, const std::vector &offsetBufferOffset); + + class OffsetBufferDesc + { + typedef void (*DeleteFunc)(void *); + DeleteFunc _deleter = nullptr; + void *_obj = nullptr; + const uint8_t *_data = nullptr; + size_t _size = 0; + + template + static inline void DeleteVector(void *v) + { + using vec = std::vector; + delete static_cast(v); + } + + public: + template + OffsetBufferDesc(std::unique_ptr> &&buf) + { + auto *vec = buf.release(); + assert(vec); + _deleter = &OffsetBufferDesc::DeleteVector; + _obj = vec; + _data = reinterpret_cast(vec->data()); + _size = vec->size() * sizeof(T); + } + + OffsetBufferDesc(const Buffer *buffer) : OffsetBufferDesc(buffer->Data(), buffer->Size()) {} + + OffsetBufferDesc(const uint8_t *data, size_t size) : _data(data), _size(size) {} + + ~OffsetBufferDesc() + { + if ( _deleter ) + { + assert(_obj); + _deleter(_obj); + } + } + + const uint8_t *data() const { return _data; } + size_t size() const { return _size; } + }; + + std::vector _offset_buffers; + bool _useBufferOffset = false; + const size_t _fbSizeCap; + + static constexpr size_t 
BUFFER_ALIGNMENT = 16ULL;
+    void CheckFlatBufferSize();
     flatbuffers::Offset<tflite::Buffer> SerialiseBuffer(const Buffer *buffer);
-    flatbuffers::Offset<tflite::Buffer> SerialiseBuffer(const uint8_t *data, int size);
+    flatbuffers::Offset<tflite::Buffer> SerialiseBuffer(const uint8_t *data, size_t size);
 
     struct TfLiteKernel
     {
diff --git a/ethosu/vela/vela.py b/ethosu/vela/vela.py
index 5717683c..15868b26 100755
--- a/ethosu/vela/vela.py
+++ b/ethosu/vela/vela.py
@@ -19,6 +19,7 @@
 """Compile a neural network model for Arm Ethos-U NPUs."""
 import argparse
 import glob
+import mmap
 import os
 import sys
 import time
@@ -130,10 +131,9 @@ def process_regor(
     os.makedirs(output_dir, exist_ok=True)
 
     with open(input_name, "rb") as f:
-        network = f.read()
-        fmt = get_format(network)
-
-        compiled_model = regor.compile(accelerator, network, fmt, system_config, options=options, verbose=True)
+        with mmap.mmap(f.fileno(), length=0, access=mmap.ACCESS_READ) as network:
+            fmt = get_format(network)
+            compiled_model = regor.compile(accelerator, network, fmt, system_config, options=options, verbose=True)
 
     model_name = os.path.splitext(os.path.basename(input_name))[0]
 
@@ -851,10 +851,10 @@ def get_compiler_config(
     return config
 
 
-def get_format(in_data: bytes) -> str:
+def get_format(in_data: mmap.mmap) -> str:
     """Infere format based on input file."""
     ret = "UNDEFINED"
-    if len(in_data) < 8:
+    if in_data.size() < 8:
         return ret
     second_word = int.from_bytes(in_data[4:8], "little")
     if second_word == TFLITE_MAGIC:
-- 
GitLab
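
Editor's note on the writer-side mechanism: the change above serialises each `tflite::Buffer` table empty (with `ForceDefaults(true)` so the `offset`/`size` fields are present and mutable), appends the raw tensor data 16-byte aligned after the finished flatbuffer, then patches each table through the regenerated mutable API. The following is a minimal sketch of that fix-up step only, not the patch's implementation; it assumes the flatbuffer starts at the beginning of `file`, that `payloads` holds the data for buffers 1..N in order, and the include path and names are illustrative.

```cpp
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

#include "tflite_schema_generated.hpp"  // illustrative include path

// Append each payload after the flatbuffer and patch the corresponding
// tflite::Buffer table in place via the mutable API (GetMutableModel,
// mutate_offset, mutate_size). Buffer 0 stays the usual empty sentinel.
static void AppendAndFixupBuffers(std::vector<uint8_t> &file, const std::vector<std::vector<uint8_t>> &payloads)
{
    constexpr size_t kAlign = 16;  // mirrors BUFFER_ALIGNMENT in the writer
    std::vector<uint64_t> offsets;
    offsets.reserve(payloads.size());
    for ( const auto &p : payloads )
    {
        file.resize((file.size() + kAlign - 1) & ~(kAlign - 1));  // pad to a 16-byte boundary
        offsets.push_back(file.size());  // offsets are relative to the start of the file
        file.insert(file.end(), p.begin(), p.end());
    }

    auto *buffers = tflite::GetMutableModel(file.data())->mutable_buffers();
    assert(buffers && buffers->size() == payloads.size() + 1);
    for ( size_t i = 0; i < offsets.size(); i++ )
    {
        auto *buf = buffers->GetMutableObject(flatbuffers::uoffset_t(i + 1));
        bool ok = buf->mutate_offset(offsets[i]) && buf->mutate_size(payloads[i].size());
        assert(ok);  // false means the offset/size fields were defaulted away at serialisation time
        (void)ok;
    }
}
```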
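On the consuming side (the reader change listed in the commit message, and the reason vela.py now memory-maps the input), a buffer's data is either stored inline in `Buffer.data` or located at `Buffer.offset`/`Buffer.size` relative to the start of the file. Below is a hedged sketch of that lookup under the assumption that `fileBase`/`fileSize` describe the whole mapped .tflite file and that offsets of 0 or 1 mark "no out-of-line data" as in the schema comments; the patch's actual logic lives in tflite_reader.cpp.

```cpp
#include <cstddef>
#include <cstdint>
#include <utility>

#include "tflite_schema_generated.hpp"  // illustrative include path

// Return a pointer/size pair for a tensor's constant data, handling both the
// classic inline flavour and the new buffer-offset flavour of the format.
static std::pair<const uint8_t *, size_t> TensorData(
    const uint8_t *fileBase, size_t fileSize, const tflite::Model *model, const tflite::Tensor *tensor)
{
    const tflite::Buffer *buffer = model->buffers()->Get(tensor->buffer());
    if ( buffer->offset() > 1 )  // data placed after the flatbuffer; 0/1 are "unused"/"placeholder"
    {
        if ( buffer->offset() + buffer->size() > fileSize ) return {nullptr, 0};  // malformed file
        return {fileBase + buffer->offset(), size_t(buffer->size())};
    }
    if ( buffer->data() )  // classic inline storage
    {
        return {buffer->data()->data(), buffer->data()->size()};
    }
    return {nullptr, 0};  // no constant data for this tensor
}
```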