54 changes: 50 additions & 4 deletions tensorflow/lite/micro/micro_interpreter_graph.cc
@@ -15,6 +15,8 @@ limitations under the License.

#include "tensorflow/lite/micro/micro_interpreter_graph.h"

#include <algorithm>

#include "flatbuffers/flatbuffers.h" // from @flatbuffers
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
@@ -42,6 +44,34 @@ const char* OpNameFromRegistration(const TFLMRegistration* registration) {
}
}

// Check tensor shapes to determine if there are dynamic tensors present.
// Returns the index of the first dynamic tensor found, otherwise returns -1.
int CheckDynamicTensors(const TfLiteIntArray* const tensor_indices,
const TfLiteEvalTensor* const eval_tensors) {
// Some operators have no tensors, so node->inputs and/or node->outputs
// can be nullptr. This occurs in the MicroInterpreter unit tests.
if (tensor_indices == nullptr) {
return -1;
}

for (int i = 0; i < tensor_indices->size; i++) {
const int tensor_index = tensor_indices->data[i];
// Skip optional tensors
if (tensor_index < 0) {
continue;
}
// Check shape for dims <= 0.
// This code handles legacy scalar tensors (dims->size == 0).
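// Note: std::all_of over an empty range returns true, so rank-0 scalar
// tensors (dims->size == 0) are never reported as dynamic.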
const TfLiteEvalTensor* const tp = eval_tensors + tensor_index;
if (!std::all_of(tp->dims->data, tp->dims->data + tp->dims->size,
[](int dim) { return dim > 0; })) {
return tensor_index;
}
}

return -1;
}

} // namespace

MicroInterpreterGraph::MicroInterpreterGraph(
@@ -117,7 +147,7 @@ TfLiteStatus MicroInterpreterGraph::PrepareSubgraphs() {
if (registration->prepare != nullptr) {
TfLiteStatus prepare_status = registration->prepare(context_, node);
if (prepare_status != kTfLiteOk) {
MicroPrintf("Node %s (number %df) failed to prepare with status %d",
MicroPrintf("Node %s (number %u) failed to prepare with status %d",
OpNameFromRegistration(registration),
current_operator_index_, prepare_status);
return kTfLiteError;
@@ -126,6 +156,18 @@ TfLiteStatus MicroInterpreterGraph::PrepareSubgraphs() {
GetMicroContext(context_)->ResetDecompressionMemoryAllocations();
#endif // USE_TFLM_COMPRESSION
}

const int dynamic_tensor_index = CheckDynamicTensors(
node->outputs, subgraph_allocations_[subgraph_idx].tensors);
if (dynamic_tensor_index != -1) {
MicroPrintf(
"Op#%u (%s) of subgraph %u has dynamic tensor #%d\n"
"Dynamic tensors are not supported",
current_operator_index_, OpNameFromRegistration(registration),
current_subgraph_index_, dynamic_tensor_index);
return kTfLiteError;
}

allocator_->FinishPrepareNodeAllocations(
/*node_id=*/current_operator_index_);
}
@@ -205,6 +247,7 @@ TfLiteStatus MicroInterpreterGraph::InvokeSubgraph(int subgraph_idx) {
subgraph_idx, subgraphs_->size());
return kTfLiteError;
}
TfLiteStatus invoke_status = kTfLiteOk;
uint32_t operators_size = NumSubgraphOperators(model_, subgraph_idx);
for (current_operator_index_ = 0; current_operator_index_ < operators_size;
++current_operator_index_) {
@@ -226,7 +269,7 @@ TfLiteStatus MicroInterpreterGraph::InvokeSubgraph(int subgraph_idx) {
#endif

TFLITE_DCHECK(registration->invoke);
TfLiteStatus invoke_status = registration->invoke(context_, node);
invoke_status = registration->invoke(context_, node);
#ifdef USE_TFLM_COMPRESSION
GetMicroContext(context_)->ResetDecompressionMemoryAllocations();
#endif // USE_TFLM_COMPRESSION
@@ -243,12 +286,15 @@ TfLiteStatus MicroInterpreterGraph::InvokeSubgraph(int subgraph_idx) {
OpNameFromRegistration(registration),
current_operator_index_, invoke_status);
}
return invoke_status;
// make sure to restore subgraph and operator indices
break;
}
}

current_subgraph_index_ = previous_subgraph_idx;
current_operator_index_ = previous_operator_idx;
return kTfLiteOk;

return invoke_status;
}

TfLiteStatus MicroInterpreterGraph::ResetVariableTensors() {
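For reference, a minimal standalone sketch (editorial, not part of the diff) of the shape test that CheckDynamicTensors applies to each output tensor: any dimension <= 0 marks the tensor as dynamic, while rank-0 legacy scalars pass. The IsDynamicShape name below is illustrative only.

// Illustrative sketch; mirrors the logic of CheckDynamicTensors above.
#include <algorithm>

bool IsDynamicShape(const int* dims, int rank) {
  // std::all_of over an empty range (rank == 0, legacy scalar) returns true,
  // so scalars are not treated as dynamic.
  return !std::all_of(dims, dims + rank, [](int dim) { return dim > 0; });
}

// Example:
//   const int static_dims[] = {3, 2};      // IsDynamicShape(static_dims, 2)  -> false
//   const int dynamic_dims[] = {3, 2, -1}; // IsDynamicShape(dynamic_dims, 3) -> true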
33 changes: 32 additions & 1 deletion tensorflow/lite/micro/micro_interpreter_test.cc
@@ -1,4 +1,4 @@
/* Copyright 2024 The TensorFlow Authors. All Rights Reserved.
/* Copyright 2025 The TensorFlow Authors. All Rights Reserved.

Collaborator comment: Please keep the original year.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -778,4 +778,35 @@ TF_LITE_MICRO_TEST(TestGetTensorFailsNoLinearMemoryPlanner) {
TF_LITE_MICRO_EXPECT(interpreter.GetTensor(0) == nullptr);
}

TF_LITE_MICRO_TEST(TestDynamicTensorFails) {
tflite::testing::TestingOpResolver op_resolver;
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk,
tflite::testing::GetTestingOpResolver(op_resolver));

constexpr size_t kAllocatorBufferSize = 2000;
uint8_t allocator_buffer[kAllocatorBufferSize];

// Use a new scope for each MicroInterpreter
{
// test with 0 in shape
const tflite::Model* model =
tflite::testing::GetNoOpModelWithTensorShape({3, 2, 0});
TF_LITE_MICRO_EXPECT(nullptr != model);
tflite::MicroInterpreter interpreter(model, op_resolver, allocator_buffer,
kAllocatorBufferSize);
TF_LITE_MICRO_EXPECT_EQ(interpreter.AllocateTensors(), kTfLiteError);
}

// Use a new scope for each MicroInterpreter
{
// test with -1 in shape
const tflite::Model* model =
tflite::testing::GetNoOpModelWithTensorShape({3, 2, -1});
TF_LITE_MICRO_EXPECT(nullptr != model);
tflite::MicroInterpreter interpreter(model, op_resolver, allocator_buffer,
kAllocatorBufferSize);
TF_LITE_MICRO_EXPECT_EQ(interpreter.AllocateTensors(), kTfLiteError);
}
}

TF_LITE_MICRO_TESTS_END
30 changes: 27 additions & 3 deletions tensorflow/lite/micro/test_helpers.cc
@@ -103,13 +103,14 @@ class ModelBuilder {
Operator RegisterOp(BuiltinOperator op, const char* custom_code);

// Adds a tensor to the model.
Tensor AddTensor(TensorType type, std::initializer_list<int32_t> shape) {
Tensor AddTensor(TensorType type,
const std::initializer_list<int32_t> shape) {
return AddTensorImpl(type, /* is_variable */ false, shape);
}

// Adds a variable tensor to the model.
Tensor AddVariableTensor(TensorType type,
std::initializer_list<int32_t> shape) {
const std::initializer_list<int32_t> shape) {
return AddTensorImpl(type, /* is_variable */ true, shape);
}

@@ -133,7 +134,7 @@ class ModelBuilder {
private:
// Adds a tensor to the model.
Tensor AddTensorImpl(TensorType type, bool is_variable,
std::initializer_list<int32_t> shape);
const std::initializer_list<int32_t> shape);

flatbuffers::FlatBufferBuilder* builder_;

@@ -1546,6 +1547,23 @@ const Model* BuildSimpleMockModelWithNullInputsOutputs() {
return model;
}

const Model* BuildNoOpModelWithTensorShape(
const std::initializer_list<int32_t>& shape) {
using flatbuffers::Offset;
flatbuffers::FlatBufferBuilder* fb_builder = BuilderInstance();

ModelBuilder model_builder(fb_builder);

// build model with 2 tensor outputs, the first with shape [3, 2]
// and the second with the supplied shape.
const int op_id = model_builder.RegisterOp(BuiltinOperator_CUSTOM, "no_op");
const int tensor_0 = model_builder.AddTensor(TensorType_INT8, {3, 2});
const int tensor_1 = model_builder.AddTensor(TensorType_INT8, shape);

model_builder.AddNode(op_id, {}, {tensor_0, tensor_1}, {});
return model_builder.BuildModel({}, {tensor_0, tensor_1});
}

} // namespace

const TFLMRegistration* SimpleStatefulOp::getRegistration() {
@@ -1912,6 +1930,12 @@ const Model* GetSimpleStatefulModel() {
return model;
}

const Model* GetNoOpModelWithTensorShape(
const std::initializer_list<int32_t>& shape) {
// don't cache the model as the tensor shape can be different on each call
return const_cast<Model*>(BuildNoOpModelWithTensorShape(shape));
}

const Tensor* Create1dFlatbufferTensor(int size, bool is_variable) {
using flatbuffers::Offset;
flatbuffers::FlatBufferBuilder* builder = BuilderInstance();
6 changes: 6 additions & 0 deletions tensorflow/lite/micro/test_helpers.h
@@ -19,6 +19,7 @@ limitations under the License.
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <initializer_list>
#include <limits>
#include <type_traits>

@@ -190,6 +191,11 @@ const Model* GetModelWithIfAndSubgraphInputTensorOverlap();
// Returns a flatbuffer model with null subgraph/operator inputs and outputs.
const Model* GetSimpleModelWithNullInputsAndOutputs();

// Returns a flatbuffer model with no inputs and two outputs, the second
// of which has the supplied shape.
const Model* GetNoOpModelWithTensorShape(
const std::initializer_list<int32_t>& shape);

// Builds a one-dimensional flatbuffer tensor of the given size.
const Tensor* Create1dFlatbufferTensor(int size, bool is_variable = false);
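A hedged usage sketch of the new helper (editorial, not part of the diff; it mirrors TestDynamicTensorFails above). The resolver and arena names are assumptions, and the expectation that a fully static shape allocates successfully assumes the no_op custom op registered by the testing op resolver has a trivial prepare.

// Illustrative only.
const tflite::Model* model =
    tflite::testing::GetNoOpModelWithTensorShape({3, 2, 4});  // static shape
tflite::MicroInterpreter interpreter(model, op_resolver, allocator_buffer,
                                     kAllocatorBufferSize);
// Expected to succeed; a shape containing 0 or -1 would instead make
// AllocateTensors() fail via the new dynamic-tensor check.
TF_LITE_MICRO_EXPECT_EQ(interpreter.AllocateTensors(), kTfLiteOk);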
