Skip to content

Commit 8eb17a7

Browse files
authored
Detect and fail dynamic tensors (#3181)
@tensorflow/micro Detect output tensors with dynamic shapes after the Prepare method for each operator in a subgraph has been called. Upon detection of a dynamic tensor, return kTfLiteError from the MicroInterpreter::AllocateTensors method. Add unit test for the MicroInterpreter. Fix typos in Prepare failure message. Restore current operator/subgraph index after subgraph invocation failure. bug=fixes #3180
1 parent 7a7a3de commit 8eb17a7

File tree

4 files changed

+115
-8
lines changed

4 files changed

+115
-8
lines changed

tensorflow/lite/micro/micro_interpreter_graph.cc

Lines changed: 50 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,8 @@ limitations under the License.
1515

1616
#include "tensorflow/lite/micro/micro_interpreter_graph.h"
1717

18+
#include <algorithm>
19+
1820
#include "flatbuffers/flatbuffers.h" // from @flatbuffers
1921
#include "tensorflow/lite/c/common.h"
2022
#include "tensorflow/lite/kernels/internal/compatibility.h"
@@ -42,6 +44,34 @@ const char* OpNameFromRegistration(const TFLMRegistration* registration) {
4244
}
4345
}
4446

47+
// Check tensor shapes to determine if there are dynamic tensors present.
48+
// Returns the index of the first dynamic tensor found, otherwise returns -1.
49+
int CheckDynamicTensors(const TfLiteIntArray* const tensor_indices,
50+
const TfLiteEvalTensor* const eval_tensors) {
51+
// some operators have no tensors, so node->inputs and/or node->outputs
52+
// can be <nullptr>. This occurs in the MicroInterpreter unit tests.
53+
if (tensor_indices == nullptr) {
54+
return -1;
55+
}
56+
57+
for (int i = 0; i < tensor_indices->size; i++) {
58+
const int tensor_index = tensor_indices->data[i];
59+
// Skip optional tensors
60+
if (tensor_index < 0) {
61+
continue;
62+
}
63+
// Check shape for dims <= 0.
64+
// This code handles legacy scalar tensors (dims->size == 0).
65+
const TfLiteEvalTensor* const tp = eval_tensors + tensor_index;
66+
if (!std::all_of(tp->dims->data, tp->dims->data + tp->dims->size,
67+
[](int dim) { return dim > 0; })) {
68+
return tensor_index;
69+
}
70+
}
71+
72+
return -1;
73+
}
74+
4575
} // namespace
4676

4777
MicroInterpreterGraph::MicroInterpreterGraph(
@@ -117,7 +147,7 @@ TfLiteStatus MicroInterpreterGraph::PrepareSubgraphs() {
117147
if (registration->prepare != nullptr) {
118148
TfLiteStatus prepare_status = registration->prepare(context_, node);
119149
if (prepare_status != kTfLiteOk) {
120-
MicroPrintf("Node %s (number %df) failed to prepare with status %d",
150+
MicroPrintf("Node %s (number %u) failed to prepare with status %d",
121151
OpNameFromRegistration(registration),
122152
current_operator_index_, prepare_status);
123153
return kTfLiteError;
@@ -126,6 +156,18 @@ TfLiteStatus MicroInterpreterGraph::PrepareSubgraphs() {
126156
GetMicroContext(context_)->ResetDecompressionMemoryAllocations();
127157
#endif // USE_TFLM_COMPRESSION
128158
}
159+
160+
const int dynamic_tensor_index = CheckDynamicTensors(
161+
node->outputs, subgraph_allocations_[subgraph_idx].tensors);
162+
if (dynamic_tensor_index != -1) {
163+
MicroPrintf(
164+
"Op#%u (%s) of subgraph %u has dynamic tensor #%d\n"
165+
"Dynamic tensors are not supported",
166+
current_operator_index_, OpNameFromRegistration(registration),
167+
current_subgraph_index_, dynamic_tensor_index);
168+
return kTfLiteError;
169+
}
170+
129171
allocator_->FinishPrepareNodeAllocations(
130172
/*node_id=*/current_operator_index_);
131173
}
@@ -205,6 +247,7 @@ TfLiteStatus MicroInterpreterGraph::InvokeSubgraph(int subgraph_idx) {
205247
subgraph_idx, subgraphs_->size());
206248
return kTfLiteError;
207249
}
250+
TfLiteStatus invoke_status = kTfLiteOk;
208251
uint32_t operators_size = NumSubgraphOperators(model_, subgraph_idx);
209252
for (current_operator_index_ = 0; current_operator_index_ < operators_size;
210253
++current_operator_index_) {
@@ -226,7 +269,7 @@ TfLiteStatus MicroInterpreterGraph::InvokeSubgraph(int subgraph_idx) {
226269
#endif
227270

228271
TFLITE_DCHECK(registration->invoke);
229-
TfLiteStatus invoke_status = registration->invoke(context_, node);
272+
invoke_status = registration->invoke(context_, node);
230273
#ifdef USE_TFLM_COMPRESSION
231274
GetMicroContext(context_)->ResetDecompressionMemoryAllocations();
232275
#endif // USE_TFLM_COMPRESSION
@@ -243,12 +286,15 @@ TfLiteStatus MicroInterpreterGraph::InvokeSubgraph(int subgraph_idx) {
243286
OpNameFromRegistration(registration),
244287
current_operator_index_, invoke_status);
245288
}
246-
return invoke_status;
289+
// make sure to restore subgraph and operator indices
290+
break;
247291
}
248292
}
293+
249294
current_subgraph_index_ = previous_subgraph_idx;
250295
current_operator_index_ = previous_operator_idx;
251-
return kTfLiteOk;
296+
297+
return invoke_status;
252298
}
253299

254300
TfLiteStatus MicroInterpreterGraph::ResetVariableTensors() {

tensorflow/lite/micro/micro_interpreter_test.cc

Lines changed: 32 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
/* Copyright 2024 The TensorFlow Authors. All Rights Reserved.
1+
/* Copyright 2025 The TensorFlow Authors. All Rights Reserved.
22
33
Licensed under the Apache License, Version 2.0 (the "License");
44
you may not use this file except in compliance with the License.
@@ -778,4 +778,35 @@ TF_LITE_MICRO_TEST(TestGetTensorFailsNoLinearMemoryPlanner) {
778778
TF_LITE_MICRO_EXPECT(interpreter.GetTensor(0) == nullptr);
779779
}
780780

781+
TF_LITE_MICRO_TEST(TestDynamicTensorFails) {
782+
tflite::testing::TestingOpResolver op_resolver;
783+
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk,
784+
tflite::testing::GetTestingOpResolver(op_resolver));
785+
786+
constexpr size_t kAllocatorBufferSize = 2000;
787+
uint8_t allocator_buffer[kAllocatorBufferSize];
788+
789+
// Use a new scope for each MicroInterpreter
790+
{
791+
// test with 0 in shape
792+
const tflite::Model* model =
793+
tflite::testing::GetNoOpModelWithTensorShape({3, 2, 0});
794+
TF_LITE_MICRO_EXPECT(nullptr != model);
795+
tflite::MicroInterpreter interpreter(model, op_resolver, allocator_buffer,
796+
kAllocatorBufferSize);
797+
TF_LITE_MICRO_EXPECT_EQ(interpreter.AllocateTensors(), kTfLiteError);
798+
}
799+
800+
// Use a new scope for each MicroInterpreter
801+
{
802+
// test with -1 in shape
803+
const tflite::Model* model =
804+
tflite::testing::GetNoOpModelWithTensorShape({3, 2, -1});
805+
TF_LITE_MICRO_EXPECT(nullptr != model);
806+
tflite::MicroInterpreter interpreter(model, op_resolver, allocator_buffer,
807+
kAllocatorBufferSize);
808+
TF_LITE_MICRO_EXPECT_EQ(interpreter.AllocateTensors(), kTfLiteError);
809+
}
810+
}
811+
781812
TF_LITE_MICRO_TESTS_END

tensorflow/lite/micro/test_helpers.cc

Lines changed: 27 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -103,13 +103,14 @@ class ModelBuilder {
103103
Operator RegisterOp(BuiltinOperator op, const char* custom_code);
104104

105105
// Adds a tensor to the model.
106-
Tensor AddTensor(TensorType type, std::initializer_list<int32_t> shape) {
106+
Tensor AddTensor(TensorType type,
107+
const std::initializer_list<int32_t> shape) {
107108
return AddTensorImpl(type, /* is_variable */ false, shape);
108109
}
109110

110111
// Adds a variable tensor to the model.
111112
Tensor AddVariableTensor(TensorType type,
112-
std::initializer_list<int32_t> shape) {
113+
const std::initializer_list<int32_t> shape) {
113114
return AddTensorImpl(type, /* is_variable */ true, shape);
114115
}
115116

@@ -133,7 +134,7 @@ class ModelBuilder {
133134
private:
134135
// Adds a tensor to the model.
135136
Tensor AddTensorImpl(TensorType type, bool is_variable,
136-
std::initializer_list<int32_t> shape);
137+
const std::initializer_list<int32_t> shape);
137138

138139
flatbuffers::FlatBufferBuilder* builder_;
139140

@@ -1546,6 +1547,23 @@ const Model* BuildSimpleMockModelWithNullInputsOutputs() {
15461547
return model;
15471548
}
15481549

1550+
const Model* BuildNoOpModelWithTensorShape(
1551+
const std::initializer_list<int32_t>& shape) {
1552+
using flatbuffers::Offset;
1553+
flatbuffers::FlatBufferBuilder* fb_builder = BuilderInstance();
1554+
1555+
ModelBuilder model_builder(fb_builder);
1556+
1557+
// build model with 2 tensor outputs, the first with shape [3, 2]
1558+
// and the second with the supplied shape.
1559+
const int op_id = model_builder.RegisterOp(BuiltinOperator_CUSTOM, "no_op");
1560+
const int tensor_0 = model_builder.AddTensor(TensorType_INT8, {3, 2});
1561+
const int tensor_1 = model_builder.AddTensor(TensorType_INT8, shape);
1562+
1563+
model_builder.AddNode(op_id, {}, {tensor_0, tensor_1}, {});
1564+
return model_builder.BuildModel({}, {tensor_0, tensor_1});
1565+
}
1566+
15491567
} // namespace
15501568

15511569
const TFLMRegistration* SimpleStatefulOp::getRegistration() {
@@ -1912,6 +1930,12 @@ const Model* GetSimpleStatefulModel() {
19121930
return model;
19131931
}
19141932

1933+
const Model* GetNoOpModelWithTensorShape(
1934+
const std::initializer_list<int32_t>& shape) {
1935+
// don't cache the model as the tensor shape can be different on each call
1936+
return const_cast<Model*>(BuildNoOpModelWithTensorShape(shape));
1937+
}
1938+
19151939
const Tensor* Create1dFlatbufferTensor(int size, bool is_variable) {
19161940
using flatbuffers::Offset;
19171941
flatbuffers::FlatBufferBuilder* builder = BuilderInstance();

tensorflow/lite/micro/test_helpers.h

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,7 @@ limitations under the License.
1919
#include <algorithm>
2020
#include <cmath>
2121
#include <cstdint>
22+
#include <initializer_list>
2223
#include <limits>
2324
#include <type_traits>
2425

@@ -190,6 +191,11 @@ const Model* GetModelWithIfAndSubgraphInputTensorOverlap();
190191
// Returns a flatbuffer model with null subgraph/operator inputs and outputs.
191192
const Model* GetSimpleModelWithNullInputsAndOutputs();
192193

194+
// Returns a flatbuffer model with no inputs and two outputs, the second
195+
// of which has the supplied shape.
196+
const Model* GetNoOpModelWithTensorShape(
197+
const std::initializer_list<int32_t>& shape);
198+
193199
// Builds a one-dimensional flatbuffer tensor of the given size.
194200
const Tensor* Create1dFlatbufferTensor(int size, bool is_variable = false);
195201

0 commit comments

Comments (0)