
Commit 1355a9a

committed
Merge branch 'ci/sync_gh_tflite-micro' into 'master'
Sync esp-tflite-micro from github - 744684

See merge request app-frameworks/esp-tflite-micro!150
2 parents e63e91b + 7138c52 commit 1355a9a

File tree

13 files changed: +106 additions, -35 deletions

CMakeLists.txt

Lines changed: 2 additions & 1 deletion

@@ -4,6 +4,7 @@ cmake_minimum_required(VERSION 3.5)
 
 set(tflite_dir "${CMAKE_CURRENT_SOURCE_DIR}/tensorflow/lite")
 set(signal_dir "${CMAKE_CURRENT_SOURCE_DIR}/signal")
+set(compiler_mlir_dir "${CMAKE_CURRENT_SOURCE_DIR}/tensorflow/compiler/mlir/")
 set(tfmicro_dir "${tflite_dir}/micro")
 set(tfmicro_frontend_dir "${tflite_dir}/experimental/microfrontend/lib")
 set(tfmicro_kernels_dir "${tfmicro_dir}/kernels")
@@ -77,7 +78,7 @@ set(lib_srcs
   "${tflite_dir}/kernels/internal/tensor_ctypes.cc"
   "${tflite_dir}/kernels/internal/reference/portable_tensor_utils.cc"
   "${tflite_dir}/kernels/internal/reference/comparisons.cc"
-  "${tflite_dir}/schema/schema_utils.cc")
+  "${compiler_mlir_dir}/lite/schema/schema_utils.cc")
 
 set(priv_req)
 

tensorflow/compiler/mlir/lite/kernels/internal/compatibility_macros.h

Lines changed: 21 additions & 0 deletions

@@ -0,0 +1,21 @@
+/* Copyright 2024 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_COMPILER_MLIR_LITE_KERNELS_INTERNAL_COMPATABILITY_MACROS_H_
+#define TENSORFLOW_COMPILER_MLIR_LITE_KERNELS_INTERNAL_COMPATABILITY_MACROS_H_
+
+#include "tensorflow/lite/kernels/internal/compatibility.h"  // IWYU pragma: keep
+
+#endif  // TENSORFLOW_COMPILER_MLIR_LITE_KERNELS_INTERNAL_COMPATABILITY_MACROS_H_
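The new header above is purely a redirect shim: a translation unit that includes the tensorflow/compiler/mlir/... path still gets the existing TFLite compatibility macros. A minimal sketch of the effect, assuming the usual TFLITE_DCHECK_* macros from tensorflow/lite/kernels/internal/compatibility.h; the ClampToInt4 helper is hypothetical and not part of this commit:

// Hypothetical example: the new include path resolves to the same check
// macros that tensorflow/lite/kernels/internal/compatibility.h provides.
#include <cstdint>

#include "tensorflow/compiler/mlir/lite/kernels/internal/compatibility_macros.h"

int8_t ClampToInt4(int8_t value) {
  // TFLITE_DCHECK_* come through the redirected compatibility header.
  TFLITE_DCHECK_GE(value, -8);
  TFLITE_DCHECK_LE(value, 7);
  return value;
}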
tensorflow/compiler/mlir/lite/schema/schema_generated.h

Lines changed: 22 additions & 0 deletions

@@ -0,0 +1,22 @@
+/* Copyright 2024 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#ifndef TENSORFLOW_COMPILER_MLIR_LITE_SCHEMA_SCHEMA_GENERATED_H_
+#define TENSORFLOW_COMPILER_MLIR_LITE_SCHEMA_SCHEMA_GENERATED_H_
+
+// This file should only be used by the make build to redirect schema_utils.cc
+// usage of the generated schema to the proper location.
+#include "tensorflow/lite/schema/schema_generated.h"  // IWYU pragma: keep
+
+#endif  // TENSORFLOW_LITE_SCHEMA_SCHEMA_UTILS_H_

tensorflow/lite/schema/schema_utils.cc renamed to tensorflow/compiler/mlir/lite/schema/schema_utils.cc

Lines changed: 2 additions & 2 deletions

@@ -12,11 +12,11 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 ==============================================================================*/
-#include "tensorflow/lite/schema/schema_utils.h"
+#include "tensorflow/compiler/mlir/lite/schema/schema_utils.h"
 
 #include <algorithm>
 
-#include "tensorflow/lite/kernels/internal/compatibility.h"
+#include "tensorflow/compiler/mlir/lite/kernels/internal/compatibility_macros.h"
 
 namespace tflite {
 

tensorflow/compiler/mlir/lite/schema/schema_utils.h

Lines changed: 33 additions & 0 deletions

@@ -0,0 +1,33 @@
+/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#ifndef TENSORFLOW_COMPILER_MLIR_LITE_SCHEMA_SCHEMA_UTILS_H_
+#define TENSORFLOW_COMPILER_MLIR_LITE_SCHEMA_SCHEMA_UTILS_H_
+
+#include "flatbuffers/flatbuffers.h"
+#include "tensorflow/compiler/mlir/lite/schema/schema_generated.h"
+
+namespace tflite {
+
+// The following methods are introduced to resolve op builtin code shortage
+// problem. The new builtin operator will be assigned to the extended builtin
+// code field in the flatbuffer schema. Those methods helps to hide builtin code
+// details.
+BuiltinOperator GetBuiltinCode(const OperatorCode *op_code);
+
+BuiltinOperator GetBuiltinCode(const OperatorCodeT *op_code);
+
+}  // namespace tflite
+
+#endif  // TENSORFLOW_COMPILER_MLIR_LITE_SCHEMA_SCHEMA_UTILS_H_
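As the header comment notes, GetBuiltinCode hides the split between the legacy builtin_code field and the extended builtin-code field added to relieve the op-code shortage, so callers should go through it rather than read builtin_code() directly. A minimal usage sketch under that assumption; ModelUsesOperator is a hypothetical helper, not part of this commit:

#include "tensorflow/compiler/mlir/lite/schema/schema_utils.h"

// Hypothetical helper: walk a model's operator codes and resolve each one via
// GetBuiltinCode() instead of builtin_code(), so operators stored in the
// extended builtin-code field are recognized as well.
bool ModelUsesOperator(const tflite::Model* model,
                       tflite::BuiltinOperator wanted) {
  if (model == nullptr || model->operator_codes() == nullptr) return false;
  for (const tflite::OperatorCode* op_code : *model->operator_codes()) {
    if (tflite::GetBuiltinCode(op_code) == wanted) return true;
  }
  return false;
}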

tensorflow/lite/core/macros.h

Lines changed: 0 additions & 12 deletions

@@ -65,16 +65,4 @@ limitations under the License.
 #define TFLITE_HAS_ATTRIBUTE_WEAK 0
 #endif
 
-#ifndef TF_LITE_STATIC_MEMORY
-// maximum size of a valid flatbuffer
-inline constexpr unsigned int flatbuffer_size_max = 2147483648;
-// If none zero then the buffer is stored outside of the flatbuffers, string
-inline constexpr char tflite_metadata_buffer_location[] = "buffer_location";
-// field for minimum runtime version, string
-inline constexpr char tflite_metadata_min_runtime_version[] =
-    "min_runtime_version";
-// the stablehlo op version is supported by the tflite runtime
-inline constexpr char tflite_supported_stablehlo_version[] = "1.0.0";
-#endif
-
 #endif  // TENSORFLOW_LITE_CORE_MACROS_H_

tensorflow/lite/kernels/internal/portable_tensor_utils.cc

Lines changed: 6 additions & 0 deletions

@@ -70,6 +70,12 @@ void ApplySignbitToVector(const float* __restrict__ vector, int v_size,
 
 void UnpackDenseInt4IntoInt8(const int8_t* src_buffer, int num_elements,
                              int8_t* dst_buffer) {
+  // num_elements means the number of elements regardless of packed or unpacked.
+  // For example, 3 elements means both
+  // 1) Packed: 3 int4's = 12 bit -> 16 bits (padded) = 2 bytes.
+  //    stored in src_buffer[0] and src_buffer[1] (i = 0..1)
+  // 2) Unpacked: 3 int8's = 3 bytes.
+  //    stored in dst_buffer[0], dst_buffer[1] and dst_buffer[2] (j = 0..2)
   for (int i = 0; i < num_elements / 2; i++) {
     int8_t byte = src_buffer[i];
     // Shift left first so that sign is properly extended when shifted right
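The new comment is easiest to follow with concrete numbers. Below is a small standalone sketch of the nibble arithmetic it describes, assuming the even-indexed element lives in the low nibble of each packed byte; the "shift left first" trick mirrors the note in the diff. It is illustrative only and does not call the library function:

#include <cstdint>
#include <cstdio>

int main() {
  // Three int4 values (range [-8, 7]) packed into two bytes, as in the comment.
  const int8_t values[3] = {3, -2, -8};

  // Pack: byte 0 holds values[0] (low nibble) and values[1] (high nibble);
  // byte 1 holds values[2] in its low nibble, with the high nibble as padding.
  int8_t packed[2];
  packed[0] = static_cast<int8_t>((values[0] & 0x0F) | ((values[1] & 0x0F) << 4));
  packed[1] = static_cast<int8_t>(values[2] & 0x0F);

  // Unpack: place each nibble in the high bits first so its sign bit is
  // extended by the arithmetic shift back to the right.
  int8_t unpacked[3];
  unpacked[0] = static_cast<int8_t>((packed[0] & 0x0F) << 4) >> 4;
  unpacked[1] = static_cast<int8_t>(packed[0] >> 4);
  unpacked[2] = static_cast<int8_t>((packed[1] & 0x0F) << 4) >> 4;

  for (int j = 0; j < 3; ++j) {
    std::printf("element %d = %d\n", j, unpacked[j]);  // prints 3, -2, -8
  }
  return 0;
}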

tensorflow/lite/micro/micro_allocation_info.h

Lines changed: 2 additions & 1 deletion

@@ -129,7 +129,8 @@ class AllocationInfoBuilder {
 
   const tflite::Model* model_ = nullptr;
   INonPersistentBufferAllocator* non_persistent_allocator_ = nullptr;
-  GraphAllocationInfo info_;
+  GraphAllocationInfo info_ =
+      {};  // Prevents problems caused by accessing uninitialized memory.
   int allocation_scope_count_ = 0;
 };
 

tensorflow/lite/micro/micro_allocator.h

Lines changed: 3 additions & 1 deletion

@@ -322,7 +322,9 @@ class MicroAllocator {
   IPersistentBufferAllocator* persistent_buffer_allocator_;
 
   // Allocator used to allocate persistent builtin data.
-  TfLiteBridgeBuiltinDataAllocator* builtin_data_allocator_;
+  TfLiteBridgeBuiltinDataAllocator* builtin_data_allocator_ =
+      nullptr;  // Initialized as nullptr to prevent any possible issues related
+                // to accessing uninitialized memory.
 
   // Activation buffer memory planner.
   MicroMemoryPlanner* memory_planner_;

tensorflow/lite/micro/micro_interpreter_graph.h

Lines changed: 3 additions & 1 deletion

@@ -106,7 +106,9 @@ class MicroInterpreterGraph : public MicroGraph {
   int current_subgraph_index_;
   uint32_t current_operator_index_;
   MicroResourceVariables* resource_variables_;
-  const flatbuffers::Vector<flatbuffers::Offset<SubGraph>>* subgraphs_;
+  const flatbuffers::Vector<flatbuffers::Offset<SubGraph>>* subgraphs_ =
+      nullptr;  // Initialized as nullptr to prevent any possible issues
+                // related to accessing uninitialized memory.
 
   TF_LITE_REMOVE_VIRTUAL_DELETE
 };
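The last three changes share one motivation: members that previously had no initializer now get in-class defaults, so nothing reads indeterminate memory if an object is used before the interpreter's setup code runs. A minimal sketch of the idiom; both types below are hypothetical, for illustration only:

#include <cstdint>

struct GraphInfoExample {          // stand-in for an aggregate like GraphAllocationInfo
  int buffer_count = 0;
  int subgraph_count = 0;
};

class ExampleOwner {               // stand-in for the classes patched above
 public:
  bool Ready() const {
    // Safe even before any setup step: members start from known values
    // instead of indeterminate memory.
    return data_ != nullptr && info_.buffer_count > 0;
  }

 private:
  GraphInfoExample info_ = {};     // value-initialized, like info_ in the diff
  const uint8_t* data_ = nullptr;  // like builtin_data_allocator_ / subgraphs_
};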
