Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions examples/mobilenet/CMakeLists.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
# Top-level project file for the mobilenet example.
cmake_minimum_required(VERSION 3.5)
# Pull in the ESP-IDF build system; IDF_PATH must be set in the environment.
include($ENV{IDF_PATH}/tools/cmake/project.cmake)
project(mobilenet)
20 changes: 20 additions & 0 deletions examples/mobilenet/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,20 @@

This example shows how to run a MobileNet V2 model using `esp-tflite-micro`.
The model is int8-quantised and uses a width multiplier of `alpha=0.35`.

**Note:** MobileNet V2's classifier head can be easily modified to detect a custom number of labels after some re-training and fine-tuning. We have created another example to show how to do this, along with a Jupyter notebook to generate your own custom models. Check it out in the `mobilenet_custom` example.

## How to test with your own images?
This example uses raw image data stored in the `image.cc` file. You can easily run it with your own images by doing the following:

1. Convert image to raw format
```bash
python img_convert.py <img_path>
```

2. Convert raw file to .cc file
```bash
xxd -i <raw_img_path> >> main/image.cc
```

3. Rename the `xxd`-generated variables to `image_raw` and `image_raw_len`, and add an `#include "image.h"` at the top of `image.cc`
19 changes: 19 additions & 0 deletions examples/mobilenet/img_convert.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@

import sys
import numpy as np
from tensorflow.keras.utils import load_img, img_to_array
from array import array

def convert_and_save(path, out_path="img.raw"):
    """Resize an image to 224x224 and write its raw uint8 pixel bytes.

    Args:
        path: Path of the input image (any format Keras ``load_img`` accepts).
        out_path: Destination file for the raw bytes (default ``"img.raw"``,
            matching the filename the README's ``xxd`` step expects).
    """
    # Resize to the 224x224 input resolution MobileNet V2 expects.
    img = img_to_array(load_img(path, target_size=(224, 224)))
    # Flatten to a 1-D uint8 byte sequence and write it verbatim.
    img = array('B', img.flatten().astype(np.uint8))
    with open(out_path, "wb") as f:
        f.write(img)

if __name__ == "__main__":
    # Require exactly one argument: the image to convert.
    if len(sys.argv) < 2:
        # Usage text names this script's actual file (img_convert.py, as in
        # the README) — the previous message said "image_convert.py".
        print("Usage: python img_convert.py img_path")
        sys.exit(1)  # non-zero exit: the invocation was an error

    img_path = sys.argv[1]
    convert_and_save(img_path)
4 changes: 4 additions & 0 deletions examples/mobilenet/main/CMakeLists.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
# Register this example's main component: the app entry point plus the
# embedded model and test image generated as C arrays.
idf_component_register(
SRCS "main.cc" "model.cc" "image.cc"
PRIV_REQUIRES
INCLUDE_DIRS "")
4 changes: 4 additions & 0 deletions examples/mobilenet/main/idf_component.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
# Component manifest: this example depends on esp-tflite-micro.
dependencies:
  espressif/esp-tflite-micro:
    version: "*"                # any published version is acceptable
    override_path: "../../../"  # use the local checkout (repo root) instead of the registry
12,549 changes: 12,549 additions & 0 deletions examples/mobilenet/main/image.cc

Large diffs are not rendered by default.

5 changes: 5 additions & 0 deletions examples/mobilenet/main/image.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@

#pragma once

// Raw test image bytes generated by `xxd -i` from img_convert.py's output.
// NOTE(review): assumed to be 224x224x3 uint8 pixels (the size the converter
// script emits) — confirm if the image is regenerated with other settings.
extern const unsigned char image_raw[];
// Number of bytes in image_raw.
extern const unsigned int image_raw_len;
108 changes: 108 additions & 0 deletions examples/mobilenet/main/main.cc
Original file line number Diff line number Diff line change
@@ -0,0 +1,108 @@

#include <freertos/FreeRTOS.h>
#include <esp_heap_caps.h>  // heap_caps_malloc / MALLOC_CAP_SPIRAM
#include <esp_log.h>
#include <esp_timer.h>

#include <cmath>  // std::round (input quantization)

#include <tensorflow/lite/micro/micro_interpreter.h>
#include <tensorflow/lite/micro/micro_log.h>
#include <tensorflow/lite/micro/micro_mutable_op_resolver.h>
#include <tensorflow/lite/micro/micro_profiler.h>
#include <tensorflow/lite/schema/schema_generated.h>

#include "model.h"
#include "image.h"

// Tag used for ESP-IDF logging from this translation unit.
static const char* TAG = "mobilenet";

namespace {
// The loaded TFLite flatbuffer model (set in app_main).
const tflite::Model* model = nullptr;
// Interpreter driving inference; points at a function-local static in app_main.
tflite::MicroInterpreter* interpreter = nullptr;

// Tensor arena size: 1.5 MiB, allocated from SPIRAM at startup because it is
// too large for internal RAM.
constexpr int kTensorArenaSize = 1.5 * 1024 * 1024;
static uint8_t* tensor_arena;

// Cached input/output tensor handles, valid after AllocateTensors().
TfLiteTensor* input;
TfLiteTensor* output;
}  // namespace

// Quantize a float value into the model's int8 input domain using the input
// tensor's affine quantization parameters: q = round(r / scale) + zero_point.
// The previous version truncated instead of rounding and did not saturate,
// so out-of-range values wrapped on the int8 narrowing conversion.
int8_t quantize(float val) {
    auto zero_point = input->params.zero_point;
    auto scale = input->params.scale;
    float q = std::round(val / scale) + zero_point;
    // Clamp to the int8 range so out-of-range inputs saturate instead of wrapping.
    if (q < -128.0f) q = -128.0f;
    if (q > 127.0f) q = 127.0f;
    return static_cast<int8_t>(q);
}

// Map an int8 value from the model's output tensor back to a float score
// using the output tensor's affine quantization parameters:
// r = (q - zero_point) * scale.
float dequantize(int8_t val) {
    const auto zp = output->params.zero_point;
    const auto s = output->params.scale;
    return static_cast<float>(val - zp) * s;
}

// Application entry point: load the MobileNet V2 model, run one inference on
// the embedded test image, and print the top-1 label index and confidence.
extern "C" void app_main(void)
{
    // Map the flatbuffer and verify it matches the schema this runtime supports.
    model = tflite::GetModel(esp_mobile_net_model);
    if (model->version() != TFLITE_SCHEMA_VERSION) {
        MicroPrintf("Model provided is schema version %d not equal to supported version %d.", model->version(), TFLITE_SCHEMA_VERSION);
        return;  // an incompatible model cannot be interpreted safely
    }

    // Lazily allocate the tensor arena from external SPIRAM.
    if (tensor_arena == NULL) {
        tensor_arena = (uint8_t*)heap_caps_malloc(kTensorArenaSize, MALLOC_CAP_SPIRAM | MALLOC_CAP_8BIT);
    }
    if (tensor_arena == NULL) {
        printf("Couldn't allocate memory of %d bytes\n", kTensorArenaSize);
        return;
    }
    ESP_LOGI(TAG, "Allocated memory for Tensor Arena");

    // Register only the ops MobileNet V2 actually uses (keeps the binary small).
    static tflite::MicroMutableOpResolver<7> micro_op_resolver;
    micro_op_resolver.AddRelu6();
    micro_op_resolver.AddConv2D();
    micro_op_resolver.AddDepthwiseConv2D();
    micro_op_resolver.AddAdd();
    micro_op_resolver.AddMean();
    micro_op_resolver.AddFullyConnected();
    micro_op_resolver.AddSoftmax();

    static tflite::MicroInterpreter static_interpreter(
        model, micro_op_resolver, tensor_arena, kTensorArenaSize
    );
    interpreter = &static_interpreter;

    if (interpreter->AllocateTensors() != kTfLiteOk) {
        MicroPrintf("AllocateTensors() failed");
        return;
    }
    ESP_LOGI(TAG, "Allocated Tensors");

    input = interpreter->input(0);
    output = interpreter->output(0);

    // Copy the test image into the input tensor, mapping each uint8 pixel to
    // [-1, 1] (standard MobileNet preprocessing) before quantizing to int8.
    // Bound the copy by the tensor size as well, so an oversized image array
    // cannot overflow the input buffer.
    const size_t copy_len = (image_raw_len < input->bytes) ? image_raw_len : input->bytes;
    for (size_t i = 0; i < copy_len; i++) {
        input->data.int8[i] = quantize(((uint8_t)image_raw[i] / 127.5) - 1);
    }

    long long start_time = esp_timer_get_time();
    if (interpreter->Invoke() != kTfLiteOk) {
        MicroPrintf("Invoke() failed");
        return;  // don't report success or read stale outputs on failure
    }
    long long total_time = esp_timer_get_time() - start_time;
    ESP_LOGI(TAG, "Invoke was successful");
    // esp_timer_get_time() is in microseconds; report milliseconds.
    printf("Invoke: Total time = %lld ms\n", total_time / 1000);

    // Argmax over the class scores. output->bytes equals the class count for
    // an int8 tensor (one byte per score), so this adapts to any head size
    // instead of hardcoding 1000. Seeding from element 0 keeps the result
    // correct even when every dequantized score is negative.
    int max_label = 0;
    float max_conf = dequantize(output->data.int8[0]);
    for (size_t i = 1; i < output->bytes; i++) {
        float conf = dequantize(output->data.int8[i]);
        if (conf > max_conf) {
            max_label = (int)i;
            max_conf = conf;
        }
    }

    printf("\nLabel: %d, Confidence: %f\n", max_label, max_conf);
}
Loading