Friday, April 19, 2024

Landslides from drone imagery with a UNET network

While looking for ready-made training images for neural networks I came across the CAS Landslide Database (downloadable from https://zenodo.org/records/10294997): several GB of tif images of landslides, from both satellite and drone, with the ground-truth mask already provided. (I had already experimented with this in this post.)

The database is the subject of a Nature article.

This link also reports a comparative table of several methods developed on the same dataset.


 

As a test I abandoned Colab to install Tensorflow and Jupyter Notebook on a Mac Air M1 and see whether it could be a development base.

conda config --set auto_activate_base false

conda create --name mlp python=3.8 

conda activate mlp   

conda install -c apple tensorflow-deps    

pip install tensorflow-macos   

pip install tensorflow-metal

conda install jupyter pandas numpy matplotlib scikit-learn

jupyter notebook        
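To check that the Metal plugin is actually being used, a quick test like the following (my addition, not part of the original setup) should list the M1 GPU; an empty list means training silently falls back to the CPU.

import tensorflow as tf

print(tf.__version__)
# tensorflow-metal registers the Apple GPU as a PluggableDevice
print(tf.config.list_physical_devices('GPU'))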


The images of the Moxitaidi (UAV-0.6m) dataset were converted to jpg (magick mogrify -format jpg *.tif) and resized to 256x256 (mogrify -resize 256x256 *.jpg) with imagemagick; the masks were renamed with the bash command (for f in *.png; do mv "$f" "${f//mask/image}"; done). Compared to what I tried here, the number of epochs was reduced to 40. The M1 behaved admirably speed-wise (about 70 seconds per epoch).
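For reference, the same preprocessing could also be scripted in Python with Pillow; this is only a sketch equivalent to the imagemagick/bash commands above (the source folder name UAVtif is a placeholder).

import os
from PIL import Image

src_dir = './UAVtif/'          # placeholder: folder with the original tif tiles
img_dir = './UAVjpg256/img/'
os.makedirs(img_dir, exist_ok=True)

# convert tif -> jpg and resize to 256x256, like magick mogrify above
for name in os.listdir(src_dir):
    if name.lower().endswith('.tif'):
        tile = Image.open(os.path.join(src_dir, name)).convert('RGB')
        tile.resize((256, 256)).save(os.path.join(img_dir, name[:-4] + '.jpg'))

# rename the masks so mask and image share the same file name, like the bash loop above
mask_dir = './UAVjpg256/mask/'
for name in os.listdir(mask_dir):
    if 'mask' in name:
        os.rename(os.path.join(mask_dir, name),
                  os.path.join(mask_dir, name.replace('mask', 'image')))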


#!/usr/bin/env python
# coding: utf-8

# In[1]:


from functools import partial
import os

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow import keras


# In[2]:


images_dir = './UAVjpg256/img/'
masks_dir = './UAVjpg256/mask/'

dirname, _, filenames = next(os.walk(images_dir))


@tf.function
def load_img_with_mask(image_path, images_dir: str = 'img', masks_dir: str = 'mask',
                       images_extension: str = 'jpg', masks_extension: str = 'jpg') -> dict:
    image = tf.io.read_file(image_path)
    image = tf.image.decode_jpeg(image, channels=3)
    # build the mask path from the image path: swap folder name and extension
    mask_filename = tf.strings.regex_replace(image_path, images_dir, masks_dir)
    mask_filename = tf.strings.regex_replace(mask_filename, images_extension, masks_extension)
    mask = tf.io.read_file(mask_filename)
    mask = tf.image.decode_image(mask, channels=1, expand_animations=False)
    return (image, mask)

n_examples = 3
examples = [load_img_with_mask(os.path.join(images_dir, filenames[i])) for i in range(n_examples)]

fig, axs = plt.subplots(n_examples, 2, figsize=(14, n_examples*7), constrained_layout=True)
for ax, (image, mask) in zip(axs, examples):
    ax[0].imshow(image)
    ax[1].imshow(mask)
plt.show()


# In[3]:


@tf.function
def resize_images(images, masks, max_image_size=1500):
    shape = tf.shape(images)
    scale = (tf.reduce_max(shape) // max_image_size) + 1
    target_height, target_width = shape[-3] // scale, shape[-2] // scale
    images = tf.cast(images, tf.float32)
    masks = tf.cast(masks, tf.float32)
    if scale != 1:
        images = tf.image.resize(images, (target_height, target_width), method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
        masks = tf.image.resize(masks, (target_height, target_width), method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
    return (images, masks)

@tf.function
def scale_values(images, masks, mask_split_threshold=128):
    images = tf.math.divide(images, 255)
    masks = tf.where(masks > mask_split_threshold, 1, 0)
    return (images, masks)

@tf.function
def pad_images(images, masks, pad_mul=16, offset=0):
    shape = tf.shape(images)
    height, width = shape[-3], shape[-2]
    # pad height and width up to the next multiple of pad_mul
    target_height = height + tf.math.floormod(tf.math.negative(height), pad_mul)
    target_width = width + tf.math.floormod(tf.math.negative(width), pad_mul)
    images = tf.image.pad_to_bounding_box(images, offset, offset, target_height, target_width)
    masks = tf.cast(tf.image.pad_to_bounding_box(masks, offset, offset, target_height, target_width), tf.uint8)
    return (images, masks)

batch_size = 32
test_set_size = 200
validation_set_size = 150


# In[4]:


dataset = tf.data.Dataset.list_files(images_dir + '*.jpg', seed=42)

test_dataset = dataset.take(test_set_size)
dataset = dataset.skip(test_set_size)
test_dataset = test_dataset.map(load_img_with_mask)
test_dataset = test_dataset.map(scale_values)
test_dataset = test_dataset.shuffle(20)
test_dataset = test_dataset.map(lambda img, mask: resize_images(img, mask, max_image_size=2500))
test_dataset = test_dataset.map(pad_images)
test_dataset = test_dataset.batch(1).prefetch(5)


validation_dataset = dataset.take(validation_set_size)
train_dataset = dataset.skip(validation_set_size)
validation_dataset = validation_dataset.map(load_img_with_mask)
validation_dataset = validation_dataset.map(scale_values)
validation_dataset = validation_dataset.shuffle(20)
validation_dataset = validation_dataset.map(resize_images)
validation_dataset = validation_dataset.map(pad_images)
validation_dataset = validation_dataset.batch(1).prefetch(5)

train_dataset = train_dataset.map(load_img_with_mask)
train_dataset = train_dataset.map(scale_values)
train_dataset = train_dataset.shuffle(20)
train_dataset = train_dataset.map(resize_images)
train_dataset = train_dataset.map(pad_images)
train_dataset = train_dataset.batch(1).prefetch(5)


# In[5]:


def get_unet(hidden_activation='relu', initializer='he_normal', output_activation='sigmoid'):
    PartialConv = partial(keras.layers.Conv2D,
                          activation=hidden_activation,
                          kernel_initializer=initializer,
                          padding='same')
    # Encoder
    model_input = keras.layers.Input(shape=(None, None, 3))
    enc_cov_1 = PartialConv(32, 3)(model_input)
    enc_cov_1 = PartialConv(32, 3)(enc_cov_1)
    enc_pool_1 = keras.layers.MaxPooling2D(pool_size=(2, 2))(enc_cov_1)
    enc_cov_2 = PartialConv(64, 3)(enc_pool_1)
    enc_cov_2 = PartialConv(64, 3)(enc_cov_2)
    enc_pool_2 = keras.layers.MaxPooling2D(pool_size=(2, 2))(enc_cov_2)
    enc_cov_3 = PartialConv(128, 3)(enc_pool_2)
    enc_cov_3 = PartialConv(128, 3)(enc_cov_3)
    enc_pool_3 = keras.layers.MaxPooling2D(pool_size=(2, 2))(enc_cov_3)
    # Center
    center_cov = PartialConv(256, 3)(enc_pool_3)
    center_cov = PartialConv(256, 3)(center_cov)
    # Decoder with skip connections from the encoder
    upsampling1 = keras.layers.UpSampling2D(size=(2, 2))(center_cov)
    dec_up_conv_1 = PartialConv(128, 2)(upsampling1)
    dec_merged_1 = tf.keras.layers.Concatenate(axis=3)([enc_cov_3, dec_up_conv_1])
    dec_conv_1 = PartialConv(128, 3)(dec_merged_1)
    dec_conv_1 = PartialConv(128, 3)(dec_conv_1)
    upsampling2 = keras.layers.UpSampling2D(size=(2, 2))(dec_conv_1)
    dec_up_conv_2 = PartialConv(64, 2)(upsampling2)
    dec_merged_2 = tf.keras.layers.Concatenate(axis=3)([enc_cov_2, dec_up_conv_2])
    dec_conv_2 = PartialConv(64, 3)(dec_merged_2)
    dec_conv_2 = PartialConv(64, 3)(dec_conv_2)
    upsampling3 = keras.layers.UpSampling2D(size=(2, 2))(dec_conv_2)
    dec_up_conv_3 = PartialConv(32, 2)(upsampling3)
    dec_merged_3 = tf.keras.layers.Concatenate(axis=3)([enc_cov_1, dec_up_conv_3])
    dec_conv_3 = PartialConv(32, 3)(dec_merged_3)
    dec_conv_3 = PartialConv(32, 3)(dec_conv_3)
    # 1x1 convolution -> per-pixel probability map
    output = keras.layers.Conv2D(1, 1, activation=output_activation)(dec_conv_3)
    return tf.keras.Model(inputs=model_input, outputs=output)



model = get_unet()

optimizer = tf.keras.optimizers.Nadam()
model.compile(loss='binary_crossentropy', optimizer=optimizer)

model.summary()


# In[6]:


early_stopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=5)
lr_reduce = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.3, patience=3, verbose=1)

epochs = 40
history = model.fit(train_dataset, validation_data=validation_dataset, epochs=epochs, callbacks=[early_stopping, lr_reduce])


# In[7]:


get_ipython().system('mkdir -p saved_model')
model.save('saved_model/landslide_drone')


# In[8]:


converter = tf.lite.TFLiteConverter.from_saved_model('saved_model/landslide_drone')
tflite_model = converter.convert()

with open('model.tflite', 'wb') as f:
    f.write(tflite_model)


# In[9]:


n_examples = 10

fig, axs = plt.subplots(n_examples, 3, figsize=(14, n_examples*7), constrained_layout=True)
for ax, ele in zip(axs, test_dataset.take(n_examples)):
    image, y_true = ele
    prediction = model.predict(image)[0]
    prediction = tf.where(prediction > 0.6, 255, 0)
    ax[0].set_title('Original image')
    ax[0].imshow(image[0])
    ax[1].set_title('Original mask')
    ax[1].imshow(y_true[0])
    ax[2].set_title('Predicted area')
    ax[2].imshow(prediction)

plt.show()


# In[10]:


meanIoU = tf.keras.metrics.MeanIoU(num_classes=2)
for ele in test_dataset.take(test_set_size):
    image, y_true = ele
    prediction = model.predict(image)[0]
    prediction = tf.where(prediction > 0.5, 1, 0)
    meanIoU.update_state(y_true[0], prediction)
print(meanIoU.result().numpy())

These are two examples of the results




Tuesday, April 16, 2024

Electronic Cigarette

Some time ago I saw a documentary on Swiss RAI about the polluting burden of disposable electronic cigarettes

I found one discarded in front of my house, so the teardown began


Fundamentally it is extremely simple... there is a container with a white material (cotton, I think) soaked in the substance to be inhaled, a battery, and two wires connected to a sort of metal mesh that heats up by the Joule effect


The battery is the "quality" part (a 13100 model, 3.7 V): a lithium cell rated at 550 mAh


The base contains a perforated capsule which I believe is a pressure sensor acting as a switch (something like this: https://www.electroncomponents.com/micro-air-pressure-sensor-for-electronic-cigarette)




They certainly should not be thrown on the ground, and just as certainly they would need proper disposal

I checked the residual voltage of the battery: it was below 3 V

Digging around the Internet I found a video of exactly the same model







Sunday, April 14, 2024

Tensorflow UNET segmentation for landslides

I found this post on Kaggle applying UNET neural-network segmentation to water bodies

In a nutshell: unlike Yolo, segmentation does not happen through a box around the object of interest but through a pixel mask of arbitrary shape
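To make the difference concrete: a bounding box can always be derived from a mask, while the opposite is not true. A toy numpy sketch (my example, not from the Kaggle post):

import numpy as np

# toy 8x8 binary mask with an arbitrarily shaped blob
mask = np.zeros((8, 8), dtype=np.uint8)
mask[2:5, 3:7] = 1
mask[5, 4] = 1

# the YOLO-style box is just the smallest rectangle enclosing the blob
rows, cols = np.where(mask == 1)
print('box (x0, y0, x1, y1):', (cols.min(), rows.min(), cols.max(), rows.max()))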

Does it work for landslides? (Obviously I am not the first to think of it... I just wanted to try.) On Kaggle I found this dataset with images and masks already available (it is a subset of a much larger, 3 GB dataset created for the Landslide4sense 2022 competition, available at this address)


The dataset contains image pairs. The first image is a 128x128-pixel Sentinel-2 true-color tile

The second is a binary mask, again 128x128 pixels (landslide / no landslide), produced as ground truth by human photointerpretation



The color arrangement is peculiar, as the histogram shows: each color is a class and the classes are consecutive starting from black. If you open the mask file in Gimp you see nothing unless you adjust the color curve
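A quick way to inspect such a mask without Gimp is to stretch its values programmatically; a minimal sketch ('mask.png' is a placeholder file name):

import numpy as np
from PIL import Image

mask = np.array(Image.open('mask.png'))   # placeholder file name
print(np.unique(mask))                    # classes are small consecutive integers from 0
# stretch to the full 0-255 range so the classes become visible
stretched = (mask.astype(np.float32) / max(int(mask.max()), 1) * 255).astype(np.uint8)
Image.fromarray(stretched).save('mask_view.png')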



Using Colab and its attached GPU, I applied the water-bodies script to the landslide dataset

The only change I made to the dataset was renaming the masks: the file name in the Images folder must be identical to the corresponding one in the Masks folder (a quick pairing check is sketched below)
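A sanity check of mine that every image has a same-named mask, using the Images/Masks paths of the script below:

import os

images = set(os.listdir('water/Images'))
masks = set(os.listdir('water/Masks'))
print('images without a mask:', sorted(images - masks))
print('masks without an image:', sorted(masks - images))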



from functools import partial
import os

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow import keras




images_dir = 'water/Images'
masks_dir = 'water/Masks'

dirname, _, filenames = next(os.walk(images_dir))



@tf.function
def load_img_with_mask(image_path, images_dir: str = 'Images', masks_dir: str = 'Masks',
                       images_extension: str = 'jpg', masks_extension: str = 'jpg') -> dict:
    image = tf.io.read_file(image_path)
    image = tf.image.decode_jpeg(image, channels=3)
    # build the mask path from the image path: swap folder name and extension
    mask_filename = tf.strings.regex_replace(image_path, images_dir, masks_dir)
    mask_filename = tf.strings.regex_replace(mask_filename, images_extension, masks_extension)
    mask = tf.io.read_file(mask_filename)
    mask = tf.image.decode_image(mask, channels=1, expand_animations=False)
    return (image, mask)

n_examples = 3
examples = [load_img_with_mask(os.path.join(images_dir, filenames[i])) for i in range(n_examples)]

fig, axs = plt.subplots(n_examples, 2, figsize=(14, n_examples*7), constrained_layout=True)
for ax, (image, mask) in zip(axs, examples):
    ax[0].imshow(image)
    ax[1].imshow(mask)
plt.show()



@tf.function
def resize_images(images, masks, max_image_size=1500):
    shape = tf.shape(images)
    scale = (tf.reduce_max(shape) // max_image_size) + 1
    target_height, target_width = shape[-3] // scale, shape[-2] // scale
    images = tf.cast(images, tf.float32)
    masks = tf.cast(masks, tf.float32)
    if scale != 1:
        images = tf.image.resize(images, (target_height, target_width), method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
        masks = tf.image.resize(masks, (target_height, target_width), method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
    return (images, masks)

@tf.function
def scale_values(images, masks, mask_split_threshold=128):
    images = tf.math.divide(images, 255)
    masks = tf.where(masks > mask_split_threshold, 1, 0)
    return (images, masks)

@tf.function
def pad_images(images, masks, pad_mul=16, offset=0):
    shape = tf.shape(images)
    height, width = shape[-3], shape[-2]
    # pad height and width up to the next multiple of pad_mul
    target_height = height + tf.math.floormod(tf.math.negative(height), pad_mul)
    target_width = width + tf.math.floormod(tf.math.negative(width), pad_mul)
    images = tf.image.pad_to_bounding_box(images, offset, offset, target_height, target_width)
    masks = tf.cast(tf.image.pad_to_bounding_box(masks, offset, offset, target_height, target_width), tf.uint8)
    return (images, masks)

batch_size = 32
test_set_size = 300
validation_set_size = 250

dataset = tf.data.Dataset.list_files(images_dir + '/*.jpg', seed=42)

test_dataset = dataset.take(test_set_size)
dataset = dataset.skip(test_set_size)
test_dataset = test_dataset.map(load_img_with_mask)
test_dataset = test_dataset.map(scale_values)
test_dataset = test_dataset.shuffle(20)
test_dataset = test_dataset.map(lambda img, mask: resize_images(img, mask, max_image_size=2500))
test_dataset = test_dataset.map(pad_images)
test_dataset = test_dataset.batch(1).prefetch(5)


validation_dataset = dataset.take(validation_set_size)
train_dataset = dataset.skip(validation_set_size)
validation_dataset = validation_dataset.map(load_img_with_mask)
validation_dataset = validation_dataset.map(scale_values)
validation_dataset = validation_dataset.shuffle(20)
validation_dataset = validation_dataset.map(resize_images)
validation_dataset = validation_dataset.map(pad_images)
validation_dataset = validation_dataset.batch(1).prefetch(5)

train_dataset = train_dataset.map(load_img_with_mask)
train_dataset = train_dataset.map(scale_values)
train_dataset = train_dataset.shuffle(20)
train_dataset = train_dataset.map(resize_images)
train_dataset = train_dataset.map(pad_images)
train_dataset = train_dataset.batch(1).prefetch(5)

def get_unet(hidden_activation='relu', initializer='he_normal', output_activation='sigmoid'):
    PartialConv = partial(keras.layers.Conv2D,
                          activation=hidden_activation,
                          kernel_initializer=initializer,
                          padding='same')
    # Encoder
    model_input = keras.layers.Input(shape=(None, None, 3))
    enc_cov_1 = PartialConv(32, 3)(model_input)
    enc_cov_1 = PartialConv(32, 3)(enc_cov_1)
    enc_pool_1 = keras.layers.MaxPooling2D(pool_size=(2, 2))(enc_cov_1)
    enc_cov_2 = PartialConv(64, 3)(enc_pool_1)
    enc_cov_2 = PartialConv(64, 3)(enc_cov_2)
    enc_pool_2 = keras.layers.MaxPooling2D(pool_size=(2, 2))(enc_cov_2)
    enc_cov_3 = PartialConv(128, 3)(enc_pool_2)
    enc_cov_3 = PartialConv(128, 3)(enc_cov_3)
    enc_pool_3 = keras.layers.MaxPooling2D(pool_size=(2, 2))(enc_cov_3)
    # Center
    center_cov = PartialConv(256, 3)(enc_pool_3)
    center_cov = PartialConv(256, 3)(center_cov)
    # Decoder with skip connections from the encoder
    upsampling1 = keras.layers.UpSampling2D(size=(2, 2))(center_cov)
    dec_up_conv_1 = PartialConv(128, 2)(upsampling1)
    dec_merged_1 = tf.keras.layers.Concatenate(axis=3)([enc_cov_3, dec_up_conv_1])
    dec_conv_1 = PartialConv(128, 3)(dec_merged_1)
    dec_conv_1 = PartialConv(128, 3)(dec_conv_1)
    upsampling2 = keras.layers.UpSampling2D(size=(2, 2))(dec_conv_1)
    dec_up_conv_2 = PartialConv(64, 2)(upsampling2)
    dec_merged_2 = tf.keras.layers.Concatenate(axis=3)([enc_cov_2, dec_up_conv_2])
    dec_conv_2 = PartialConv(64, 3)(dec_merged_2)
    dec_conv_2 = PartialConv(64, 3)(dec_conv_2)
    upsampling3 = keras.layers.UpSampling2D(size=(2, 2))(dec_conv_2)
    dec_up_conv_3 = PartialConv(32, 2)(upsampling3)
    dec_merged_3 = tf.keras.layers.Concatenate(axis=3)([enc_cov_1, dec_up_conv_3])
    dec_conv_3 = PartialConv(32, 3)(dec_merged_3)
    dec_conv_3 = PartialConv(32, 3)(dec_conv_3)
    # 1x1 convolution -> per-pixel probability map
    output = keras.layers.Conv2D(1, 1, activation=output_activation)(dec_conv_3)
    return tf.keras.Model(inputs=model_input, outputs=output)



model = get_unet()

optimizer = tf.keras.optimizers.Nadam()
model.compile(loss='binary_crossentropy', optimizer=optimizer)

model.summary()



early_stopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=5)
lr_reduce = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.3, patience=3, verbose=1)

epochs = 80
history = model.fit(train_dataset, validation_data=validation_dataset, epochs=epochs, callbacks=[early_stopping, lr_reduce])


n_examples = 10

fig, axs = plt.subplots(n_examples, 3, figsize=(14, n_examples*7), constrained_layout=True)
for ax, ele in zip(axs, test_dataset.take(n_examples)):
    image, y_true = ele
    prediction = model.predict(image)[0]
    prediction = tf.where(prediction > 0.5, 255, 0)
    ax[0].set_title('Original image')
    ax[0].imshow(image[0])
    ax[1].set_title('Original mask')
    ax[1].imshow(y_true[0])
    ax[2].set_title('Predicted area')
    ax[2].imshow(prediction)

plt.show()

meanIoU = tf.keras.metrics.MeanIoU(num_classes=2)
for ele in test_dataset.take(test_set_size):
    image, y_true = ele
    prediction = model.predict(image)[0]
    prediction = tf.where(prediction > 0.5, 1, 0)
    meanIoU.update_state(y_true[0], prediction)
print(meanIoU.result().numpy())



These are some comparisons from validation: on the left the Sentinel image, in the center the mask photointerpreted by a human, and on the right the mask predicted by the neural network






The accuracy is very good, 0.98 (maybe too good... did I make some mistake?)
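One possible reason for a suspiciously high score is class imbalance: if most pixels are background, even a trivial all-background prediction scores well. A sketch of mine to measure the foreground fraction over the test set defined in the script above:

import tensorflow as tf

total_pixels, landslide_pixels = 0, 0
for image, y_true in test_dataset:
    total_pixels += int(tf.size(y_true))
    landslide_pixels += int(tf.reduce_sum(tf.cast(y_true > 0, tf.int64)))
# a value close to 0 means the background dominates pixel-wise metrics
print('foreground fraction:', landslide_pixels / total_pixels)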




Thursday, April 11, 2024

TF Lite inference on CPU and TPU

For this test I selected the example model from this link, concerning the 900+ birds iNaturalist 2017 dataset.

 

First of all, note that CPU models are not compatible with the TPU and vice versa. Moreover, the image used for inference must be of the same type (float32, uint8) and the same size as the model input.
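The expected dtype and input size can be read directly from the model itself; a minimal sketch, using the same model file as the command below:

import tensorflow as tf

interpreter = tf.lite.Interpreter(model_path='mobilenet_v2_1.0_224_inat_bird_quant.tflite')
interpreter.allocate_tensors()
detail = interpreter.get_input_details()[0]
# for this quantized model the expected values are uint8 and [1 224 224 3]
print(detail['dtype'], detail['shape'])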

CPU

python3 esempio.py --model_file mobilenet_v2_1.0_224_inat_bird_quant.tflite --label_file inat_bird_labels.txt --image ./images/th-1232548430.jpg
 

Result

0.831373: Cyanistes caeruleus (Eurasian Blue Tit)
0.027451: Parus major (Great Tit)
0.003922: Coereba flaveola (Bananaquit)
0.003922: Vireo philadelphicus (Philadelphia Vireo)
0.003922: Myiozetetes similis (Social Flycatcher)

 

import argparse
import time

import numpy as np
from PIL import Image
import tensorflow as tf


def load_labels(filename):
    with open(filename, 'r') as f:
        return [line.strip() for line in f.readlines()]


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-i',
        '--image',
        default='/tmp/grace_hopper.bmp',
        help='image to be classified')
    parser.add_argument(
        '-m',
        '--model_file',
        default='/tmp/mobilenet_v1_1.0_224_quant.tflite',
        help='.tflite model to be executed')
    parser.add_argument(
        '-l',
        '--label_file',
        default='/tmp/labels.txt',
        help='name of file containing labels')
    parser.add_argument(
        '--input_mean',
        default=127.5, type=float,
        help='input_mean')
    parser.add_argument(
        '--input_std',
        default=127.5, type=float,
        help='input standard deviation')
    parser.add_argument(
        '--num_threads', default=None, type=int, help='number of threads')
    parser.add_argument(
        '-e', '--ext_delegate', help='external_delegate_library path')
    parser.add_argument(
        '-o',
        '--ext_delegate_options',
        help='external delegate options, \
              format: "option1: value1; option2: value2"')

    args = parser.parse_args()

    ext_delegate = None
    ext_delegate_options = {}

    # parse external delegate options
    if args.ext_delegate_options is not None:
        options = args.ext_delegate_options.split(';')
        for o in options:
            kv = o.split(':')
            if (len(kv) == 2):
                ext_delegate_options[kv[0].strip()] = kv[1].strip()
            else:
                raise RuntimeError('Error parsing delegate option: ' + o)

    # load external delegate
    if args.ext_delegate is not None:
        print('Loading external delegate from {} with args: {}'.format(
            args.ext_delegate, ext_delegate_options))
        ext_delegate = [
            tf.lite.experimental.load_delegate(args.ext_delegate, ext_delegate_options)
        ]

    interpreter = tf.lite.Interpreter(
        model_path=args.model_file,
        experimental_delegates=ext_delegate,
        num_threads=args.num_threads)
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()

    # check the type of the input tensor
    floating_model = input_details[0]['dtype'] == np.float32

    # NxHxWxC, H:1, W:2
    height = input_details[0]['shape'][1]
    width = input_details[0]['shape'][2]
    img = Image.open(args.image).resize((width, height))

    # add N dim
    input_data = np.expand_dims(img, axis=0)

    if floating_model:
        input_data = (np.float32(input_data) - args.input_mean) / args.input_std

    interpreter.set_tensor(input_details[0]['index'], input_data)

    start_time = time.time()
    interpreter.invoke()
    stop_time = time.time()

    output_data = interpreter.get_tensor(output_details[0]['index'])
    results = np.squeeze(output_data)

    top_k = results.argsort()[-5:][::-1]
    labels = load_labels(args.label_file)
    for i in top_k:
        if floating_model:
            print('{:08.6f}: {}'.format(float(results[i]), labels[i]))
        else:
            print('{:08.6f}: {}'.format(float(results[i] / 255.0), labels[i]))

    print('time: {:.3f}ms'.format((stop_time - start_time) * 1000))


 

TPU

python3 inferenza.py (note: running it with plain python generates an error on the pathlib module)

 

Result

Cyanistes caeruleus (Eurasian Blue Tit): 0.81250
 

======================================

import os
import pathlib
from pycoral.utils import edgetpu
from pycoral.utils import dataset
from pycoral.adapters import common
from pycoral.adapters import classify
from PIL import Image

# Specify the TensorFlow model, labels, and image
script_dir = pathlib.Path(__file__).parent.absolute()
model_file = os.path.join(script_dir, 'mobilenet_v2_1.0_224_quant_edgetpu.tflite')
label_file = os.path.join(script_dir, 'imagenet_labels.txt')
image_file = os.path.join(script_dir, 'parrot.jpg')

# Initialize the TF interpreter
interpreter = edgetpu.make_interpreter(model_file)
interpreter.allocate_tensors()

# Resize the image
size = common.input_size(interpreter)
image = Image.open(image_file).convert('RGB').resize(size, Image.ANTIALIAS)

# Run an inference
common.set_input(interpreter, image)
interpreter.invoke()
classes = classify.get_classes(interpreter, top_k=1)

# Print the result
labels = dataset.read_label_file(label_file)
for c in classes:
  print('%s: %.5f' % (labels.get(c.id, c.id), c.score)) 

======================================

Tour de France Firenze Grand Départ

Pogačar, Wout van Aert, Vingegaard, Van der Poel