Monday, August 18, 2025

RealSense D415 data preparation for monocular depth estimation

I wanted to try building my own neural network for monocular depth estimation, but I had no data to feed it... so I armed myself with a RealSense D415 and went to my usual training ground (the Maiano quarry near Florence) to do some acquisitions.

The problem with the RealSense is that the depth sensor is quite noisy, so the acquisition script captures several frames and then takes the per-pixel median.

 


import pyrealsense2 as rs
import numpy as np
from PIL import Image
import os

# -------------------
# Config
# -------------------
output_rgb_dir = "rgb"
output_depth_dir = "depth"
os.makedirs(output_rgb_dir, exist_ok=True)
os.makedirs(output_depth_dir, exist_ok=True)

num_frames = 30 # Number of frames to accumulate

# -------------------
# RealSense pipeline
# -------------------
pipeline = rs.pipeline()
config = rs.config()

config.enable_stream(rs.stream.color, 640, 480, rs.format.rgb8, 30)
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)

profile = pipeline.start(config)

# Depth scale
depth_sensor = profile.get_device().first_depth_sensor()
depth_scale = depth_sensor.get_depth_scale()
print("Depth scale:", depth_scale)

# Align depth to color
align_to = rs.stream.color
align = rs.align(align_to)

try:
    print(f"Capturing {num_frames} frames...")
    depth_frames_list = []
    rgb_frame_to_save = None

    for i in range(num_frames):
        frames = pipeline.wait_for_frames()
        aligned_frames = align.process(frames)

        color_frame = aligned_frames.get_color_frame()
        depth_frame = aligned_frames.get_depth_frame()

        if not color_frame or not depth_frame:
            continue

        # Convert frames to numpy
        color_image = np.asanyarray(color_frame.get_data())  # RGB
        depth_image = np.asanyarray(depth_frame.get_data()) * depth_scale  # meters

        depth_frames_list.append(depth_image)

        # Keep the last RGB frame (or you could store them all and take median too)
        rgb_frame_to_save = color_image

    # Compute median depth per pixel
    depth_median = np.median(np.stack(depth_frames_list, axis=0), axis=0).astype(np.float32)

    # Save RGB
    rgb_filename = os.path.join(output_rgb_dir, "rgb_median.png")
    Image.fromarray(rgb_frame_to_save).save(rgb_filename)

    # Save depth as NPY
    depth_npy_filename = os.path.join(output_depth_dir, "depth_median.npy")
    np.save(depth_npy_filename, depth_median)

    # Save depth as PNG (scaled to 16-bit for visualization)
    depth_png_filename = os.path.join(output_depth_dir, "depth_median.png")
    depth_mm = (depth_median * 1000).astype(np.uint16)  # convert meters to mm
    Image.fromarray(depth_mm).save(depth_png_filename)

    print(f"Saved median RGB to {rgb_filename}")
    print(f"Saved median depth to {depth_npy_filename} and {depth_png_filename}")

except KeyboardInterrupt:
    print("Stopped by user.")

finally:
    pipeline.stop()
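Before fixing the holes (next script), I find it handy to check how many pixels are actually missing. A minimal sketch, assuming the depth/depth_median.npy file saved by the script above (zero values mark pixels with no measurement):

import numpy as np

# Median depth map saved by the acquisition script (values in meters)
depth = np.load("depth/depth_median.npy")

# Zero-valued pixels are holes where the sensor returned no measurement
holes = depth == 0
print(f"Hole pixels: {holes.sum()} / {depth.size} ({100.0 * holes.mean():.2f}%)")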

 

The depth data contain holes where no measurement could be acquired. The next script fills the holes with a nearest-neighbor approach (SciPy) and smooths the result with an OpenCV bilateral filter (NPY files only).

import sys
import numpy as np
import cv2
from scipy import ndimage

if len(sys.argv) != 2:
    print(f"Usage: python {sys.argv[0]} depth_file.npy")
    sys.exit(1)

filename = sys.argv[1]

# Load depth map
depth = np.load(filename)
mask = depth == 0

# Fill holes using nearest neighbor
nearest_filled = ndimage.distance_transform_edt(
    mask,
    return_distances=False,
    return_indices=True
)
depth_filled = depth[tuple(nearest_filled)]

# Smooth with bilateral filter to preserve edges
depth_filtered = cv2.bilateralFilter(depth_filled.astype(np.float32), 9, 75, 75)

# Save with "_filled" suffix
output_filename = filename.replace(".npy", "_filled.npy")
np.save(output_filename, depth_filtered)

print(f"Saved filled depth map to: {output_filename}")


RealSense D415 intrinsics

One of the advantages of RealSense cameras is that they come already calibrated, so the intrinsic parameters can be read directly without going through a checkerboard calibration. Below is the output for the 640×480 color stream, followed by the script used to read it.

 


 

Width: 640
Height: 480
Focal length (fx, fy): 616.6863403320312 616.5963134765625
Principal point (ppx, ppy): 318.4041748046875 238.97064208984375
Distortion model: distortion.inverse_brown_conrady
Distortion coefficients: [0.0, 0.0, 0.0, 0.0, 0.0]
 

import pyrealsense2 as rs

# Create pipeline and config
pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)

# Start streaming
profile = pipeline.start(config)

# Get the stream profile and intrinsics
color_stream_profile = profile.get_stream(rs.stream.color)
intr = color_stream_profile.as_video_stream_profile().get_intrinsics()

# Print intrinsics
print("Width:", intr.width)
print("Height:", intr.height)
print("Focal length (fx, fy):", intr.fx, intr.fy)
print("Principal point (ppx, ppy):", intr.ppx, intr.ppy)
print("Distortion model:", intr.model)
print("Distortion coefficients:", intr.coeffs)

pipeline.stop()
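These values map directly onto the usual 3×3 pinhole camera matrix K, which is what most depth/3D pipelines expect. A small sketch built from the numbers printed above (distortion can be ignored here since all the coefficients are zero):

import numpy as np

# Intrinsics of the D415 color stream (see the output above)
fx, fy = 616.6863403320312, 616.5963134765625
ppx, ppy = 318.4041748046875, 238.97064208984375

K = np.array([
    [fx, 0.0, ppx],
    [0.0, fy, ppy],
    [0.0, 0.0, 1.0],
])
print(K)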


 

Wednesday, August 13, 2025

Measuring the fractal dimension of fractures

Since the previous post basically handed me the material ready to use, I tried to compute the fractal dimension of the fractures with a Python script.


Once the fractal dimension of the outcrop has been obtained, the scaling law

N(l) = C · l^(-D)

can be applied, where

N(l) = number of observable fractures longer than a given length l
D = fractal dimension
C = proportionality constant

so that, as a function of the observation scale, this gives the expected number of fractures.
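For example, once D has been estimated with the script below, the law gives the expected fracture counts at different observation scales. A minimal sketch with purely hypothetical values for the constant C and for D:

# Expected number of fractures longer than l, from N(l) = C * l**(-D)
C = 500.0   # hypothetical proportionality constant, to be fitted on observed counts
D = 1.5     # hypothetical fractal dimension of the outcrop

for l in [0.5, 1.0, 5.0, 10.0]:   # observation lengths in meters
    print(f"l = {l:5.1f} m -> expected N(l) = {C * l ** (-D):8.1f}")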


 

import numpy as np
import cv2
import matplotlib.pyplot as plt

def boxcount(Z, k):
    """Count the number of non-empty boxes of size k×k in a binary array Z."""
    S = np.add.reduceat(
        np.add.reduceat(Z, np.arange(0, Z.shape[0], k), axis=0),
        np.arange(0, Z.shape[1], k), axis=1)

    # Count non-empty (at least 1 pixel > 0) boxes
    return len(np.where(S > 0)[0])

def fractal_dimension(image_path):
    # Load image in grayscale
    img = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
    if img is None:
        raise FileNotFoundError(f"Image not found: {image_path}")

    # Binarize image (fractures = white, background = black)
    _, binary = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)

    # Ensure binary array is 0-1
    Z = binary > 0

    # Minimal dimension of image
    p = min(Z.shape)

    # Max power of two less than p
    n = 2**np.floor(np.log2(p))
    n = int(n)

    # Extract the exponent sizes (k)
    sizes = 2**np.arange(int(np.log2(n)), 1, -1)

    counts = []
    for size in sizes:
        counts.append(boxcount(Z, size))

    # Linear fit in log-log space
    coeffs = np.polyfit(np.log(sizes), np.log(counts), 1)
    fractal_dim = -coeffs[0]

    # Plot result
    plt.figure()
    plt.scatter(np.log(sizes), np.log(counts), color='blue', label="Data")
    plt.plot(np.log(sizes), np.polyval(coeffs, np.log(sizes)), color='red', label=f"Fit (D={fractal_dim:.3f})")
    plt.xlabel("log(Box size)")
    plt.ylabel("log(Count)")
    plt.legend()
    plt.show()

    return fractal_dim

# Example usage:
image_path = "image.png"  
D = fractal_dimension(image_path)
print(f"Estimated fractal dimension: {D:.3f}")


Just for completeness: with a random distribution of pixels in the image, the resulting fractal dimension will be 2.
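As a quick sanity check of this statement, a random binary image can be fed to the same routine (a sketch that reuses the fractal_dimension function defined above); the fitted slope should come out close to 2:

import numpy as np
import cv2

# Dense random noise fills the plane, so box counting should give D ~ 2
rng = np.random.default_rng(0)
random_img = (rng.random((512, 512)) > 0.5).astype(np.uint8) * 255
cv2.imwrite("random.png", random_img)

D_random = fractal_dimension("random.png")
print(f"Fractal dimension of random noise: {D_random:.3f}")   # expected ~2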





Monday, August 11, 2025

Getaberget Image Segmentation

Update: a Nature article on the same topic

Using the data from the previous post, I tried the Unet, U2Net and DeepLab neural networks to see which performed best for segmentation.

The project is too complex to fit in a single post, so a dedicated GitHub repository was created. The training files and the .h5 model files are too large for GitHub hosting (about 2.8 GB) and are stored on my GDrive.

Below is a comparison between the source image, the results of the three segmentation algorithms, and the mask created manually for training. Note how the best result comes from DeepLab V3+, but also how all the algorithms found fractures in the image that were not present in the training mask.

Source image


U2Net segmentation

Unet segmentation

DeepLab V3+


Training mask

Finally, let's see how DeepLab V3+ performs on two images from the test dataset, i.e., images never used during training. I would say the result is satisfactory.










Saturday, August 9, 2025

Data preparation for neural network segmentation

I found online a dataset of drone photos with the fractures already classified. The data come with a permissive license.


 

 

The data come from this paper:

https://www.researchgate.net/publication/357994422_A_new_subsampling_methodology_to_optimize_the_characterization_of_two-dimensional_bedrock_fracture_networks

@article{article,
author = {Ovaskainen, Nikolas and Nordbäck, Nicklas and Skyttä, Pietari and Engström, Jon},
year = {2022},
month = {01},
pages = {104528},
title = {A new subsampling methodology to optimize the characterization of two-dimensional bedrock fracture networks},
volume = {155},
journal = {Journal of Structural Geology},
doi = {10.1016/j.jsg.2022.104528}
}

and are available as drone RGB orthophotos at this link https://zenodo.org/records/4719627 (16 GB), while the fractures are mapped in several .gpkg files at this link https://www.kaggle.com/datasets/nasurz/getaberget-fracture-trace-dataset/data (4 MB).

The problem is that in this format the images are not suitable for processing by the neural network. The idea is therefore to split the orthophotos into 15-meter tiles and create a mask image containing only the fractures. This task was carried out with a Python script in QGIS.

In the script you set the geographic coordinates of the window and the folder where the tiles will be saved. In a first pass, the fracture line layer is turned off in the QGIS project to export only the orthophoto tiles; then the raster layer is turned off to create the masks.

 ------------------------------------------------------------------------------------------

from qgis.core import (
    QgsProject, QgsApplication, QgsLayout, QgsPrintLayout, QgsLayoutItemMap,
    QgsUnitTypes, QgsLayoutSize, QgsLayoutPoint, QgsLayoutExporter, QgsRectangle
)
import os

from qgis.PyQt.QtGui import QColor  # used for the transparent map background



# Path to your QGIS project
project_path = "C:\\Users\\l.innocenti\\Desktop\\geta.qgz"

# Output folder
output_dir = "C:\\Users\\l.innocenti\\Desktop\\geta5\\mask\\"
os.makedirs(output_dir, exist_ok=True)

# Load project
project = QgsProject.instance()
project.read(project_path)



# Compute project extent from all layers
extent = None
for layer in project.mapLayers().values():
    if not layer.isValid():
        continue
    layer_extent = layer.extent()
    if extent is None:
        extent = QgsRectangle(layer_extent)
    else:
        extent.combineExtentWith(layer_extent)

if extent is None:
    raise ValueError("No valid layers found in the project.")

# Manually set the export window (this overrides the extent computed above)
xmin, ymin = 108914, 6720011
xmax, ymax = 108969, 6720066

# Get the full extent in project CRS
#extent = project.boundingBox()
#xmin, ymin, xmax, ymax = extent.xMinimum(), extent.yMinimum(), extent.xMaximum(), extent.yMaximum()

# Tile size in meters
tile_size = 15

# Iterate over the grid
x = xmin
tile_id = 0
while x < xmax:
    y = ymin
    while y < ymax:
        # Tile extent
        tile_extent = QgsRectangle(x, y, x + tile_size, y + tile_size)

        # Create a print layout
        layout = QgsPrintLayout(project)
        layout.initializeDefaults()
        layout.setName(f"tile_{tile_id}")
        pc = layout.pageCollection().pages()[0]

        # Map item
        map_item = QgsLayoutItemMap(layout)
        map_item.setRect(20, 20, 200, 200)
        map_item.setExtent(tile_extent)
        map_item.setBackgroundColor(QColor(255, 255, 255, 0))
        map_item.attemptResize(QgsLayoutSize(200, 200, QgsUnitTypes.LayoutMillimeters))
        visible_layers = [lyr for lyr in iface.mapCanvas().layers()]
        map_item.setLayers(visible_layers)

        layout.addLayoutItem(map_item)
        map_item.attemptMove(QgsLayoutPoint(0, 0, QgsUnitTypes.LayoutMillimeters))

        # Export PNG
        exporter = QgsLayoutExporter(layout)
        out_path = os.path.join(output_dir, f"tile_{tile_id}.png")
        exporter.exportToImage(out_path, QgsLayoutExporter.ImageExportSettings())

        print(f"Saved {out_path}")
        tile_id += 1

        y += tile_size
    x += tile_size

  ------------------------------------------------------------------------------------------

At the end you get pairs of this kind (the images were trimmed with ImageMagick to remove the white space added by QGIS).
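For reference, the same white-border trim can also be done in Python with Pillow instead of ImageMagick (a sketch, not the command actually used; tile_0.png is just an example filename from the export script):

from PIL import Image, ImageChops

def trim_white_border(path):
    """Crop away the uniform white border added around the exported tile."""
    img = Image.open(path).convert("RGB")
    # Difference against a pure white canvas: non-white pixels become non-zero
    background = Image.new("RGB", img.size, (255, 255, 255))
    bbox = ImageChops.difference(img, background).getbbox()
    if bbox:
        img.crop(bbox).save(path)

trim_white_border("tile_0.png")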


 

 


 

To "fix" the masks, setting the background to black and the target to gray, the following script maps white pixels to black and everything else to gray (128):

import cv2
import numpy as np
import sys

# --- Check for input filename ---
if len(sys.argv) < 2:
    print("Usage: python script.py <image_filename>")
    sys.exit(1)

filename = sys.argv[1]

# Read image
img = cv2.imread(filename)

if img is None:
    raise FileNotFoundError(f"Cannot read file: {filename}")

# Define white color in BGR
white = np.array([255, 255, 255], dtype=np.uint8)

# Create mask for white pixels
mask_white = np.all(img == white, axis=-1)

# Create output image
output = np.full_like(img, (128, 128, 128))  # Set all to gray
output[mask_white] = (0, 0, 0)  # Set white pixels to black

# Save with same filename (overwrite original)
cv2.imwrite(filename, output)

print(f"Image processed and saved: {filename}")

 

 

Wednesday, August 6, 2025

Swin-Unet on drill cores

Reading around, I found that one of the most recent developments in image segmentation is the Swin-Unet network (born from the fusion of the Swin Transformer and Unet).


To get the network running, the first thing to do is clone this repository inside the project folder: https://github.com/HuCaoFighting/Swin-Unet
The computation time is clearly lower than DeepLab's.


import os
import cv2
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
import albumentations as A
from albumentations.pytorch import ToTensorV2
import matplotlib.pyplot as plt

import sys
sys.path.append('./Swin-Unet')

from networks.swin_transformer_unet_skip_expand_decoder_sys import SwinTransformerSys
import timm

# -------- CONFIG --------
IMAGE_DIR = "data/images"
MASK_DIR = "data/masks"
NUM_CLASSES = 4
EPOCHS = 30
BATCH_SIZE = 8
IMG_SIZE = 224 # Corrected for Swin-Unet compatibility
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
# ------------------------

# Map pixel values to class indices
PIXEL_TO_CLASS = {
    0: 0,     # background
    85: 1,    # sample
    170: 2,   # joint
    255: 3    # strata
}

def convert_mask(mask):
    """Map 8-bit pixel values to class indices (0-3)."""
    new_mask = np.zeros_like(mask, dtype=np.uint8)
    for pixel_val, class_idx in PIXEL_TO_CLASS.items():
        new_mask[mask == pixel_val] = class_idx
    return new_mask

class SegmentationDataset(Dataset):
    def __init__(self, image_dir, mask_dir, transform=None):
        self.image_dir = image_dir
        self.mask_dir = mask_dir
        self.transform = transform
        self.filenames = [f for f in os.listdir(image_dir) if f.endswith(".png")]

    def __len__(self):
        return len(self.filenames)

    def __getitem__(self, idx):
        img_path = os.path.join(self.image_dir, self.filenames[idx])
        mask_path = os.path.join(self.mask_dir, self.filenames[idx])

        image = cv2.imread(img_path)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        mask = cv2.imread(mask_path, cv2.IMREAD_GRAYSCALE)
        mask = convert_mask(mask)

        if self.transform:
            augmented = self.transform(image=image, mask=mask)
            image = augmented["image"]
            mask = augmented["mask"]

        return image, mask.long()

    def get_class_counts(self):
        """Calculates pixel counts for each class to determine weights."""
        class_counts = np.zeros(NUM_CLASSES)
        for filename in self.filenames:
            mask_path = os.path.join(self.mask_dir, filename)
            mask = cv2.imread(mask_path, cv2.IMREAD_GRAYSCALE)
            mask = convert_mask(mask)
            for i in range(NUM_CLASSES):
                class_counts[i] += np.sum(mask == i)
        return class_counts

def get_transforms():
    return A.Compose([
        A.Resize(IMG_SIZE, IMG_SIZE),
        A.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
        ToTensorV2()
    ])

class DiceLoss(nn.Module):
    def __init__(self, num_classes, ignore_index=None):
        super().__init__()
        self.num_classes = num_classes
        self.ignore_index = ignore_index

    def forward(self, pred, target):
        smooth = 1e-5
        iflat = pred.contiguous().view(-1)
        tflat = target.contiguous().view(-1)
        intersection = (iflat * tflat).sum()
        union = iflat.sum() + tflat.sum()
        return 1.0 - ((2.0 * intersection + smooth) / (union + smooth))

class MixedLoss(nn.Module):
    def __init__(self, num_classes, weights, dice_weight=0.5):
        super().__init__()
        self.cross_entropy_loss = nn.CrossEntropyLoss(weight=weights)
        self.dice_loss = DiceLoss(num_classes)
        self.dice_weight = dice_weight

    def forward(self, pred, target):
        # Convert target to one-hot for DiceLoss
        target_one_hot = nn.functional.one_hot(target, num_classes=NUM_CLASSES).permute(0, 3, 1, 2).float()
        ce_loss = self.cross_entropy_loss(pred, target)
        dice_loss = self.dice_loss(torch.softmax(pred, dim=1), target_one_hot)

        return ce_loss * (1 - self.dice_weight) + dice_loss * self.dice_weight

def train():
    transform = get_transforms()
    dataset = SegmentationDataset(IMAGE_DIR, MASK_DIR, transform=transform)
    dataloader = DataLoader(dataset, batch_size=BATCH_SIZE, shuffle=True)

    # Calculate class weights for Cross-Entropy Loss
    class_counts = dataset.get_class_counts()
    total_pixels = class_counts.sum()
    class_weights = 1.0 / (class_counts / total_pixels + 1e-5)  # Add a small epsilon to prevent division by zero
    class_weights = class_weights / class_weights.sum()  # Normalize to sum to 1
    class_weights = torch.from_numpy(class_weights).float().to(DEVICE)
    print(f"Class weights: {class_weights}")

    model = SwinTransformerSys(img_size=IMG_SIZE, num_classes=NUM_CLASSES)
    model = model.to(DEVICE)

    # Use the MixedLoss
    criterion = MixedLoss(num_classes=NUM_CLASSES, weights=class_weights)
    optimizer = optim.AdamW(model.parameters(), lr=1e-4)

    for epoch in range(EPOCHS):
        model.train()
        total_loss = 0
        for images, masks in dataloader:
            images, masks = images.to(DEVICE), masks.to(DEVICE)

            outputs = model(images)
            loss = criterion(outputs, masks)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            total_loss += loss.item()

        print(f"Epoch {epoch+1}/{EPOCHS}, Loss: {total_loss / len(dataloader):.4f}")

    torch.save(model.state_dict(), "swinunet_model.pth")
    visualize_prediction(model, dataset)

def visualize_prediction(model, dataset):
    model.eval()
    image, mask = dataset[0]
    with torch.no_grad():
        input_tensor = image.unsqueeze(0).to(DEVICE)
        output = model(input_tensor)
        pred = torch.argmax(output.squeeze(), dim=0).cpu().numpy()

    # Reverse normalization for visualization
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    unnormalized_img = image.permute(1, 2, 0).cpu().numpy() * std + mean
    unnormalized_img = np.clip(unnormalized_img, 0, 1)

    class_to_pixel = {v: k for k, v in PIXEL_TO_CLASS.items()}
    pred_mask_rgb = np.vectorize(class_to_pixel.get)(pred)
    true_mask_rgb = np.vectorize(class_to_pixel.get)(mask.numpy())

    plt.figure(figsize=(12, 5))
    plt.subplot(1, 3, 1)
    plt.title("Input Image")
    plt.imshow(unnormalized_img)

    plt.subplot(1, 3, 2)
    plt.title("Prediction")
    plt.imshow(pred_mask_rgb, cmap="jet", vmin=0, vmax=255)

    plt.subplot(1, 3, 3)
    plt.title("Ground Truth")
    plt.imshow(true_mask_rgb, cmap="jet", vmin=0, vmax=255)

    plt.tight_layout()
    plt.show()

if __name__ == "__main__":
    train()
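For later reuse of the trained network, here is a minimal inference sketch (my addition, assuming the same repository layout, the swinunet_model.pth file produced above, and a hypothetical core.png input image):

import sys
import cv2
import numpy as np
import torch

sys.path.append('./Swin-Unet')
from networks.swin_transformer_unet_skip_expand_decoder_sys import SwinTransformerSys

IMG_SIZE = 224
NUM_CLASSES = 4
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"

# Rebuild the model and load the weights saved by the training script
model = SwinTransformerSys(img_size=IMG_SIZE, num_classes=NUM_CLASSES).to(DEVICE)
model.load_state_dict(torch.load("swinunet_model.pth", map_location=DEVICE))
model.eval()

# Preprocess one image the same way as during training (resize + ImageNet normalization)
img = cv2.cvtColor(cv2.imread("core.png"), cv2.COLOR_BGR2RGB)  # hypothetical input file
img = cv2.resize(img, (IMG_SIZE, IMG_SIZE)).astype(np.float32) / 255.0
img = (img - np.array([0.485, 0.456, 0.406])) / np.array([0.229, 0.224, 0.225])
tensor = torch.from_numpy(img.transpose(2, 0, 1)).float().unsqueeze(0).to(DEVICE)

with torch.no_grad():
    pred = torch.argmax(model(tensor), dim=1).squeeze(0).cpu().numpy()

# Map class indices back to the original gray levels (0, 85, 170, 255)
gray_levels = np.array([0, 85, 170, 255], dtype=np.uint8)
cv2.imwrite("core_pred.png", gray_levels[pred])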





DeepLab V3+ on drill cores

Using the same dataset and the same training masks as the previous post, I tried the DeepLab V3+ network.



Computationally, DeepLab turned out to be more demanding than Unet, but the results, as the comparison image above shows, are clearly better, especially regarding false positives.

pip install torch torchvision albumentations opencv-python matplotlib


import os
import cv2
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import models
from torch.utils.data import Dataset, DataLoader
import albumentations as A
from albumentations.pytorch import ToTensorV2
import matplotlib.pyplot as plt

# -------- CONFIG --------
IMAGE_DIR = "data/images"
MASK_DIR = "data/masks"
NUM_CLASSES = 4
EPOCHS = 20
BATCH_SIZE = 4
IMG_SIZE = 512
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
# ------------------------

# Map pixel values to class indices
PIXEL_TO_CLASS = {
    0: 0,     # background
    85: 1,    # sample
    170: 2,   # joint
    255: 3    # strata
}

def convert_mask(mask):
    """Map 8-bit pixel values to class indices (0-3)."""
    new_mask = np.zeros_like(mask, dtype=np.uint8)
    for pixel_val, class_idx in PIXEL_TO_CLASS.items():
        new_mask[mask == pixel_val] = class_idx
    return new_mask

class SegmentationDataset(Dataset):
    def __init__(self, image_dir, mask_dir, transform=None):
        self.image_dir = image_dir
        self.mask_dir = mask_dir
        self.transform = transform
        self.filenames = [f for f in os.listdir(image_dir) if f.endswith(".png")]

    def __len__(self):
        return len(self.filenames)

    def __getitem__(self, idx):
        img_path = os.path.join(self.image_dir, self.filenames[idx])
        mask_path = os.path.join(self.mask_dir, self.filenames[idx])

        image = cv2.imread(img_path)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        mask = cv2.imread(mask_path, cv2.IMREAD_GRAYSCALE)
        mask = convert_mask(mask)

        if self.transform:
            augmented = self.transform(image=image, mask=mask)
            image = augmented["image"]
            mask = augmented["mask"]

        return image, mask.long()

def get_transforms():
    return A.Compose([
        A.Resize(IMG_SIZE, IMG_SIZE),
        A.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
        ToTensorV2()
    ])

def train():
    # Load dataset
    transform = get_transforms()
    dataset = SegmentationDataset(IMAGE_DIR, MASK_DIR, transform=transform)
    dataloader = DataLoader(dataset, batch_size=BATCH_SIZE, shuffle=True)

    # Load model
    model = models.segmentation.deeplabv3_resnet50(weights=None, num_classes=NUM_CLASSES)
    model = model.to(DEVICE)

    # Loss and optimizer
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=1e-4)

    # Training loop
    for epoch in range(EPOCHS):
        model.train()
        total_loss = 0
        for images, masks in dataloader:
            images, masks = images.to(DEVICE), masks.to(DEVICE)

            outputs = model(images)["out"]
            loss = criterion(outputs, masks)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            total_loss += loss.item()

        print(f"Epoch {epoch+1}/{EPOCHS}, Loss: {total_loss:.4f}")

    # Save model
    torch.save(model.state_dict(), "deeplabv3_model.pth")

    # Visualize prediction on 1 sample
    visualize_prediction(model, dataset)

def visualize_prediction(model, dataset):
    model.eval()
    image, mask = dataset[0]
    with torch.no_grad():
        input_tensor = image.unsqueeze(0).to(DEVICE)
        output = model(input_tensor)["out"]
        pred = torch.argmax(output.squeeze(), dim=0).cpu().numpy()

    # Convert back to pixel values
    class_to_pixel = {v: k for k, v in PIXEL_TO_CLASS.items()}
    pred_mask_rgb = np.vectorize(class_to_pixel.get)(pred)
    true_mask_rgb = np.vectorize(class_to_pixel.get)(mask.numpy())

    plt.figure(figsize=(12, 5))
    plt.subplot(1, 3, 1)
    plt.title("Input Image")
    plt.imshow(image.permute(1, 2, 0).cpu())

    plt.subplot(1, 3, 2)
    plt.title("Prediction")
    plt.imshow(pred_mask_rgb, cmap="jet", vmin=0, vmax=255)

    plt.subplot(1, 3, 3)
    plt.title("Ground Truth")
    plt.imshow(true_mask_rgb, cmap="jet", vmin=0, vmax=255)

    plt.tight_layout()
    plt.show()

if __name__ == "__main__":
    train()



MNF analysis of plastic reflectance spectra

I need to try to work on reflectance spectra of plastics, and the first question is: which bands are significant? I started from the ...