Friday, April 11, 2025

MARIDA Marine Debris Archive

I am trying to use MARIDA, an archive of labelled Sentinel-2 images, collected outside the Mediterranean area, with 15 categories.

The dataset images can be downloaded from this link (1.13 GB), and the reference article is https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0262247

The dataset consists of patches (GeoTIFF images with a spatial resolution of 10 m, a 2560x2560 m footprint, 256x256 pixels and 11 bands). Note that the 20 m and 60 m bands have been resampled to 10 m.
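For a quick check, a patch can be opened with rasterio to verify this structure (the file name below is just a placeholder):

import rasterio

with rasterio.open("S2_patch.tif") as src:   # placeholder file name
    print(src.count)             # 11 bands (the 20 m and 60 m bands are resampled to 10 m)
    print(src.res)               # (10.0, 10.0) metres per pixel
    print(src.read().shape)      # (11, 256, 256)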


The images are labelled with shapefile polygons stored in a separate folder.
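Below is a minimal sketch for burning those label polygons onto the 256x256 grid of a patch, assuming geopandas is available; the file names and the class attribute name "id" are placeholders and may differ from the actual MARIDA naming:

import geopandas as gpd
import rasterio
from rasterio.features import rasterize

with rasterio.open("patch.tif") as src:          # placeholder patch file
    transform = src.transform
    out_shape = (src.height, src.width)          # 256 x 256

gdf = gpd.read_file("labels.shp")                # placeholder shapefile

# "id" is an assumed name for the numeric class attribute; adjust to the real field name
shapes = ((geom, int(cls)) for geom, cls in zip(gdf.geometry, gdf["id"]))

label_mask = rasterize(shapes, out_shape=out_shape, transform=transform, fill=0, dtype="uint8")
print(label_mask.shape, label_mask.max())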


 

On GitHub there is PyTorch code to train two neural networks (https://paperswithcode.com/paper/resattunet-detecting-marine-debris-using-an), but the already-trained U-Net and ResNet models can also be downloaded from here.




For inference, the script below can be used.



Example of a classified inference result

 

import torch
import torch.nn as nn
import torchvision.models as models
import sys

class ResNet50Encoder(nn.Module):
    def __init__(self, input_bands=11, output_classes=11):
        super(ResNet50Encoder, self).__init__()

        resnet = models.resnet50(pretrained=False)

        self.encoder = nn.Sequential(
            nn.Conv2d(input_bands, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False),
            resnet.bn1,
            resnet.relu,
            resnet.maxpool,
            resnet.layer1,
            resnet.layer2,
            resnet.layer3,
            resnet.layer4,
            resnet.avgpool
        )

        self.fc = nn.Linear(2048, output_classes)

    def forward(self, x):
        x = self.encoder(x)
        x = x.view(x.size(0), -1)
        logits = self.fc(x)
        return logits
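Note that this network is a patch-level classifier: the average pooling collapses the spatial dimensions, so one 11-band 256x256 patch produces a single vector of 11 logits. A quick sanity check:

# One 11-band 256x256 patch -> one vector of 11 class logits
clf = ResNet50Encoder(input_bands=11, output_classes=11)
print(clf(torch.randn(1, 11, 256, 256)).shape)   # torch.Size([1, 11])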







# Instantiate and load weights
model = ResNet50Encoder(input_bands=11, output_classes=11)
model.load_state_dict(torch.load("model_resnet.pth", map_location="cpu"))
model.eval()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

import rasterio
import numpy as np

with rasterio.open("test4.tif") as src:
    image = src.read().astype(np.float32)  # (11, 256, 256)

# Normalize (simple global min-max scaling over the whole 11-band cube; this may not
# match the normalization used when the model was trained)
image = (image - image.min()) / (image.max() - image.min())

import torch
image_tensor = torch.from_numpy(image).unsqueeze(0)  # (1, 11, 256, 256)
image_tensor = image_tensor.to(device)

with torch.no_grad():
    output = model(image_tensor)

# If it's a classifier:
predicted_class = torch.argmax(output, dim=1)
print("Predicted class:", predicted_class.item())







# Pixel-level classification
# The classifier above returns one logit vector per patch (shape [1, 11]); to obtain a
# per-pixel mask, the encoder has to be combined with a decoder that restores the spatial
# dimensions, as in the segmentation model built in the full script below:
output = segmentation_model(image_tensor)      # [1, num_classes, 256, 256]
mask = torch.argmax(output, dim=1).squeeze(0)  # shape: [256, 256]



import torch
from torchvision.utils import save_image
from PIL import Image
import numpy as np

# Example: your predicted mask (dtype: torch.Tensor, shape: [H, W])
mask = torch.randint(0, 10, (256, 256), dtype=torch.uint8)  # demo mask

# Option 1: using PIL directly (recommended for class masks)
mask_np = mask.numpy().astype(np.uint8)
img = Image.fromarray(mask_np, mode="L")  # 'L' = grayscale (8-bit)
img.save("predicted_mask.png")

palette = [
    0, 0, 0,         # class 0 - black
    255, 0, 0,       # class 1 - red
    0, 255, 0,       # class 2 - green
    0, 0, 255,       # class 3 - blue
    255, 255, 0,     # class 4 - yellow
    255, 0, 255,     # class 5 - magenta
    0, 255, 255,     # class 6 - cyan
    128, 128, 128,   # class 7 - gray
    255, 128, 0,     # class 8 - orange
    128, 0, 255,     # class 9 - violet
    0, 128, 255,     # class 10 - sky blue
]

# Apply the palette
color_mask = Image.fromarray(mask_np, mode="P")
color_mask.putpalette(palette)
color_mask.save("colored_mask.png")

##########################################################################################
# Define the segmentation model
class ResNetSegmentationFromClassifier(nn.Module):
    def __init__(self, encoder_model):
        super().__init__()
        # Extract the encoder part (everything except avgpool and fc)
        resnet = models.resnet50(pretrained=False)
       
        # First re-use the custom first conv layer from the encoder model
        first_conv = encoder_model.encoder[0]
       
        # Build encoder using resnet blocks
        self.encoder = nn.Sequential(
            first_conv,
            resnet.bn1,
            resnet.relu,
            resnet.maxpool,
            resnet.layer1,
            resnet.layer2,
            resnet.layer3,
            resnet.layer4
        )
       
        # Load the pretrained weights from the classifier's encoder
        # (both nn.Sequential containers share the same layer order and
        #  avgpool has no parameters, so the state dicts match)
        self.encoder.load_state_dict(encoder_model.encoder.state_dict())
       
        # Decoder to upsample back to 256x256
        # (note: these transposed convolutions are randomly initialized and not trained here,
        #  so the per-pixel output serves mainly as a structural demo)
        self.decoder = nn.Sequential(
            nn.ConvTranspose2d(2048, 512, 2, stride=2),  # 8x8 -> 16x16
            nn.ReLU(),
            nn.ConvTranspose2d(512, 256, 2, stride=2),   # 16x16 -> 32x32
            nn.ReLU(),
            nn.ConvTranspose2d(256, 128, 2, stride=2),   # 32x32 -> 64x64
            nn.ReLU(),
            nn.ConvTranspose2d(128, 64, 2, stride=2),    # 64x64 -> 128x128
            nn.ReLU(),
            nn.ConvTranspose2d(64, 11, 2, stride=2)      # 128x128 -> 256x256
        )
   
    def forward(self, x):
        x = self.encoder(x)        # [B, 2048, 8, 8]
        x = self.decoder(x)        # [B, 11, 256, 256]
        return x

# Load the classification model
model = ResNet50Encoder(input_bands=11, output_classes=11)
model.load_state_dict(torch.load("model_resnet.pth", map_location="cpu"))
model.eval()

# Create the segmentation model
segmentation_model = ResNetSegmentationFromClassifier(model)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
segmentation_model.to(device)
segmentation_model.eval()

# Load and preprocess the image
with rasterio.open("test4.tif") as src:
    image = src.read().astype(np.float32)  # (11, 256, 256)

# Normalize
image = (image - image.min()) / (image.max() - image.min())
image_tensor = torch.from_numpy(image).unsqueeze(0)  # (1, 11, 256, 256)
image_tensor = image_tensor.to(device)

# Perform pixel-based classification
with torch.no_grad():
    output = segmentation_model(image_tensor)  # [1, 11, 256, 256]

# Create class mask by taking argmax at each pixel
mask = torch.argmax(output, dim=1).squeeze(0)  # shape: [256, 256]
mask_np = mask.cpu().numpy().astype(np.uint8)

# Define color palette (11 classes)
palette = [
    0, 0, 0,         # class 0 - black
    255, 0, 0,       # class 1 - red
    0, 255, 0,       # class 2 - green
    0, 0, 255,       # class 3 - blue
    255, 255, 0,     # class 4 - yellow
    255, 0, 255,     # class 5 - magenta
    0, 255, 255,     # class 6 - cyan
    128, 128, 128,   # class 7 - gray
    255, 128, 0,     # class 8 - orange
    128, 0, 255,     # class 9 - violet
    0, 128, 255,     # class 10 - sky blue
]

# Save grayscale mask
img = Image.fromarray(mask_np, mode="L")
img.save("predicted_mask.png")

# Save colored mask
color_mask = Image.fromarray(mask_np, mode="P")
color_mask.putpalette(palette)
color_mask.save("colored_mask.png")

# Print class distribution
unique, counts = np.unique(mask_np, return_counts=True)
class_distribution = dict(zip(unique, counts))
print("Pixel class distribution:", class_distribution)


Thursday, April 10, 2025

EnMAP Toolbox on Linux

To install the EnMAP Toolbox on Debian Linux a few dependencies have to be added. Some are already available through apt, but enpt_enmapboxapp must be installed with pip. At that point a venv has to be created, and QGIS must be launched from inside the venv.




sudo apt install python3-h5py python3-pyqt5.qtopengl python3-netcdf4

python3 -m venv .venv --system-site-packages
source .venv/bin/activate

pip3 install enpt_enmapboxapp

qgis

/tmp not writable

I was asked to help with a remote server that was no longer manageable; the system was still reachable via ssh. I simply thought it had run out of disk space... unfortunately not.

The first thing that caught my eye was that Ubuntu's unattended upgrade service had been consuming 50% of the CPU for tens of hours (the system was unadministered)... I killed it, but there was no improvement.

When trying to run apt update, the process hung because the /tmp folder was blocked: it was not writable, not even by root. The system was mounted in RAID but there were no filesystem errors, and the permissions on /tmp were correct (sticky bit included).

In the end I created a new folder in /mnt and then bind-mounted it over /tmp:

mount --bind /mnt/newtmp /tmp
 
At this point I regained control of the machine, although I never figured out the origin of the problem.


Sunday, March 9, 2025

Physics-informed neural network: Fukuzono

Since pure ML does not work for the time series I am dealing with, and regressions based on analytical formulas show their limits in the presence of instrument noise, let's try a PINN (Physics-Informed Neural Network), where the law governing the phenomenon is known a priori and the neural network is left to approximate the unknown variables as well as possible and to handle the instrumental noise.

Given the lack of data, I wrote (...well, ChatGPT wrote) a synthetic data generator that can add an arbitrary level of noise.

Two datasets were created:

Test dataset 1

Test dataset 2


 

import csv
import random
import matplotlib.pyplot as plt

def generate_landslide_data(num_points=100, initial_displacement=10.0, velocity_decrease_rate=0.005, noise_level=0.01):
    """
    Generates simulated landslide data with a *linear* decrease in inverse velocity,
    and calculates inverse velocity. Correctly applies noise to the velocity.
    """
    data = []
    inverse_velocity = 1.0  # Start with an arbitrary inverse velocity
    displacement = initial_displacement
    previous_displacement = initial_displacement  # Set initial displacement

    for day in range(1, num_points + 1):
        # Linearly decrease the inverse velocity:
        inverse_velocity -= velocity_decrease_rate

        # Ensure inverse velocity doesn't go below a small positive value to avoid div by zero
        inverse_velocity = max(inverse_velocity, 0.0001)

        # Calculate velocity (V = 1 / (1/V)):
        velocity = 1 / inverse_velocity

        # Add noise to the velocity *directly*:
        noise = random.uniform(-noise_level * velocity, noise_level * velocity)
        velocity += noise

        # Ensure velocity doesn't become negative after adding noise (important!)
        velocity = max(velocity, 0.0001)  # Very small positive value to avoid zero velocity.
        inverse_velocity = 1 / velocity   # Recalculate inverse velocity based on the noisy velocity.

        # Calculate displacement increase (ΔDisplacement = Velocity * ΔTime)
        displacement_increase = velocity

        # Calculate new displacement
        displacement += displacement_increase

        data.append((day, displacement, inverse_velocity))

    return data


def write_to_csv(data, filename="landslide_data.csv"):
    """
    Writes the simulated landslide data to a CSV file with semicolon as a separator,
    including a column for inverse velocity.
    """
    with open(filename, "w", newline="") as csvfile:
        writer = csv.writer(csvfile, delimiter=";")
        writer.writerow(["Day", "Cumulative_Displacement", "Inverse_Velocity"])
        for row in data:
            inverse_velocity_str = "{:.6f}".format(row[2]) if row[2] is not None else ""
            writer.writerow([row[0], row[1], inverse_velocity_str])

    print(f"Data written to {filename}")


def plot_inverse_velocity(filename="landslide_data.csv"):
    """
    Reads the landslide data from the CSV file and plots inverse velocity vs. time using matplotlib.
    """
    days = []
    inverse_velocities = []

    with open(filename, "r") as csvfile:
        reader = csv.reader(csvfile, delimiter=";")
        next(reader)  # Skip the header row

        for row in reader:
            try:
                day = int(row[0])
                inverse_velocity = float(row[2]) if row[2] else None  # Handle empty cells
                if inverse_velocity is not None:
                    days.append(day)
                    inverse_velocities.append(inverse_velocity)
            except (ValueError, IndexError) as e:
                print(f"Error reading row: {row}. Skipping. Error: {e}")

    # Plotting the data:
    if days and inverse_velocities:  # Check to make sure data was read.
        plt.figure(figsize=(10, 6))  # Set plot size
        plt.plot(days, inverse_velocities, marker='o', linestyle='-', color='blue')  # Plot data

        plt.title("Fukuzono Plot: Inverse Velocity vs. Time")  # Set title
        plt.xlabel("Time (Days)")  # Set labels
        plt.ylabel("Inverse Velocity (days/mm)")
        plt.grid(True)  # Add grid
        plt.show()      # Display plot
    else:
        print("No data to plot.")


# --- Main execution ---
if __name__ == "__main__":
    landslide_data = generate_landslide_data(num_points=100, initial_displacement=10.0, velocity_decrease_rate=0.011, noise_level=0.05)  # Tuned parameters
    write_to_csv(landslide_data)
    plot_inverse_velocity()  # Call the plotting function


At this point I tried to run the PINN on these data.

Fukuzono's inverse-velocity method must be expressed in differential form.

The complete formula, without the simplification of the exponent to 2 (the symbols used in the two formulas do not denote the same quantities).

The network tries to optimize the parameter m (the collapse time t_f is also estimated as a trainable variable).
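For reference, a sketch of the standard Fukuzono relations (acceleration law, the inverse-velocity solution, and the linear case for exponent 2); the residual actually enforced in the loss below is the one taken from the code, with u = 1/v and m as the trainable parameter:

\frac{dv}{dt} = A\,v^{\alpha},
\qquad
\frac{1}{v(t)} = \left[ A(\alpha - 1)\,(t_f - t) \right]^{\frac{1}{\alpha - 1}},
\qquad
\alpha = 2 \;\Rightarrow\; \frac{1}{v} = A\,(t_f - t)

\mathcal{R}(t) = \frac{du}{dt} - m\,\frac{u^{2}}{t_f - t}, \qquad u = \frac{1}{v}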

Model on dataset 1

Model on dataset 2


 

In the end, it can be seen that the model overestimated the time of collapse.

As a note, the model needs a large number of epochs (> 5000) to converge in an even minimally satisfactory way.

import tensorflow as tf
import numpy as np
import pandas as pd

# Load the data from the CSV file (Day, Cumulative_Displacement, Inverse_Velocity)
# Change the file name here
file_path = './fuku/landslide_data3.csv'
df = pd.read_csv(file_path, delimiter=';')

t = df['Day'].values.reshape(-1, 1)  # Time (days)
inv_v = df['Inverse_Velocity'].values.reshape(-1, 1)  # Inverse velocity

# Initial parameter m (to be optimized)
m_init = tf.Variable(1.5, dtype=tf.float32, trainable=True)

# PINN model
class PINN(tf.keras.Model):
    def __init__(self):
        super(PINN, self).__init__()
        self.hidden1 = tf.keras.layers.Dense(20, activation='tanh')
        self.hidden2 = tf.keras.layers.Dense(20, activation='tanh')
        self.hidden3 = tf.keras.layers.Dense(20, activation='tanh')
        self.out = tf.keras.layers.Dense(1)

    def call(self, t):
        x = self.hidden1(t)
        x = self.hidden2(x)
        x = self.hidden3(x)
        return self.out(x)

# Initialize the model
model = PINN()

def loss_fn(t, inv_v, m):
    t = tf.convert_to_tensor(t, dtype=tf.float32)  # Conversion to tensor
    inv_v = tf.convert_to_tensor(inv_v, dtype=tf.float32)
    with tf.GradientTape() as tape:
        tape.watch(t)
        u = model(t)
    du_dt = tape.gradient(u, t)
    if du_dt is None:
        du_dt = tf.zeros_like(t)  # Avoid errors if the gradient is None
    # Physics residual of the differential Fukuzono law: du/dt - m * u^2 / (t_f - t)
    eqn = du_dt - m * ((u * u) / tf.convert_to_tensor(t_f - t, dtype=tf.float32))  # Convert t_f - t to a tensor
    mse_data = tf.reduce_mean(tf.square(u - inv_v))
    mse_phys = tf.reduce_mean(tf.square(eqn))
    return mse_data + mse_phys

# Optimizer
optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)

# Initial collapse time (to be estimated)
t_f = tf.Variable(np.max(t) + 1.0, dtype=tf.float32, trainable=True)

# Training loop
epochs = 10000
for epoch in range(epochs):
    with tf.GradientTape() as tape:
        loss = loss_fn(t, inv_v, m_init)
    grads = tape.gradient(loss, model.trainable_variables + [t_f, m_init])
    optimizer.apply_gradients(zip(grads, model.trainable_variables + [t_f, m_init]))

    if epoch % 10 == 0:
        print(f"Epoch {epoch}, Loss: {loss.numpy()}, t_f: {t_f.numpy()}, m: {m_init.numpy()}")



from scipy.interpolate import interp1d
from scipy.optimize import brentq
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
# Compute min and max time from dataset
t_min = np.min(t)
t_max = np.max(t)

# Generate predictions for a fine grid of time values
t_pred = np.linspace(t_min, t_max, 500).reshape(-1, 1).astype(np.float32)
inv_v_pred = model(t_pred).numpy().flatten()

# Ensure `t_pred_original` has the correct shape
t_pred_original = t_pred.flatten()

# Fit a linear model to estimate collapse time
lin_reg = LinearRegression()
lin_reg.fit(t_pred_original.reshape(-1, 1), inv_v_pred)
slope = lin_reg.coef_[0]
intercept = lin_reg.intercept_

# Compute collapse time as x-intercept (1/V = 0)
if slope != 0:
    collapse_time = -intercept / slope
    print(f"Estimated collapse time: {collapse_time:.2f} days")
else:
    collapse_time = None
    print("Could not estimate collapse time (slope is zero).")

# Visualization
plt.figure(figsize=(10, 6))

# Scatter plot of real data
plt.scatter(t, inv_v, label='Data', color='red', marker='o')

# PINN-predicted curve
plt.plot(t_pred_original, inv_v_pred, label='PINN Prediction', color='blue')

# Linear regression line (dashed black)
t_reg_line = np.linspace(t_min, t_max, 500)
inv_v_reg_line = lin_reg.predict(t_reg_line.reshape(-1, 1))
plt.plot(t_reg_line, inv_v_reg_line, linestyle='dashed', color='black', label="Linear Fit")

# Vertical line at estimated collapse time
if collapse_time is not None and t_min <= collapse_time <= t_max:
    plt.axvline(collapse_time, color='green', linestyle='dashed', label=f'Collapse Time: {collapse_time:.2f} days')

plt.xlabel('Time (days)')
plt.ylabel('Inverse Velocity (1/V)')
plt.legend()
plt.grid()
plt.title(f"Forecasted Collapse Time: {collapse_time:.2f}" if collapse_time else "Forecasted Collapse Time: Not Found")
plt.show()
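Since t_f is itself a trainable variable, it can be compared directly with the collapse time extrapolated from the linear fit (a quick check that can be appended to the script above):

# Compare the collapse time learned by the PINN (trainable t_f)
# with the one extrapolated from the linear fit of the predicted curve
print(f"PINN t_f: {t_f.numpy():.2f} days")
if collapse_time is not None:
    print(f"Linear-fit estimate: {collapse_time:.2f} days")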
