lunedì 23 marzo 2026

Errore NTFS su Debian

Montando un disco USB esterno che usiamo per lo scambio di file a lavoro (e quindi formattato NTFS per Windows) e' comparso il seguente errore 

 

luca@HW27747:~$ sudo mount -t ntfs-3g /dev/sda1 /media/cdrom
$MFTMirr does not match $MFT (record 3).
Failed to mount '/dev/sda1': Input/output error
NTFS is either inconsistent, or there is a hardware fault, or it's a
SoftRAID/FakeRAID hardware. In the first case run chkdsk /f on Windows
then reboot into Windows twice. The usage of the /f parameter is very
important! If the device is a SoftRAID/FakeRAID then first activate
it and mount a different device under the /dev/mapper/ directory, (e.g.
/dev/mapper/nvidia_eahaabcc1). Please see the 'dmraid' documentation
for more details. 

la soluzione e' banale 

sudo ntfsfix /dev/sda1 

Soil Moisture con dati iperspettrali

Durante il dottorato avevo fatto qualche prova con campioni artificiali di terreno ad umidita' controllata ed avevo un'ottima correlazione con la misura iperspettrale

I campioni erano di terreno naturale con umidita' aggiunta artificialmente 

Campione di tesi dottorato - illuminazione solare
 

Dopo la rimozione del continuum avevo lavorato sugli assorbimenti nello swir
 

Adesso ho trovato questo lavoro

Felix M. Riese and Sina Keller, “Introducing a Framework of Self-Organizing Maps for Regression of Soil Moisture with Hyperspectral Data,” in IGARSS 2018 - 2018 IEEE International Geoscience and Remote Sensing Symposium, Valencia, Spain, 2018, pp. 6151-6154. 

i dati sono riportati a questo link in formato csv con 680 spettri VNIR di campagna e relative misure di umidita' realizzate in sito tramite TRIME-PICO time-domain reflectometry (TDR) 

questi sono i dati normalizzati con rimozione del continuo


 In VNIR non risulta immediato un approccio di misura della profondita' di picco ed ho provato a vedere come si comportava un approccio random forest (ho utilizzato gemini in Google Colab in modo da avere anche l'ottimizzazione dei parametri in modo semplice)


import pandas as pd

# Load the soil-moisture dataset (Riese & Keller field campaign export).
df = pd.read_csv('/content/soilmoisture_dataset.csv')
display(df.head())

# Target: third column (index 2); features: spectral bands from column 18 on.
Y = df.iloc[:, 2]
X = df.iloc[:, 18:]

# Keep only complete rows (no NaN in either features or target),
# then split the cleaned frame back into X and Y.
combined = pd.concat([X, Y], axis=1).dropna()
X, Y = combined.iloc[:, :-1], combined.iloc[:, -1]

print(f"Shape of X: {X.shape}")
print(f"Shape of Y: {Y.shape}")

display(X.head())
display(Y.head())


from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error, r2_score

# Hold out 20% of the samples for testing; fixed seed for reproducibility.
X_train, X_test, Y_train, Y_test = train_test_split(
    X, Y, test_size=0.2, random_state=42)

# Baseline random-forest regressor, untuned.
model = RandomForestRegressor(n_estimators=100, random_state=42)
model.fit(X_train, Y_train)

# Predict on the held-out set and report test-set error metrics.
Y_pred = model.predict(X_test)

mse = mean_squared_error(Y_test, Y_pred)
r2 = r2_score(Y_test, Y_pred)

print(f"Mean Squared Error: {mse:.4f}")
print(f"R-squared: {r2:.4f}")

from sklearn.model_selection import RandomizedSearchCV
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error, r2_score
import numpy as np

# Hyper-parameter search space for the random forest.
param_dist = {
    'n_estimators': np.arange(100, 1000, 100),   # number of trees
    'max_features': ['sqrt', 'log2'],            # features considered per split
    'max_depth': [None, 10, 20, 30, 40, 50],     # depth limit of each tree
    'min_samples_split': np.arange(2, 20, 2),    # min samples to split a node
    'min_samples_leaf': np.arange(1, 10, 1),     # min samples per leaf
    'bootstrap': [True, False],                  # sample with/without replacement
}

rf = RandomForestRegressor(random_state=42)

# Randomized search: 50 sampled configurations, 5-fold CV, all cores.
random_search = RandomizedSearchCV(
    estimator=rf,
    param_distributions=param_dist,
    n_iter=50,
    cv=5,
    verbose=2,
    random_state=42,
    n_jobs=-1,
    scoring='neg_mean_squared_error',
)
random_search.fit(X_train, Y_train)

print("Best Parameters found:", random_search.best_params_)
print("Best Negative Mean Squared Error (from CV):", random_search.best_score_)

# Evaluate the best estimator on the held-out test set.
best_rf_model = random_search.best_estimator_
Y_pred_tuned = best_rf_model.predict(X_test)

mse_tuned = mean_squared_error(Y_test, Y_pred_tuned)
r2_tuned = r2_score(Y_test, Y_pred_tuned)

print(f"\nPerformance of the tuned Random Forest model on the test set:")
print(f"Mean Squared Error (tuned): {mse_tuned:.4f}")
print(f"R-squared (tuned): {r2_tuned:.4f}")

import matplotlib.pyplot as plt
import seaborn as sns

# Baseline model: actual vs. predicted on the held-out test set,
# with the 1:1 diagonal as the perfect-prediction reference.
plt.figure(figsize=(10, 7))
sns.scatterplot(x=Y_test, y=Y_pred, alpha=0.6)
lo, hi = Y.min(), Y.max()
plt.plot([lo, hi], [lo, hi], 'r--', lw=2, label='Perfect Prediction Line')
plt.title('Actual vs. Predicted Soil Moisture Values (Test Set)')
plt.xlabel('Actual Soil Moisture')
plt.ylabel('Predicted Soil Moisture')
plt.legend()
plt.grid(True)
plt.tight_layout()
plt.show()

import matplotlib.pyplot as plt
import seaborn as sns

# Tuned model: actual vs. predicted on the held-out test set,
# with the 1:1 diagonal as the perfect-prediction reference.
plt.figure(figsize=(10, 7))
sns.scatterplot(x=Y_test, y=Y_pred_tuned, alpha=0.6)
diag_lo, diag_hi = Y.min(), Y.max()
plt.plot([diag_lo, diag_hi], [diag_lo, diag_hi], 'r--', lw=2,
         label='Perfect Prediction Line')
plt.title('Actual vs. Predicted Soil Moisture Values (Tuned Model - Test Set)')
plt.xlabel('Actual Soil Moisture')
plt.ylabel('Predicted Soil Moisture')
plt.legend()
plt.grid(True)
plt.tight_layout()
plt.show()

import matplotlib.pyplot as plt
import seaborn as sns

# NOTE(review): this cell is an exact duplicate of the previous tuned-model
# scatter plot — it looks like a stray re-run in the notebook export.
# It is kept (and re-draws the same figure) to preserve the original behavior.
plt.figure(figsize=(10, 7))
sns.scatterplot(x=Y_test, y=Y_pred_tuned, alpha=0.6)
bounds = [Y.min(), Y.max()]
plt.plot(bounds, bounds, 'r--', lw=2, label='Perfect Prediction Line')
plt.title('Actual vs. Predicted Soil Moisture Values (Tuned Model - Test Set)')
plt.xlabel('Actual Soil Moisture')
plt.ylabel('Predicted Soil Moisture')
plt.legend()
plt.grid(True)
plt.tight_layout()
plt.show()

import matplotlib.pyplot as plt
import seaborn as sns

# Original (untuned) model: actual vs. predicted on the test set,
# with the 1:1 diagonal as the perfect-prediction reference.
plt.figure(figsize=(10, 7))
sns.scatterplot(x=Y_test, y=Y_pred, alpha=0.6)
extent = [Y.min(), Y.max()]
plt.plot(extent, extent, 'r--', lw=2, label='Perfect Prediction Line')
plt.title('Actual vs. Predicted Soil Moisture Values (Original Model - Test Set)')
plt.xlabel('Actual Soil Moisture')
plt.ylabel('Predicted Soil Moisture')
plt.legend()
plt.grid(True)
plt.tight_layout()
plt.show()

import matplotlib.pyplot as plt
import seaborn as sns

# BUG FIX: in the original notebook export `residuals_tuned` was plotted
# here but only defined in a later cell, so a fresh top-to-bottom run
# raised a NameError. Compute the residuals first, then plot them.
residuals_tuned = Y_test - Y_pred_tuned
print("First 5 residuals of the tuned model:")
display(residuals_tuned.head())

# Histogram (with KDE) of the tuned model's residuals; a roughly
# zero-centred, symmetric distribution indicates an unbiased fit.
plt.figure(figsize=(10, 6))
sns.histplot(residuals_tuned, kde=True)
plt.title('Distribution of Residuals for Tuned Model')
plt.xlabel('Residuals (Actual - Predicted)')
plt.ylabel('Frequency')
plt.grid(True)
plt.tight_layout()
plt.show()

import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

# Rank the spectral bands by their importance in the fitted forest.
importance_df = (
    pd.DataFrame({
        'Feature': X.columns,
        'Importance': model.feature_importances_,
    })
    .sort_values(by='Importance', ascending=False)
)

print("Top 10 Most Important Features:")
display(importance_df.head(10))

# Bar chart of the 20 strongest bands.
plt.figure(figsize=(12, 8))
sns.barplot(x='Importance', y='Feature', data=importance_df.head(20),
            palette='viridis', hue='Feature', legend=False)
plt.title('Top 20 Most Important Features (Random Forest Regressor)')
plt.xlabel('Importance')
plt.ylabel('Feature')
plt.tight_layout()
plt.show()


Questo e' il grafico del modello non ottimizzato

e questo il grafico ottimizzato

grafico degli errori residui


 


 Come si vede il modello funziona decisamente bene, considerando che siamo in condizioni naturali. L'aspetto decisamente piu' interessante emerge dal grafico successivo, sul peso delle diverse bande nei dati

Come si vede viene estratto il valore di 950 nm come lunghezza d'onda piu' significativa del modello (che e' anche l'estremo del sensore)... questa lunghezza d'onda corrisponde (in realta' sarebbe a 970 nm, ma al di fuori del range di misura) alla terza armonica del legame O-H. Quindi il modello in maniera autonoma ha individuato una variabile latente (la terza armonica e' un assorbimento molto piu' debole rispetto a quelli presenti nello SWIR)

Visto che in linea di principio la posizione dell'assorbimento e' anche funzione della temperatura e visto che il dataset include anche la temperatura del suolo, ho provato ad inserire nei valori X del modello anche la temperatura oltre alla riflettanza

 

Non me lo aspettavo ma il modello e' migliorato sensibilmente. 
 

Questo il codice modificato



import pandas as pd

df = pd.read_csv('/content/soilmoisture_dataset.csv')

print("First 5 rows of the dataset:")
display(df.head())

print("\nInformation about the dataset:")
df.info()

# Target is soil moisture; predictors are soil temperature plus every
# purely-numeric column name (the reflectance bands, named by wavelength).
y_columns = ['soil_moisture']
X_columns = ['soil_temperature'] + [col for col in df.columns if col.isdigit()]

X = df[X_columns]
Y = df[y_columns]

print(f"Shape of features (X): {X.shape}")
print(f"Shape of targets (Y): {Y.shape}")

print("\nFirst 5 rows of features (X):")
display(X.head())

print("\nFirst 5 rows of targets (Y):")
display(Y.head())

from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor

# 80/20 train/test split, seeded for reproducibility.
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=42)

for part_name, part in (("X_train", X_train), ("X_test", X_test),
                        ("Y_train", Y_train), ("Y_test", Y_test)):
    print(f"{part_name} shape: {part.shape}")

model = RandomForestRegressor(random_state=42)
# Y is a one-column DataFrame; flatten it for single-output regression.
model.fit(X_train, Y_train.values.ravel())

Y_pred = model.predict(X_test)

print("\nFirst 5 predictions:")
print(Y_pred[:5])

import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

# Rank the predictors (temperature + reflectance bands) by forest importance.
feature_importances = model.feature_importances_
importance_df = (
    pd.DataFrame({'Feature': X_columns, 'Importance': feature_importances})
    .sort_values(by='Importance', ascending=False)
)

print("Top 10 Feature Importances:")
display(importance_df.head(10))

# Most influential predictors.
plt.figure(figsize=(12, 7))
sns.barplot(x='Importance', y='Feature', data=importance_df.head(10))
plt.title('Top 10 Feature Importances for Soil Moisture and Temperature Prediction')
plt.xlabel('Importance')
plt.ylabel('Reflectance Band (Feature)')
plt.show()

print("Bottom 10 Feature Importances:")
display(importance_df.tail(10))

# Least influential predictors.
plt.figure(figsize=(12, 7))
sns.barplot(x='Importance', y='Feature', data=importance_df.tail(10))
plt.title('Bottom 10 Feature Importances for Soil Moisture and Temperature Prediction')
plt.xlabel('Importance')
plt.ylabel('Reflectance Band (Feature)')
plt.show()

import matplotlib.pyplot as plt
import seaborn as sns

# Collect actual values, predictions and residuals in one frame for plotting.
results_df = Y_test.copy()
results_df['soil_moisture_pred'] = Y_pred  # Y_pred is a 1D array (single output)
results_df['soil_moisture_residuals'] = (
    results_df['soil_moisture'] - results_df['soil_moisture_pred']
)

# Actual vs. predicted, with the 1:1 reference diagonal.
plt.figure(figsize=(12, 6))
sns.scatterplot(x='soil_moisture', y='soil_moisture_pred', data=results_df)
span = [results_df['soil_moisture'].min(), results_df['soil_moisture'].max()]
plt.plot(span, span, color='red', linestyle='--', lw=2)
plt.title('Actual vs. Predicted Soil Moisture')
plt.xlabel('Actual Soil Moisture')
plt.ylabel('Predicted Soil Moisture')
plt.grid(True, linestyle='--', alpha=0.7)
plt.show()

# Residuals against predictions: should scatter around zero with no trend.
plt.figure(figsize=(12, 6))
sns.scatterplot(x=results_df['soil_moisture_pred'],
                y=results_df['soil_moisture_residuals'])
plt.axhline(y=0, color='r', linestyle='--')
plt.title('Residual Plot for Soil Moisture')
plt.xlabel('Predicted Soil Moisture')
plt.ylabel('Residuals (Actual - Predicted)')
plt.grid(True, linestyle='--', alpha=0.7)
plt.show()

from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score

# Y_pred is a 1-D array for the single target, so the metrics apply directly.
print(f"\n--- Metrics for {y_columns[0]} ---")
mae = mean_absolute_error(Y_test, Y_pred)
mse = mean_squared_error(Y_test, Y_pred)
r2 = r2_score(Y_test, Y_pred)

for label, value in (("Mean Absolute Error (MAE)", mae),
                     ("Mean Squared Error (MSE)", mse),
                     ("R-squared (R2) Score", r2)):
    print(f"{label}: {value:.3f}")

 

 

 

 

 

 

venerdì 20 marzo 2026

Spettri di Prisma

Sono ritornato a lavorare sui dati Prisma, stavolta in maniera piu' convinta

Il primo problema e' trovare il software giusto. Se si aprono le immagini con Esa Snap non ci sono particolari problemi, tranne quando si arriva ad aprire Spectrum View e si scopre che le bande dell'immagine sono divise in due gruppi, VNIR e SWIR, e quindi non e' possibile avere uno spettro completo da 400 a 2500 nm. Si devono aprire le due immagini e, a seconda di dove si trova il cursore, Spectrum View si spostera' tra i dati VNIR e SWIR 

 


Per avere uno spettro completo l'unica soluzione e' stata passare da uno script Python (compilato via Claude AI)

 


"""
PRISMA Hyperspectral Spectrum Plotter (L2C)
=============================================
Reads a PRISMA L2C HE5 file and plots the full VNIR+SWIR spectrum
for a user-selected pixel.

Confirmed HDF5 layout:
Cubes : HDFEOS/SWATHS/PRS_L2C_HCO/Data Fields/{VNIR,SWIR}_Cube
shape = (rows, bands, cols)
Wavelengths : root attrs List_Cw_Vnir / List_Cw_Swir (nm, length=n_bands)
Band flags : root attrs List_Cw_Vnir_Flags / List_Cw_Swir_Flags (1=valid)
Scale : root attrs L2ScaleVnirMin/Max, L2ScaleSwirMin/Max
DN → reflectance = DN / 65535 * (ScaleMax - ScaleMin) + ScaleMin

Usage
-----
python prisma_spectrum_plot.py <file.he5> [row] [col] [-o out.png]

Dependencies
------------
pip install h5py numpy matplotlib
"""

import argparse
import numpy as np
import h5py
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from pathlib import Path


# HDF5 dataset paths of the two spectral cubes inside a PRISMA L2C product.
CUBE_VNIR = "HDFEOS/SWATHS/PRS_L2C_HCO/Data Fields/VNIR_Cube"
CUBE_SWIR = "HDFEOS/SWATHS/PRS_L2C_HCO/Data Fields/SWIR_Cube"


def load_prisma(filepath: str, row: int | None = None, col: int | None = None):
    """
    Extract a single-pixel VNIR+SWIR spectrum from a PRISMA L2C HE5 file.

    Parameters
    ----------
    filepath : path to the .he5 file
    row, col : pixel coordinates (0-based). Default = image centre.

    Returns
    -------
    wavelengths : np.ndarray – wavelengths in nm (valid bands only, sorted)
    spectrum : np.ndarray – reflectance [0–1]
    meta : dict

    Raises
    ------
    FileNotFoundError : if *filepath* does not exist
    ValueError : if (row, col) falls outside the image bounds
    """
    # NOTE(review): the source indentation was lost in the blog export; this
    # layout is a reconstruction. The HDF5 file is only needed until the two
    # per-pixel DN vectors are read, so the `with` block closes after that.
    filepath = Path(filepath)
    if not filepath.exists():
        raise FileNotFoundError(filepath)

    with h5py.File(filepath, "r") as f:

        # ── wavelengths and valid-band flags from root attributes ─────────────
        vnir_wl = np.array(f.attrs["List_Cw_Vnir"], dtype=np.float32)
        swir_wl = np.array(f.attrs["List_Cw_Swir"], dtype=np.float32)
        vnir_flags = np.array(f.attrs["List_Cw_Vnir_Flags"], dtype=np.int8)
        swir_flags = np.array(f.attrs["List_Cw_Swir_Flags"], dtype=np.int8)

        vnir_valid = vnir_flags == 1  # boolean mask for valid VNIR bands
        swir_valid = swir_flags == 1  # boolean mask for valid SWIR bands

        # ── scale factors: uint16 DN → reflectance ────────────────────────────
        vnir_scale_min = float(f.attrs["L2ScaleVnirMin"])
        vnir_scale_max = float(f.attrs["L2ScaleVnirMax"])
        swir_scale_min = float(f.attrs["L2ScaleSwirMin"])
        swir_scale_max = float(f.attrs["L2ScaleSwirMax"])

        # ── cube shapes: (rows, bands, cols) ──────────────────────────────────
        vnir_ds = f[CUBE_VNIR]
        swir_ds = f[CUBE_SWIR]
        n_rows, n_vnir_bands, n_cols = vnir_ds.shape
        _, n_swir_bands, _ = swir_ds.shape

        # Default to the image centre when no pixel is requested.
        if row is None: row = n_rows // 2
        if col is None: col = n_cols // 2

        if not (0 <= row < n_rows and 0 <= col < n_cols):
            raise ValueError(
                f"Pixel ({row}, {col}) outside image bounds ({n_rows}x{n_cols})."
            )

        # ── read single pixel: cube[row, :, col] ─────────────────────────
        vnir_dn = vnir_ds[row, :, col].astype(np.float32)
        swir_dn = swir_ds[row, :, col].astype(np.float32)

    # Everything below operates on in-memory arrays; the HDF5 file is closed.

    # ── DN → reflectance ──────────────────────────────────────────────────────
    vnir_ref = vnir_dn / 65535.0 * (vnir_scale_max - vnir_scale_min) + vnir_scale_min
    swir_ref = swir_dn / 65535.0 * (swir_scale_max - swir_scale_min) + swir_scale_min

    # ── apply valid-band masks ────────────────────────────────────────────────
    # Flags array length matches n_bands; trim to cube size just in case
    vnir_mask = vnir_valid[:n_vnir_bands]
    swir_mask = swir_valid[:n_swir_bands]

    vnir_wl = vnir_wl[:n_vnir_bands][vnir_mask]
    vnir_ref = vnir_ref[vnir_mask]
    swir_wl = swir_wl[:n_swir_bands][swir_mask]
    swir_ref = swir_ref[swir_mask]

    # ── sort by wavelength (PRISMA stores bands longest-first) ───────────────
    def sort_wl(wl, sp):
        idx = np.argsort(wl)
        return wl[idx], sp[idx]

    vnir_wl, vnir_ref = sort_wl(vnir_wl, vnir_ref)
    swir_wl, swir_ref = sort_wl(swir_wl, swir_ref)

    # ── remove VNIR/SWIR overlap: keep VNIR below SWIR start ─────────────────
    vnir_keep = vnir_wl < swir_wl[0]
    wavelengths = np.concatenate([vnir_wl[vnir_keep], swir_wl])
    spectrum = np.concatenate([vnir_ref[vnir_keep], swir_ref])

    meta = dict(
        row=row, col=col,
        n_rows=n_rows, n_cols=n_cols,
        n_vnir_valid=int(vnir_mask.sum()),
        n_swir_valid=int(swir_mask.sum()),
        filename=filepath.name,
    )
    return wavelengths, spectrum, meta


# ── plot ───────────────────────────────────────────────────────────────────────

COLOUR_REGIONS = [
    (400, 700, "#e8f4e8", "VIS"),
    (700, 1000, "#f5e8f5", "NIR"),
    (1000, 1800, "#e8eef5", "SWIR-1"),
    (1800, 2500, "#f5f0e8", "SWIR-2"),
]

ABSORPTION_BANDS = [
    (1340, 1460, "H\u2082O"),
    (1800, 1960, "H\u2082O"),
]


def plot_spectrum(wavelengths, spectrum, meta, output_path=None):
    """Render the merged VNIR+SWIR spectrum; save it to *output_path* if
    given, otherwise display it interactively."""
    fig, ax = plt.subplots(figsize=(13, 5))

    wl_lo, wl_hi = wavelengths[0], wavelengths[-1]
    peak = np.nanmax(spectrum)

    # Shaded backgrounds for the spectral regions, labelled above the curve.
    for start, stop, shade, tag in COLOUR_REGIONS:
        left, right = max(start, wl_lo), min(stop, wl_hi)
        if left < right:
            ax.axvspan(left, right, color=shade, alpha=0.45, zorder=0)
            ax.text((left + right) / 2, peak * 1.02, tag,
                    ha="center", va="bottom", fontsize=8,
                    color="#888888", zorder=2)

    # Grey out the atmospheric water-vapour absorption windows.
    for start, stop, tag in ABSORPTION_BANDS:
        left, right = max(start, wl_lo), min(stop, wl_hi)
        if left < right:
            ax.axvspan(left, right, color="#bbbbbb", alpha=0.55, zorder=1)
            ax.text((left + right) / 2, peak * 0.96, tag,
                    ha="center", va="top", fontsize=8,
                    color="#444444", zorder=3)

    # The spectrum itself, with a light fill underneath.
    ax.plot(wavelengths, spectrum, color="#1a5276", linewidth=1.3, zorder=4)
    ax.fill_between(wavelengths, spectrum, alpha=0.12, color="#1a5276", zorder=3)

    ax.set_xlim(wl_lo, wl_hi)
    ax.set_ylim(bottom=0)
    ax.set_xlabel("Wavelength (nm)", fontsize=11)
    ax.set_ylabel("Reflectance", fontsize=11)
    ax.set_title(
        f"PRISMA L2C Spectrum - pixel ({meta['row']}, {meta['col']}) "
        f"[{meta['n_rows']}x{meta['n_cols']}]\n{meta['filename']}",
        fontsize=10, pad=8
    )
    ax.xaxis.set_minor_locator(ticker.AutoMinorLocator(5))
    ax.yaxis.set_minor_locator(ticker.AutoMinorLocator(4))
    ax.tick_params(which="both", direction="in")
    ax.grid(True, which="major", linestyle="--", alpha=0.35)
    ax.annotate(
        f"VNIR: {meta['n_vnir_valid']} bands | SWIR: {meta['n_swir_valid']} bands",
        xy=(0.99, 0.02), xycoords="axes fraction",
        ha="right", va="bottom", fontsize=8, color="#666666"
    )

    plt.tight_layout()
    if output_path:
        fig.savefig(output_path, dpi=150, bbox_inches="tight")
        print(f"[OK] Saved: {output_path}")
    else:
        plt.show()
    plt.close(fig)


# ── CLI ────────────────────────────────────────────────────────────────────────

def main():
    """Command-line entry point: parse arguments, load the requested pixel
    spectrum with load_prisma(), print a summary, and plot it."""
    # NOTE(review): the source indentation was lost in the blog export; this
    # layout is a reconstruction (runtime strings copied verbatim).
    parser = argparse.ArgumentParser(
        description="Plot a VNIR+SWIR pixel spectrum from a PRISMA L2C HE5 file."
    )
    parser.add_argument("he5_file")
    # row/col are optional positionals; None means "use the image centre".
    parser.add_argument("row", nargs="?", type=int, default=None,
                        help="Pixel row (default: centre)")
    parser.add_argument("col", nargs="?", type=int, default=None,
                        help="Pixel col (default: centre)")
    parser.add_argument("-o", "--output", default=None,
                        help="Save figure to file (e.g. spectrum.png)")
    args = parser.parse_args()

    print(f"[...] Reading {args.he5_file}")
    wavelengths, spectrum, meta = load_prisma(args.he5_file, args.row, args.col)

    # Summary of what was read before handing off to the plotter.
    print(
        f"[OK] {meta['n_vnir_valid']} VNIR + {meta['n_swir_valid']} SWIR valid bands\n"
        f" Image : {meta['n_rows']} rows x {meta['n_cols']} cols\n"
        f" Pixel : row={meta['row']}, col={meta['col']}\n"
        f" Range : {wavelengths[0]:.1f} - {wavelengths[-1]:.1f} nm "
        f"({len(wavelengths)} merged bands)"
    )

    plot_spectrum(wavelengths, spectrum, meta, output_path=args.output)


if __name__ == "__main__":
    main()


 

 

 

 

 

 

 

 

 

 

 

Errore NTFS su Debian

Montando un disco USB esterno che usiamo di scambio a lavoro (e quindi formattato NTFS per Windows) e' comparso il seguente errore    lu...