# -*- coding: utf-8 -*-
"""Quincineto_stima_errore_detrend2.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1fFEfuUj5iwNucKDOfVpEzvIYK_1KebH8
### Quincineto processing
"""
import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
import tensorflow as tf
import os
# Commented out IPython magic to ensure Python compatibility.
# prepare TensorBoard
# %load_ext tensorboard
"""
Download dati"""
!wget http://c1p81.altervista.org/detrend2.zip
!unzip detrend2.zip
"""Carica i dati in matrice"""
df=pd.read_csv(r'detrend2.csv', sep=',', header=0,index_col=['Data'])
df.head()
plt.plot(df['Est.[mm]'])
plt.title("Estensimetro")
plt.xlabel('Datetime')
plt.ylabel('Est')
plt.show()
# sliding window over the data
n_past = 300     # number of past observations fed to the encoder
n_future = 100   # number of future observations to predict
n_features = 1   # single feature: the extensometer reading
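# A quick sanity check (illustrative, assuming the 6000-row training split
# used below): a sliding window yields len(series) - n_past - n_future + 1
# samples, since each sample needs n_past inputs followed by n_future targets.
print(6000 - n_past - n_future + 1)  # expected number of training windows: 5601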
# split the dataset into ~75% train, 25% test
righe = df.shape[0]
t = int(np.round(righe * 0.75))
print(t)
# the split is hard-coded at row 6000, close to the computed 75% mark
train_df, test_df = df[:6000], df[6000:]
# the test dataset is not used to train the network:
# it is the future state the network must predict,
# and it is where the break in the trend occurs
plt.plot(test_df['Est.[mm]'])
plt.title("Train dataset estensimetro Dettaglio")
plt.xlabel('Datetime')
plt.ylabel('Est')
plt.show()
# rescale the data to [-1, 1] for the network, one scaler per column;
# use copies so the original DataFrames keep the raw values
train = train_df.copy()
scalers = {}
for i in train_df.columns:
    scaler = MinMaxScaler(feature_range=(-1, 1))
    s_s = scaler.fit_transform(train[i].values.reshape(-1, 1))
    s_s = np.reshape(s_s, len(s_s))
    scalers['scaler_' + i] = scaler
    train[i] = s_s
# apply the scalers fitted on the training set to the test set
test = test_df.copy()
for i in train_df.columns:
    scaler = scalers['scaler_' + i]
    s_s = scaler.transform(test[i].values.reshape(-1, 1))
    s_s = np.reshape(s_s, len(s_s))
    test[i] = s_s
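# Optional round-trip check (a sketch, not in the original notebook): each
# fitted scaler should map the scaled training column back to the raw values
# still held in train_df, up to floating-point error.
col = train_df.columns[0]
roundtrip = scalers['scaler_' + col].inverse_transform(train[col].values.reshape(-1, 1))
print(np.allclose(roundtrip.ravel(), train_df[col].values))  # expect True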
"""**Converting the series to samples for supervised learning**"""
def split_series(series, n_past, n_future):
    # n_past   ==> number of past observations per sample
    # n_future ==> number of future observations per sample
    X, y = list(), list()
    for window_start in range(len(series)):
        past_end = window_start + n_past
        future_end = past_end + n_future
        if future_end > len(series):
            break
        # slice the past and future parts of the window
        past, future = series[window_start:past_end, :], series[past_end:future_end, :]
        X.append(past)
        y.append(future)
    return np.array(X), np.array(y)
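# Minimal usage example (illustrative): on a toy series of 10 rows with
# n_past=3 and n_future=2 we expect 10 - 3 - 2 + 1 = 6 windows.
toy = np.arange(10, dtype=float).reshape(-1, 1)
X_toy, y_toy = split_series(toy, 3, 2)
print(X_toy.shape, y_toy.shape)  # (6, 3, 1) (6, 2, 1)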
X_train, y_train = split_series(train.values,n_past, n_future)
X_train = X_train.reshape((X_train.shape[0], X_train.shape[1],n_features))
y_train = y_train.reshape((y_train.shape[0], y_train.shape[1], n_features))
X_test, y_test = split_series(test.values,n_past, n_future)
X_test = X_test.reshape((X_test.shape[0], X_test.shape[1],n_features))
y_test = y_test.reshape((y_test.shape[0], y_test.shape[1], n_features))
print(X_test.shape)
# E1D1: encoder-decoder LSTM with one layer on each side
# n_features ==> number of features at each timestep in the data
encoder_inputs = tf.keras.layers.Input(shape=(n_past, n_features))
# the encoder LSTM returns its final hidden and cell states
encoder_l1 = tf.keras.layers.LSTM(300, return_state=True)
encoder_outputs1 = encoder_l1(encoder_inputs)
encoder_states1 = encoder_outputs1[1:]  # [hidden state, cell state]
# repeat the encoder output n_future times to feed the decoder
decoder_inputs = tf.keras.layers.RepeatVector(n_future)(encoder_outputs1[0])
# the decoder LSTM is initialised with the encoder states
decoder_l1 = tf.keras.layers.LSTM(300, return_sequences=True, dropout=0.5, recurrent_dropout=0.5)(decoder_inputs, initial_state=encoder_states1)
decoder_outputs1 = tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(n_features))(decoder_l1)
model_e1d1 = tf.keras.models.Model(encoder_inputs, decoder_outputs1)
model_e1d1.summary()
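# Optional (a sketch, not part of the original run): keep the best model on
# disk while training by adding this callback to the callbacks list below.
# The filename 'model_e1d1.h5' is an arbitrary choice.
checkpoint = tf.keras.callbacks.ModelCheckpoint('model_e1d1.h5', monitor='val_loss', save_best_only=True)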
# decay the learning rate geometrically: lr(epoch) = 1e-3 * 0.9**epoch
reduce_lr = tf.keras.callbacks.LearningRateScheduler(lambda x: 1e-3 * 0.90 ** x)
epoche = 15  # number of training epochs
# TensorBoard logging setup
import datetime
log_dir = "Logs/fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)
model_e1d1.compile(optimizer=tf.keras.optimizers.Adam(), loss=tf.keras.losses.Huber())
history_e1d1=model_e1d1.fit(X_train,y_train,epochs=epoche,validation_data=(X_test,y_test),batch_size=164,verbose=1,callbacks=[reduce_lr,tensorboard_callback])
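# Optional early stopping (a sketch, not used in the original run): halt
# training when the validation loss stops improving and restore the best
# weights; pass it via callbacks=[reduce_lr, tensorboard_callback, early_stop].
early_stop = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=3, restore_best_weights=True)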
plt.plot(history_e1d1.history['loss'])
plt.plot(history_e1d1.history['val_loss'])
plt.title("Model Loss")
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend(['Train', 'Valid'])
plt.show()
plt.plot(history_e1d1.history['lr'])
plt.title("Learning rate schedule")
plt.xlabel('Epochs')
plt.ylabel('Learning rate')
plt.show()
"""Predizione dei dati"""
pred1_e1d1=model_e1d1.predict(X_test)
pred_e1d1=model_e1d1.predict(X_train)
"""Ritorna dai valori scalati ai valori reali"""
for index, i in enumerate(train_df.columns):
    scaler = scalers['scaler_' + i]
    pred1_e1d1[:, :, index] = scaler.inverse_transform(pred1_e1d1[:, :, index])
    pred_e1d1[:, :, index] = scaler.inverse_transform(pred_e1d1[:, :, index])
    y_train[:, :, index] = scaler.inverse_transform(y_train[:, :, index])
    y_test[:, :, index] = scaler.inverse_transform(y_test[:, :, index])
# save the de-scaled targets to CSV; with n_features = 1 only the
# extensometer channel (index 0) exists, so the X/Y/T/battery saves from
# the earlier multi-feature version of this notebook would raise an
# IndexError and have been dropped
np.savetxt('array_ytrain_Est.csv', y_train[:, :, 0], delimiter=';', fmt='%f')
np.savetxt('array_ytest_Est.csv', y_test[:, :, 0], delimiter=';', fmt='%f')
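# Round-trip check (a sketch): the saved array can be reloaded with the same
# delimiter; fmt='%f' keeps six decimals, so compare with a loose tolerance.
y_back = np.loadtxt('array_ytest_Est.csv', delimiter=';')
print(np.allclose(y_back, y_test[:, :, 0], atol=1e-5))  # expect True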
print(pred1_e1d1[:,:,0])
"""**Checking Error** """
#errore medio tra modello e dati reali
from sklearn.metrics import mean_absolute_error
print(y_test.shape)
#print(pred1_e1d1[:,:,2])
#print(y_test[:,:,2])
#print(y_test.shape[0])
diff = pred1_e1d1[:,:,0]-y_test[:,:,0]
print("Diff")
#print(diff)
print(diff.shape)
#print(np.square(diff))
sommaq=np.sum(np.square(diff[0]))
print(sommaq.shape)
s_err1=np.sqrt(sommaq/y_test.shape[0]*y_test.shape[1])
print()
print(sommaq)
#err1 = np.square(pred1_e1d1[:,:,2]-y_test[:,:,2])
#s_err1 = np.sqrt(np.sum(err1, axis=0))
#print("Errore quadratico medio (mm)")
print(s_err1)
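# A complementary view (a sketch building on the diff array above): averaging
# the squared error over the test windows shows how the error grows with the
# forecast horizon.
rmse_per_step = np.sqrt(np.mean(np.square(diff), axis=0))
plt.plot(rmse_per_step)
plt.title("RMSE per forecast step")
plt.xlabel('Forecast step')
plt.ylabel('RMSE (mm)')
plt.show()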
plt.plot(y_test[:, 99, 0])
plt.plot(pred1_e1d1[:, 99, 0])
plt.title("Model vs monitoring")
plt.xlabel('Time')
plt.ylabel('Est (mm)')
plt.legend(['Real data', 'Model'])
plt.show()
# model vs monitoring on the training data (channel index 0: with
# n_features = 1 the original index 2 would raise an IndexError)
plt.plot(y_train[:, 99, 0])
plt.plot(pred_e1d1[:, 99, 0])
plt.title("Model vs monitoring (training data)")
plt.xlabel('Time')
plt.ylabel('Est (mm)')
plt.legend(['Monitoring', 'Model'])
plt.show()
# detail of the model vs real data plot: note how the model also
# reproduces the short-period variations well
plt.plot(y_train[1:1000, 99, 0])
plt.plot(pred_e1d1[1:1000, 99, 0])
plt.title("Model vs monitoring (detail)")
plt.xlabel('Time')
plt.ylabel('Est (mm)')
plt.legend(['Monitoring', 'Model'])
plt.show()
# Commented out IPython magic to ensure Python compatibility.
# %tensorboard --logdir Logs/fit