test_Net.py
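# Test script for the sequence-prediction networks: loads synthetic input/target
# time series from text files, min-max normalizes them, trains one of several
# recurrent or MLP architectures (defined in models.py / train_model.py, assumed
# from the wildcard imports below), predicts on the test set, and plots results.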

import numpy as np
import tensorflow as tf
from models import *
from train_model import *
from plot import *
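# Hyper-parameters: training schedule, layer sizes, data location, and time window.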
nb_epochs, batch_size, nb_samples = 10, 25, 2000
dim_input, dim_recurrent, dim_output = 1, 32, 2
learning_rate = 1e-3
path = '../../synthetic_data/'
t_init, t_fin = 0, 5
Nt = 25
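# Nt is presumably the number of time points per sample on [t_init, t_fin];
# in this script it is only passed through to the plotting helper.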
################################################################################################
################################################################################################
# READING FILES
INPUT_TRAIN_FILE_NAMES = [path + 'files_' + str(dim_input+dim_output) + '/train/input/file_' + str(i) + '.txt' for i in range(nb_samples)]
TARGET_TRAIN_FILE_NAMES = [path + 'files_' + str(dim_input+dim_output) + '/train/target/file_' + str(i) + '.txt' for i in range(nb_samples)]
INPUT_TEST_FILE_NAMES = [path + 'files_' + str(dim_input+dim_output) + '/test/input/file_' + str(i) + '.txt' for i in range(nb_samples)]
TARGET_TEST_FILE_NAMES = [path + 'files_' + str(dim_input+dim_output) + '/test/target/file_' + str(i) + '.txt' for i in range(nb_samples)]
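# Assumed data layout: <path>/files_<dim_input+dim_output>/{train,test}/{input,target}/file_<i>.txt,
# one flat whitespace-separated array per file.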
input_train_arrays, target_train_arrays = [], []
input_test_arrays, target_test_arrays = [], []
train_loss_tab = []
for i in range(nb_samples):
    input_train_arrays.append(np.reshape(np.loadtxt(INPUT_TRAIN_FILE_NAMES[i], dtype=np.float32), [dim_input, -1]))
    target_train_arrays.append(np.reshape(np.loadtxt(TARGET_TRAIN_FILE_NAMES[i], dtype=np.float32), [dim_output, -1]))
    input_test_arrays.append(np.reshape(np.loadtxt(INPUT_TEST_FILE_NAMES[i], dtype=np.float32), [dim_input, -1]))
    target_test_arrays.append(np.reshape(np.loadtxt(TARGET_TEST_FILE_NAMES[i], dtype=np.float32), [dim_output, -1]))
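# Each loaded sample is reshaped to (features, time): inputs become (dim_input, T)
# and targets (dim_output, T), with the time length T inferred via -1.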
# Stack the different samples
train_input = tf.stack(input_train_arrays)
train_target = tf.stack(target_train_arrays)
test_input = tf.stack(input_test_arrays)
test_target = tf.stack(target_test_arrays)
# Swap the second and third dimensions so each tensor is (samples, time, features)
train_input = tf.transpose(train_input, perm=[0, 2, 1])
train_target = tf.transpose(train_target, perm=[0, 2, 1])
test_input = tf.transpose(test_input, perm=[0, 2, 1])
test_target = tf.transpose(test_target, perm=[0, 2, 1])
# Get the time range
time_steps = test_target.shape[1].value
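# NOTE: `.value` is the TensorFlow 1.x Dimension API; under TF 2.x, shape[1] is already an int.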
################################################################################################
################################################################################################
# NORMALIZATION
# Get the min and the max along the first dimension (samples)
max_input = tf.reduce_max(train_input, axis=0)
min_input = tf.reduce_min(train_input, axis=0)
max_target = tf.reduce_max(train_target, axis=0)
min_target = tf.reduce_min(train_target, axis=0)
# And then along the time dimension (time is axis 0 after the first reduction)
max_input = tf.reduce_max(max_input, axis=0)
min_input = tf.reduce_min(min_input, axis=0)
max_target = tf.reduce_max(max_target, axis=0)
min_target = tf.reduce_min(min_target, axis=0)
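# After both reductions, min_* and max_* have shape (features,) and broadcast
# against the (samples, time, features) tensors below.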
# Apply the normalization
train_input = tf.divide(train_input - min_input, max_input - min_input)
test_input = tf.divide(test_input - min_input, max_input - min_input)
train_target = tf.divide(train_target - min_target, max_target - min_target)
test_target = tf.divide(test_target - min_target, max_target - min_target)
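# Min-max scaling maps each feature into [0, 1] using *training* statistics only,
# so test values may fall slightly outside [0, 1]; note the division fails if a
# feature is constant over the training set (max == min).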
################################################################################################
################################################################################################
# CHOICE OF THE MODEL
model_type = 'MLP_3'
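# Each branch below instantiates one architecture and calls its matching training
# helper; the constructors and train_model_* functions are assumed to come from
# the wildcard imports of models.py and train_model.py.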
if model_type == 'GRU_multisteps':
    model = GRU_multisteps(dim_input, dim_recurrent, dim_output)
    train_model_multistep(model, train_input, train_target, learning_rate, num_epochs=nb_epochs,
                          batch_size=batch_size, loss_tab=train_loss_tab)
elif model_type == 'lstm':
    window = 30
    model = lstm_net(dim_input, dim_output, window)
    # NOTE: the target slice starts at window+1, leaving a one-step gap after the
    # input window; `window:` may have been intended.
    model.fit(train_target[:, :window, :], train_target[:, window+1:, :], epochs=100, batch_size=32)
elif model_type == 'MLP_1':
    model = MLP_1(dim_input, dim_recurrent, dim_output)
    train_model_mlp_2(model, train_input, train_target, learning_rate, num_epochs=nb_epochs,
                      batch_size=batch_size, loss_tab=train_loss_tab)
elif model_type == 'MLP_2':
    model = MLP_2(dim_input, dim_recurrent, dim_output)
    train_model_mlp_3(model, train_input, train_target, learning_rate, num_epochs=nb_epochs,
                      batch_size=batch_size, loss_tab=train_loss_tab)
elif model_type == 'MLP_3':
    model = MLP_3(dim_input, dim_recurrent, dim_output)
    train_model_mlp_4(model, train_input, train_target, learning_rate, num_epochs=nb_epochs,
                      batch_size=batch_size, loss_tab=train_loss_tab)
elif model_type == 'GatedRNNMultistep':
    model = GatedRNNMultistep(dim_input, dim_recurrent, dim_output)
    train_model_multistep(model, train_input, train_target, learning_rate, num_epochs=nb_epochs,
                          batch_size=batch_size, loss_tab=train_loss_tab)
elif model_type == 'GRUMultisteps2':
    model = GRUMultisteps2(dim_input, dim_recurrent, dim_output)
    train_model_multisteps(model, train_input, train_target, learning_rate, num_epochs=nb_epochs,
                           batch_size=batch_size, loss_tab=train_loss_tab)
elif model_type == 'RNN':
    model = RNN(dim_input, dim_recurrent, dim_output)
    train_model(model, train_input, train_target, dim_recurrent, learning_rate, num_epochs=nb_epochs,
                batch_size=batch_size, loss_tab=train_loss_tab)
elif model_type == 'GRU':
    model = GRU(dim_input, dim_recurrent, dim_output)
    train_model(model, train_input, train_target, dim_recurrent, learning_rate, num_epochs=nb_epochs,
                batch_size=batch_size, loss_tab=train_loss_tab)
elif model_type == 'GatedRNNComplex':
    model = GatedRNNComplex(dim_input, dim_recurrent, dim_output)
    train_model(model, train_input, train_target, dim_recurrent, learning_rate, num_epochs=nb_epochs,
                batch_size=batch_size, loss_tab=train_loss_tab)
elif model_type == 'LSTM':
    model = LSTM(dim_input, dim_recurrent, dim_output)
    train_model_LSTM(model, train_input, train_target, dim_recurrent, learning_rate, num_epochs=nb_epochs,
                     batch_size=batch_size, loss_tab=train_loss_tab)
elif model_type == 'RNN2Cells':
    model = RNN2Cells(dim_input, dim_recurrent, dim_output)
    # NOTE: `states` is not defined anywhere in this script; it must be supplied
    # before this branch runs (presumably the initial hidden states expected by
    # train_model_2cells).
    train_model_2cells(model, train_input, train_target, states, learning_rate, num_epochs=nb_epochs,
                       batch_size=batch_size, loss_tab=train_loss_tab)
################################################################################################
################################################################################################
# TEST
time_step_start = 0
t_start = time_step_start*(t_fin-t_init)/(time_steps-1)
# Make predictions with test input
if model_type in ('GRU_multisteps', 'GRUMultisteps2', 'GatedRNNMultistep'):
    predictions = model.predict(test_input, test_target, time_steps)
elif model_type in ('MLP_1', 'MLP_2', 'MLP_3'):
    predictions = model.predict(test_input, test_target, time_steps, time_step_start)
else:
    predictions = model.predict(test_input, time_steps, dim_recurrent)
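# The predict signatures differ per model family (assumed): the multistep models
# roll out from test_target, the MLPs additionally take a start index, and the
# plain recurrent models only need the input and hidden size. All are expected to
# return a tensor shaped like test_target: (nb_samples, time_steps, dim_output).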
################################################################################################
################################################################################################
# PLOT
# Apply inverse normalization
predictions = tf.multiply(predictions, max_target - min_target) + min_target
test_target = tf.multiply(test_target, max_target - min_target) + min_target
test_input = tf.multiply(test_input, max_input - min_input) + min_input
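# Inverse of the min-max transform: x = x_norm * (max - min) + min recovers the
# original scale before plotting.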
# Get time interval
t = [((t_fin-t_start)/(time_steps-1))*i for i in range(time_steps)]
# Define number of plots
nb_test = 10
# Plot the training loss and samples of inputs/outputs
plot(test_target, test_input, train_input, predictions, train_loss_tab, t, nb_epochs, dim_recurrent,
     batch_size, nb_test, Nt, time_step_start)
