testConvNet.py

import numpy as np
import tensorflow as tf
from RNN_synthetic import *  # provides tempconvnetdilated and train_tempconvnet
from plot import *           # provides plot
# Hyperparameters
nb_epochs, batch_size, nb_samples = 10, 25, 1000
dim_input, dim_recurrent, dim_output = 3, 32, 3   # dim_recurrent is only passed through to plot()
learning_rate = 0.001                             # defined but not used anywhere in this script
time_steps, offset = 71, 30                       # trajectory length and sliding-window size
path = '../../synthetic_data/'
t_init, t_fin = 0, 5                              # physical time interval of each trajectory
Nt = 22
################################################################################################
################################################################################################
# READING FILES
INPUT_TRAIN_FILE_NAMES = [f'{path}files_{dim_input}/train/input/file_{i}.txt' for i in range(nb_samples)]
TARGET_TRAIN_FILE_NAMES = [f'{path}files_{dim_output}/train/target/file_{i}.txt' for i in range(nb_samples)]
INPUT_TEST_FILE_NAMES = [f'{path}files_{dim_input}/test/input/file_{i}.txt' for i in range(nb_samples)]
TARGET_TEST_FILE_NAMES = [f'{path}files_{dim_output}/test/target/file_{i}.txt' for i in range(nb_samples)]
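# Each file_<i>.txt is assumed to hold one trajectory stored as one parameter
# per row and one time step per column (see the transpose below).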
input_train_arrays, target_train_arrays = [], []
input_test_arrays, target_test_arrays = [], []
train_loss_tab = []
for i in range(nb_samples):
    input_train_arrays.append(np.loadtxt(INPUT_TRAIN_FILE_NAMES[i], dtype=np.float32))
    target_train_arrays.append(np.loadtxt(TARGET_TRAIN_FILE_NAMES[i], dtype=np.float32))
    input_test_arrays.append(np.loadtxt(INPUT_TEST_FILE_NAMES[i], dtype=np.float32))
    target_test_arrays.append(np.loadtxt(TARGET_TEST_FILE_NAMES[i], dtype=np.float32))
# Stack the different samples
train_input = tf.stack(input_train_arrays)
train_target = tf.stack(target_train_arrays)
test_input = tf.stack(input_test_arrays)
test_target = tf.stack(target_test_arrays)
# Transpose the second and the third dimension in order to have (samples, time, parameters)
train_input = tf.transpose(train_input, perm=[0, 2, 1])
train_target = tf.transpose(train_target, perm=[0, 2, 1])
test_input = tf.transpose(test_input, perm=[0, 2, 1])
test_target = tf.transpose(test_target, perm=[0, 2, 1])
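# Sanity check: each tensor should now be (samples, time, parameters); this
# assumes every file really contains dim_* rows and time_steps columns.
assert train_input.shape == (nb_samples, time_steps, dim_input)
assert train_target.shape == (nb_samples, time_steps, dim_output)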
################################################################################################
################################################################################################
# NORMALIZATION
# Get the min and the max along the first dimension (samples)
max_input = tf.reduce_max(train_input, axis=0)
min_input = tf.reduce_min(train_input, axis=0)
max_target = tf.reduce_max(train_target, axis=0)
min_target = tf.reduce_min(train_target, axis=0)
# And then along the time dimension
max_input = tf.reduce_max(max_input, axis=0)
min_input = tf.reduce_min(min_input, axis=0)
max_target = tf.reduce_max(max_target, axis=0)
min_target = tf.reduce_min(min_target, axis=0)
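# Guard against a zero denominator in the min-max scaling below (this assumes
# every feature actually varies across the training set). The scaling maps the
# training data into [0, 1]; test data reuses the training statistics and may
# fall slightly outside that range.
tf.debugging.assert_greater(max_input, min_input)
tf.debugging.assert_greater(max_target, min_target)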
# Apply the normalization
train_input = tf.divide(train_input - min_input, max_input - min_input)
test_input = tf.divide(test_input - min_input, max_input - min_input)
train_target = tf.divide(train_target - min_target, max_target - min_target)
test_target = tf.divide(test_target - min_target, max_target - min_target)
################################################################################################
################################################################################################
# TRAINING MODEL
model = tempconvnetdilated(offset, dim_input, dim_output, 'mse', 'adam')
model.summary()
# Note: train_loss_tab (declared above) is never filled in this script; if
# train_tempconvnet returns per-epoch losses, they could be captured here.
train_tempconvnet(model, train_input, train_target, time_steps, offset, batch_size, nb_epochs)
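# 'tempconvnetdilated' and 'train_tempconvnet' live in RNN_synthetic and are not
# shown here. A minimal sketch of what the factory might look like, assuming a
# tf.keras stack of causal dilated Conv1D layers (widths, kernel sizes and the
# dilation schedule are illustrative, not the author's actual architecture):
#
#   def tempconvnetdilated(offset, dim_input, dim_output, loss, optimizer):
#       model = tf.keras.Sequential([
#           tf.keras.layers.Input(shape=(offset, dim_input)),
#           tf.keras.layers.Conv1D(32, 3, padding='causal', dilation_rate=1, activation='relu'),
#           tf.keras.layers.Conv1D(32, 3, padding='causal', dilation_rate=2, activation='relu'),
#           tf.keras.layers.Conv1D(32, 3, padding='causal', dilation_rate=4, activation='relu'),
#           tf.keras.layers.Flatten(),
#           tf.keras.layers.Dense(dim_output),
#       ])
#       model.compile(loss=loss, optimizer=optimizer)
#       return model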
################################################################################################
################################################################################################
# TEST
# Make Predictions
predictions = []
for i in range(offset):
    # Seed the first 'offset' steps with the ground-truth targets
    predictions.append(test_target[:, i, :])
for i in range(time_steps - offset):
    # Predict the value at step offset+i from a window of size 'offset'
    prediction = model(test_input[:, i:offset + i, :])
    predictions.append(prediction)
    print(prediction.shape, " ", i)
predictions = tf.stack(predictions, axis=1)
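# predictions now has shape (nb_samples, time_steps, dim_output): the first
# 'offset' entries are ground truth, the remaining ones are model outputs.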
################################################################################################
################################################################################################
# PLOT
# Apply inverse normalization
predictions = tf.multiply(predictions, max_target - min_target) + min_target
test_target = tf.multiply(test_target, max_target - min_target) + min_target
# Build the uniform time grid from t_init to t_fin
t = [t_init + ((t_fin - t_init) / (time_steps - 1)) * i for i in range(time_steps)]
# Define number of plots
nb_test = 5
# Plot the training loss and samples of inputs/outputs
plot(test_target, test_input, train_input, predictions, train_loss_tab, t, nb_epochs, dim_recurrent,
     batch_size, nb_test, Nt)
