plot.py
import os

import matplotlib
import numpy as np
from matplotlib import pyplot as plt


def plot(test_target, test_input, train_input, predictions, train_loss_tab, t, nb_epochs, dim_recurrent,
         batch_size, nb_test, Nt):
"""
Plot training loss and samples of predictions, targets and inputs in newly created directory.
:param test_target: Test labels, target tensor
:param test_input: Test input tensor
:param train_input: Train input tensor
:param predictions: Predictions made by the model
:param train_loss_tab: List containing value of the loss for every batch iteration and every time
:param t: Time interval
:param nb_epochs: Number of epochs used to train the model
:param dim_recurrent: Dimension of the recurrent space
:param batch_size: Batch size used to train with mini-batch stochastic gradient descent
:param nb_test: NUmber of samples to plot
:param Nt: Id of the directory where to put the plots
"""
    # Make the output directory (do not fail if it already exists)
    os.makedirs('Try_' + str(Nt), exist_ok=True)
    # Render text with the LaTeX font (requires a working LaTeX installation)
    matplotlib.rcParams['text.usetex'] = True
    # Plot some random samples; shape entries are plain ints for NumPy arrays,
    # so no `.value` attribute is needed (or available) on them
    nb_samples, dim_input, dim_output = test_input.shape[0], test_input.shape[2], test_target.shape[2]
    random_samples = np.random.randint(nb_samples, size=nb_test)
    random_output_parameters = np.random.randint(dim_output, size=nb_test)
    random_input_parameters = np.random.randint(dim_input, size=nb_test)
    for i in range(nb_test):
        plt.plot(t, test_target[random_samples[i], :, random_output_parameters[i]], color='green', label='target')
        plt.plot(t, predictions[random_samples[i], :, random_output_parameters[i]], color='red', label='prediction')
        plt.legend()
        plt.title(r'$N_{epochs}=' + str(nb_epochs) + '$, $|B|=' + str(batch_size) + '$, $N_{Samples}=' + str(nb_samples) +
                  '$, $N_{in}=N_{out}=' + str(dim_input) + '$, $N_{rec}=' + str(dim_recurrent) + '$')
        plt.savefig('Try_' + str(Nt) + '/predictions_' + str(i))
        plt.show()
    for i in range(nb_test):
        plt.plot(t, test_input[random_samples[i], :, random_input_parameters[i]], color='blue', label='input')
        plt.legend()
        plt.title(r'$N_{epochs}=' + str(nb_epochs) + '$, $|B|=' + str(batch_size) + '$, $N_{Samples}=' + str(nb_samples) +
                  '$, $N_{in}=N_{out}=' + str(dim_input) + '$, $N_{rec}=' + str(dim_recurrent) + '$')
        plt.savefig('Try_' + str(Nt) + '/input_' + str(i))
        plt.show()
    if train_loss_tab:
        # Plot the training loss: one value per batch iteration over all epochs
        axis = list(range(nb_epochs * train_input.shape[0] // batch_size))
        plt.semilogy(axis, train_loss_tab, color='black')
        plt.xlabel(r'Number of epochs $\times$ Number of samples $/$ Batch size')
        plt.title(r'Loss for $N_{epochs}=' + str(nb_epochs) + '$, $|B|=' + str(batch_size) + '$, $N_{Samples}=' + str(nb_samples) +
                  '$, $N_{in}=N_{out}=' + str(dim_input) + '$, $N_{rec}=' + str(dim_recurrent) + '$')
        plt.savefig('Try_' + str(Nt) + '/loss_multistep_' + str(Nt))
        plt.show()
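
# A minimal usage sketch with synthetic data, assuming the tensors passed in
# are plain NumPy arrays of shape (nb_samples, len(t), dim). Every name and
# size below is illustrative only and not part of the original training
# pipeline; note that `text.usetex = True` requires a working LaTeX
# installation for the figures to render.
if __name__ == '__main__':
    nb_samples, nb_steps, dim = 8, 50, 3
    t_demo = np.linspace(0.0, 1.0, nb_steps)
    test_input_demo = np.random.randn(nb_samples, nb_steps, dim)
    test_target_demo = np.random.randn(nb_samples, nb_steps, dim)
    # Fake "predictions": the targets plus a little noise
    predictions_demo = test_target_demo + 0.1 * np.random.randn(nb_samples, nb_steps, dim)
    train_input_demo = np.random.randn(nb_samples, nb_steps, dim)
    # The loss plot expects nb_epochs * nb_samples // batch_size values,
    # here 2 * 8 // 4 = 4 batch iterations
    loss_demo = list(np.exp(-np.linspace(0.0, 2.0, 2 * nb_samples // 4)))
    plot(test_target_demo, test_input_demo, train_input_demo, predictions_demo,
         loss_demo, t_demo, nb_epochs=2, dim_recurrent=16, batch_size=4,
         nb_test=2, Nt=0)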
