train_model.py
Attached To: rMLECMO Machine Learning ECMO
import tensorflow as tf
from tensorflow import keras
from tensorflow.compat.v1 import train
from tensorflow.keras.layers import Dense, Flatten, Conv1D, MaxPooling1D, \
    GlobalAveragePooling1D, Dropout, TimeDistributed, LSTM
from tensorflow.keras.activations import relu, sigmoid, elu, tanh

tf.compat.v1.enable_eager_execution()


def train_model(model, train_input, train_target, dim_recurrent, learning_rate,
                num_epochs=25, batch_size=10, loss_tab=None):
    """
    Train a classic recurrent neural network model using a recurrent state.
    :param model: Model to train
    :param train_input: Training input tensor containing the input data
    :param train_target: Training target tensor containing the labels
    :param dim_recurrent: Dimension of the recurrent space
    :param learning_rate: Learning rate used by the iterative optimization method
    :param num_epochs: Number of epochs used to train the network
    :param batch_size: Number of samples per batch
    :param loss_tab: List that will contain the loss values (averaged over time steps) for each batch iteration
    """
    # Use None as default to avoid the mutable-default-argument pitfall
    if loss_tab is None:
        loss_tab = []
    # Define the optimization method
    optimizer = train.AdamOptimizer(learning_rate=learning_rate)
    # Define the loss
    mse = tf.keras.losses.MeanSquaredError()
    global_step = tf.Variable(0)
    nb_samples, time_steps = train_input.shape[0].value, train_input.shape[1].value
    # Iterate over the epochs
    for epoch in range(num_epochs):
        # Shuffle the samples
        indices = tf.range(start=0, limit=tf.shape(train_input)[0], dtype=tf.int32)
        shuffled_indices = tf.random.shuffle(indices)
        train_input = tf.gather(train_input, shuffled_indices)
        train_target = tf.gather(train_target, shuffled_indices)
        # Initialize each recurrent state at zero
        states = list(tf.zeros(shape=[dim_recurrent]) for i in range(train_input.shape[0]))
        # Loop over the samples, taking batches of size 'batch_size'
        for b in range(0, nb_samples, batch_size):
            # For each batch, initialize the loss value to zero
            loss_value = 0
            for t in range(train_input.shape[1]):
                with tf.GradientTape() as tape:
                    # Make a prediction at the current time step
                    output, states[b:b + batch_size] = model(train_input[b:b + batch_size, t],
                                                             states[b:b + batch_size])
                    # Accumulate the loss
                    loss_value += mse(output, train_target[b:b + batch_size, t])
                # Update the gradients
                grads = tape.gradient(loss_value, model.trainable_variables)
                optimizer.apply_gradients(zip(grads, model.trainable_variables), global_step)
            loss_tab.append(loss_value / time_steps)
        print("Epoch : ", epoch)
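

# A minimal usage sketch for train_model. It assumes, as the loop above does,
# a stateful callable of the form model(input_batch, states) -> (output,
# new_states), where 'states' is a Python list holding one state vector per
# sample. The cell and all sizes below are hypothetical, for illustration only.
class _ExampleRecurrentCell(tf.keras.Model):
    """Hypothetical cell matching the interface assumed by train_model."""

    def __init__(self, dim_recurrent, dim_output):
        super(_ExampleRecurrentCell, self).__init__()
        self.hidden = Dense(dim_recurrent, activation='tanh')
        self.out = Dense(dim_output)

    def call(self, x, states):
        # Stack the per-sample state vectors into a batch, update the state,
        # then emit a prediction for the current time step
        h = self.hidden(tf.concat([x, tf.stack(states)], axis=-1))
        return self.out(h), list(h)


def _example_train_model():
    # Toy data: 20 samples, 8 time steps, 3 input signals, 1 target parameter
    x = tf.random.normal([20, 8, 3])
    y = tf.random.normal([20, 8, 1])
    cell = _ExampleRecurrentCell(dim_recurrent=16, dim_output=1)
    train_model(cell, x, y, dim_recurrent=16, learning_rate=1e-3,
                num_epochs=2, batch_size=5)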


def train_model_LSTM(model, train_input, train_target, dim_recurrent, learning_rate,
                     num_epochs=25, batch_size=10, loss_tab=None):
    """
    Train a classic long short-term memory (LSTM) network using a recurrent state and a candidate state.
    :param model: Model to train
    :param train_input: Training input tensor containing the input data
    :param train_target: Training target tensor containing the labels
    :param dim_recurrent: Dimension of the recurrent space
    :param learning_rate: Learning rate used by the iterative optimization method
    :param num_epochs: Number of epochs used to train the network
    :param batch_size: Number of samples per batch
    :param loss_tab: List that will contain the loss values (averaged over time steps) for each batch iteration
    """
    if loss_tab is None:
        loss_tab = []
    # Define the optimization method
    optimizer = train.AdamOptimizer(learning_rate=learning_rate)
    # Define the loss
    mse = tf.keras.losses.MeanSquaredError()
    global_step = tf.Variable(0)
    nb_samples, time_steps = train_input.shape[0].value, train_input.shape[1].value
    # Iterate over the epochs
    for epoch in range(num_epochs):
        # Shuffle the samples
        indices = tf.range(start=0, limit=tf.shape(train_input)[0], dtype=tf.int32)
        shuffled_indices = tf.random.shuffle(indices)
        train_input = tf.gather(train_input, shuffled_indices)
        train_target = tf.gather(train_target, shuffled_indices)
        # Initialize each recurrent state and candidate state at zero
        states = list(tf.zeros(shape=[dim_recurrent]) for i in range(train_input.shape[0]))
        candidates = list(tf.zeros(shape=[dim_recurrent]) for i in range(train_input.shape[0]))
        # Loop over the samples, taking batches of size 'batch_size'
        for b in range(0, nb_samples, batch_size):
            # For each batch, initialize the loss value to zero
            loss_value = 0
            for t in range(train_input.shape[1]):
                with tf.GradientTape() as tape:
                    # Make a prediction at the current time step
                    output, states[b:b + batch_size], candidates[b:b + batch_size] = \
                        model(train_input[b:b + batch_size, t],
                              states[b:b + batch_size],
                              candidates[b:b + batch_size])
                    # Accumulate the loss
                    loss_value += mse(output, train_target[b:b + batch_size, t])
                # Update the gradients
                grads = tape.gradient(loss_value, model.trainable_variables)
                optimizer.apply_gradients(zip(grads, model.trainable_variables), global_step)
            loss_tab.append(loss_value / time_steps)
        print("Epoch : ", epoch)
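

# train_model_LSTM expects the same kind of stateful callable as train_model,
# except that the cell also threads a candidate (cell) state through time,
# i.e. (hypothetical signature, shown for illustration only):
#
#     model(input_batch, states, candidates) -> (output, new_states, new_candidates)
#
# with 'states' and 'candidates' both Python lists of per-sample vectors.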


def train_model_mv(model, train_input, train_target, NAN_targets, states, learning_rate,
                   num_epochs=25, mini_batch_size=10, loss_tab=None):
    """
    Train a model on data with missing values: the targets flagged in NAN_targets
    are masked out of the loss. Under construction.
    """
    if loss_tab is None:
        loss_tab = []
    optimizer = train.AdamOptimizer(learning_rate=learning_rate)
    mse = tf.keras.losses.MeanSquaredError()
    global_step = tf.Variable(0)
    for epoch in range(num_epochs):
        # perm = np.random.permutation(500)
        # train_input = train_input[perm, :, :]
        # train_target = train_target[perm, :, :]
        # states = list(train_target[i, 0, :] for i in range(500))
        states = list(tf.zeros(shape=[states[0].shape[0]])
                      for i in range(train_input.shape[0]))
        for b in range(0, train_input.shape[0], mini_batch_size):
            loss_value = 0
            for t in range(train_input.shape[1]):
                with tf.GradientTape() as tape:
                    output, states[b:b + mini_batch_size] = model(train_input[b:b + mini_batch_size, t],
                                                                  states[b:b + mini_batch_size])
                    # loss_value += mse(output, train_target[b:b + mini_batch_size, t])
                    # Keep only the entries whose target is actually observed
                    loss_value += mse(tf.boolean_mask(output, tf.logical_not(NAN_targets[b:b + mini_batch_size, t])),
                                      tf.boolean_mask(train_target[b:b + mini_batch_size, t],
                                                      tf.logical_not(NAN_targets[b:b + mini_batch_size, t])))
                # Update the gradients
                grads = tape.gradient(loss_value, model.trainable_variables)
                optimizer.apply_gradients(zip(grads, model.trainable_variables), global_step)
            # print(loss_value)
            loss_tab.append(loss_value / 61)  # 61: hard-coded number of time steps
        print("Epoch : ", epoch)
        print("Loss : ", loss_value)
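

# train_model_mv assumes NAN_targets is a boolean tensor laid out like
# train_target, True wherever a target value is missing. A minimal sketch of
# how such a mask could be built, assuming missing values arrive as NaN in
# the raw targets (the helper name is hypothetical):
def _example_nan_mask(raw_targets):
    # True wherever the target is NaN; tf.logical_not(...) in the masked loss
    # above then keeps only the observed entries
    nan_mask = tf.math.is_nan(raw_targets)
    # Replace the NaNs themselves by zeros so the forward pass stays finite
    clean_targets = tf.where(nan_mask, tf.zeros_like(raw_targets), raw_targets)
    return clean_targets, nan_mask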


def train_model_2cells(model, train_input, train_target, states, learning_rate,
                       num_epochs=25, batch_size=10, loss_tab=None):
    """
    Train a recurrent neural network with two cells.
    :param model: Model to train
    :param train_input: Training input tensor containing the input data
    :param train_target: Training target tensor containing the labels
    :param states: Initial recurrent states (used to infer the state dimension)
    :param learning_rate: Learning rate used by the iterative optimization method
    :param num_epochs: Number of epochs used to train the network
    :param batch_size: Number of samples per batch
    :param loss_tab: List that will contain the loss values (averaged over time steps) for each batch iteration
    """
    if loss_tab is None:
        loss_tab = []
    # Define the optimization method
    optimizer = train.AdamOptimizer(learning_rate=learning_rate)
    # Define the loss
    mse = tf.keras.losses.MeanSquaredError()
    global_step = tf.Variable(0)
    nb_samples, time_steps = train_input.shape[0].value, train_input.shape[1].value
    for epoch in range(num_epochs):
        states = list(tf.zeros(shape=[states[0].shape[0]]) for i in range(nb_samples))
        for b in range(0, train_input.shape[0], batch_size):
            loss_value = 0
            for t in range(1, train_input.shape[1]):
                with tf.GradientTape() as tape:
                    # Feed the pair of time steps (t - 1, t) to the two cells
                    input_data = list(train_input[b:b + batch_size, i] for i in range(t - 1, t + 1))
                    output, states[b:b + batch_size] = model(input_data, states[b:b + batch_size])
                    loss_value += mse(output, train_target[b:b + batch_size, t])
                grads = tape.gradient(loss_value, model.trainable_variables)
                optimizer.apply_gradients(zip(grads, model.trainable_variables), global_step)
            loss_tab.append(loss_value / time_steps)
        print(epoch)


def train_model_multistep(model, train_input, train_target, learning_rate,
                          num_epochs=25, batch_size=10, loss_tab=None):
    """
    Train a multi-step recurrent neural network with one step backward in time.
    :param model: Model to train
    :param train_input: Training input tensor containing the input data
    :param train_target: Training target tensor containing the labels
    :param learning_rate: Learning rate used by the iterative optimization method
    :param num_epochs: Number of epochs used to train the network
    :param batch_size: Number of samples per batch
    :param loss_tab: List that will contain the loss values (averaged over time steps) for each batch iteration
    """
    if loss_tab is None:
        loss_tab = []
    # Define the optimization method
    optimizer = train.AdamOptimizer(learning_rate=learning_rate)
    # Define the loss
    mse = tf.keras.losses.MeanSquaredError()
    global_step = tf.Variable(0)
    nb_samples, time_steps = train_input.shape[0].value, train_input.shape[1].value
    # Iterate over the epochs
    for epoch in range(num_epochs):
        # Shuffle the samples
        indices = tf.range(start=0, limit=tf.shape(train_input)[0], dtype=tf.int32)
        shuffled_indices = tf.random.shuffle(indices)
        train_input = tf.gather(train_input, shuffled_indices)
        train_target = tf.gather(train_target, shuffled_indices)
        # Loop over the samples, taking batches of size 'batch_size'
        for b in range(0, nb_samples, batch_size):
            # For each batch, initialize the loss value to zero
            loss_value = 0
            for t in range(1, train_input.shape[1]):
                with tf.GradientTape() as tape:
                    # Make a prediction at the current time step
                    output = model(train_input[b:b + batch_size, t],
                                   train_input[b:b + batch_size, t - 1],
                                   train_target[b:b + batch_size, t - 1])
                    # Accumulate the loss
                    loss_value += mse(output, train_target[b:b + batch_size, t])
                # Update the gradients
                grads = tape.gradient(loss_value, model.trainable_variables)
                optimizer.apply_gradients(zip(grads, model.trainable_variables), global_step)
            loss_tab.append(loss_value / time_steps)
        print("Epoch : ", epoch)
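

# train_model_multistep assumes a stateless callable
# model(input_t, input_t_minus_1, target_t_minus_1) -> output: the true
# previous target is supplied during training (teacher forcing), so at
# prediction time the model's own previous output has to be fed back instead,
# as train_model_multistep_2 below does after the first time step.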


def train_model_mlp(model, train_input, train_target, learning_rate,
                    num_epochs=25, batch_size=10, loss_tab=None):
    """
    Train a multilayer perceptron on the multi-step prediction task with one step
    backward in time; the current time index is also passed to the model.
    :param model: Model to train
    :param train_input: Training input tensor containing the input data
    :param train_target: Training target tensor containing the labels
    :param learning_rate: Learning rate used by the iterative optimization method
    :param num_epochs: Number of epochs used to train the network
    :param batch_size: Number of samples per batch
    :param loss_tab: List that will contain the loss values (averaged over time steps) for each batch iteration
    """
    if loss_tab is None:
        loss_tab = []
    # Define the optimization method
    optimizer = train.AdamOptimizer(learning_rate=learning_rate)
    # Define the loss
    mse = tf.keras.losses.MeanSquaredError()
    global_step = tf.Variable(0)
    nb_samples, time_steps = train_input.shape[0].value, train_input.shape[1].value
    # Iterate over the epochs
    for epoch in range(num_epochs):
        # Shuffle the samples
        indices = tf.range(start=0, limit=tf.shape(train_input)[0], dtype=tf.int32)
        shuffled_indices = tf.random.shuffle(indices)
        train_input = tf.gather(train_input, shuffled_indices)
        train_target = tf.gather(train_target, shuffled_indices)
        # Loop over the samples, taking batches of size 'batch_size'
        for b in range(0, nb_samples, batch_size):
            # For each batch, initialize the loss value to zero
            loss_value = 0
            for t in range(1, train_input.shape[1]):
                with tf.GradientTape() as tape:
                    # Make a prediction at the current time step
                    output = model(train_input[b:b + batch_size, t],
                                   train_input[b:b + batch_size, t - 1],
                                   train_target[b:b + batch_size, t - 1], t)
                    # Accumulate the loss
                    loss_value += mse(output, train_target[b:b + batch_size, t])
                # Update the gradients
                grads = tape.gradient(loss_value, model.trainable_variables)
                optimizer.apply_gradients(zip(grads, model.trainable_variables), global_step)
            loss_tab.append(loss_value / time_steps)
        print("Epoch : ", epoch)


def train_model_mlp_2(model, train_input, train_target, learning_rate,
                      num_epochs=25, batch_size=10, loss_tab=None):
    """
    Train a multilayer perceptron on the multi-step prediction task with one step
    backward in time; after the first time step, the model's previous prediction
    is fed back in place of the previous target.
    :param model: Model to train
    :param train_input: Training input tensor containing the input data
    :param train_target: Training target tensor containing the labels
    :param learning_rate: Learning rate used by the iterative optimization method
    :param num_epochs: Number of epochs used to train the network
    :param batch_size: Number of samples per batch
    :param loss_tab: List that will contain the loss values (averaged over time steps) for each batch iteration
    """
    if loss_tab is None:
        loss_tab = []
    # Define the optimization method
    optimizer = train.AdamOptimizer(learning_rate=learning_rate)
    # Define the loss
    mse = tf.keras.losses.MeanSquaredError()
    global_step = tf.Variable(0)
    nb_samples, time_steps = train_input.shape[0].value, train_input.shape[1].value
    # Iterate over the epochs
    for epoch in range(num_epochs):
        # Shuffle the samples
        indices = tf.range(start=0, limit=tf.shape(train_input)[0], dtype=tf.int32)
        shuffled_indices = tf.random.shuffle(indices)
        train_input = tf.gather(train_input, shuffled_indices)
        train_target = tf.gather(train_target, shuffled_indices)
        # Loop over the samples, taking batches of size 'batch_size'
        for b in range(0, nb_samples, batch_size):
            # For each batch, initialize the loss value to zero
            loss_value = 0
            for t in range(1, train_input.shape[1]):
                with tf.GradientTape() as tape:
                    # Make a prediction at the current time step
                    if t == 1:
                        output = model(train_input[b:b + batch_size, t],
                                       train_input[b:b + batch_size, t - 1],
                                       train_target[b:b + batch_size, t - 1], t)
                    else:
                        # Feed back the previous prediction
                        output = model(train_input[b:b + batch_size, t],
                                       train_input[b:b + batch_size, t - 1],
                                       output, t)
                    # Accumulate the loss
                    loss_value += mse(output, train_target[b:b + batch_size, t])
                # Update the gradients
                grads = tape.gradient(loss_value, model.trainable_variables)
                optimizer.apply_gradients(zip(grads, model.trainable_variables), global_step)
            loss_tab.append(loss_value / time_steps)
        print("Epoch : ", epoch)


def train_model_mlp_3(model, train_input, train_target, learning_rate,
                      num_epochs=25, batch_size=10, loss_tab=None):
    """
    Train a multilayer perceptron on the multi-step prediction task with three
    steps backward in time (the three previous targets and the time index are
    passed to the model).
    :param model: Model to train
    :param train_input: Training input tensor containing the input data
    :param train_target: Training target tensor containing the labels
    :param learning_rate: Learning rate used by the iterative optimization method
    :param num_epochs: Number of epochs used to train the network
    :param batch_size: Number of samples per batch
    :param loss_tab: List that will contain the loss values (averaged over time steps) for each batch iteration
    """
    if loss_tab is None:
        loss_tab = []
    # Define the optimization method
    optimizer = train.AdamOptimizer(learning_rate=learning_rate)
    # Define the loss
    mse = tf.keras.losses.MeanSquaredError()
    global_step = tf.Variable(0)
    nb_samples, time_steps = train_input.shape[0].value, train_input.shape[1].value
    # Iterate over the epochs
    for epoch in range(num_epochs):
        # Shuffle the samples
        indices = tf.range(start=0, limit=tf.shape(train_input)[0], dtype=tf.int32)
        shuffled_indices = tf.random.shuffle(indices)
        train_input = tf.gather(train_input, shuffled_indices)
        train_target = tf.gather(train_target, shuffled_indices)
        # Loop over the samples, taking batches of size 'batch_size'
        for b in range(0, nb_samples, batch_size):
            # For each batch, initialize the loss value to zero
            loss_value = 0
            outputs = []
            for t in range(3, train_input.shape[1]):
                with tf.GradientTape() as tape:
                    # Make a prediction at the current time step
                    if t >= 3:  # always true here; kept from the ladder of cases below
                        output = model(train_input[b:b + batch_size, t],
                                       train_target[b:b + batch_size, t - 1],
                                       train_target[b:b + batch_size, t - 2],
                                       train_target[b:b + batch_size, t - 3], t)
                        outputs.append(output)
                    # elif t == 4:
                    #     output = model(train_input[b:b + batch_size, t], outputs[0],
                    #                    train_target[b:b + batch_size, t - 2],
                    #                    train_target[b:b + batch_size, t - 3], t)
                    #     outputs.append(output)
                    # elif t == 5:
                    #     output = model(train_input[b:b + batch_size, t], outputs[1],
                    #                    outputs[0], train_target[b:b + batch_size, t - 3], t)
                    #     outputs.append(output)
                    # else:
                    #     output = model(train_input[b:b + batch_size, t], outputs[2],
                    #                    outputs[1], outputs[0], t)
                    #     outputs.pop(0)
                    #     outputs.append(output)
                    # Accumulate the loss
                    loss_value += mse(output, train_target[b:b + batch_size, t])
                # Update the gradients
                grads = tape.gradient(loss_value, model.trainable_variables)
                optimizer.apply_gradients(zip(grads, model.trainable_variables), global_step)
            loss_tab.append(loss_value / time_steps)
        print("Epoch : ", epoch)


def train_model_mlp_4(model, train_input, train_target, learning_rate,
                      num_epochs=25, batch_size=10, loss_tab=None):
    """
    Train a multilayer perceptron on the multi-step prediction task using the
    previous input, the two previous targets and the time index.
    :param model: Model to train
    :param train_input: Training input tensor containing the input data
    :param train_target: Training target tensor containing the labels
    :param learning_rate: Learning rate used by the iterative optimization method
    :param num_epochs: Number of epochs used to train the network
    :param batch_size: Number of samples per batch
    :param loss_tab: List that will contain the loss values (averaged over time steps) for each batch iteration
    """
    if loss_tab is None:
        loss_tab = []
    # Define the optimization method
    optimizer = train.AdamOptimizer(learning_rate=learning_rate)
    # Define the loss
    mse = tf.keras.losses.MeanSquaredError()
    global_step = tf.Variable(0)
    nb_samples, time_steps = train_input.shape[0].value, train_input.shape[1].value
    # Iterate over the epochs
    for epoch in range(num_epochs):
        # Shuffle the samples
        indices = tf.range(start=0, limit=tf.shape(train_input)[0], dtype=tf.int32)
        shuffled_indices = tf.random.shuffle(indices)
        train_input = tf.gather(train_input, shuffled_indices)
        train_target = tf.gather(train_target, shuffled_indices)
        # Loop over the samples, taking batches of size 'batch_size'
        for b in range(0, nb_samples, batch_size):
            # For each batch, initialize the loss value to zero
            loss_value = 0
            outputs = []
            for t in range(3, train_input.shape[1]):
                with tf.GradientTape() as tape:
                    # Make a prediction at the current time step
                    if t >= 3:  # always true here; kept from the ladder of cases below
                        output = model(train_input[b:b + batch_size, t],
                                       train_input[b:b + batch_size, t - 1],
                                       train_target[b:b + batch_size, t - 1],
                                       train_target[b:b + batch_size, t - 2], t)
                        outputs.append(output)
                    # elif t == 4:
                    #     output = model(train_input[b:b + batch_size, t], outputs[0],
                    #                    train_target[b:b + batch_size, t - 2],
                    #                    train_target[b:b + batch_size, t - 3], t)
                    #     outputs.append(output)
                    # elif t == 5:
                    #     output = model(train_input[b:b + batch_size, t], outputs[1],
                    #                    outputs[0], train_target[b:b + batch_size, t - 3], t)
                    #     outputs.append(output)
                    # else:
                    #     output = model(train_input[b:b + batch_size, t], outputs[2],
                    #                    outputs[1], outputs[0], t)
                    #     outputs.pop(0)
                    #     outputs.append(output)
                    # Accumulate the loss
                    loss_value += mse(output, train_target[b:b + batch_size, t])
                # Update the gradients
                grads = tape.gradient(loss_value, model.trainable_variables)
                optimizer.apply_gradients(zip(grads, model.trainable_variables), global_step)
            loss_tab.append(loss_value / time_steps)
        print("Epoch : ", epoch)


def train_model_multistep_2(model, train_input, train_target, learning_rate,
                            num_epochs=25, batch_size=10, loss_tab=None):
    """
    Train a multi-step recurrent neural network with one step backward in time;
    after the first time step, the previous prediction is fed back in place of
    the previous target.
    :param model: Model to train
    :param train_input: Training input tensor containing the input data
    :param train_target: Training target tensor containing the labels
    :param learning_rate: Learning rate used by the iterative optimization method
    :param num_epochs: Number of epochs used to train the network
    :param batch_size: Number of samples per batch
    :param loss_tab: List that will contain the loss values (averaged over time steps) for each batch iteration
    """
    if loss_tab is None:
        loss_tab = []
    # Define the optimization method
    optimizer = train.AdamOptimizer(learning_rate=learning_rate)
    # Define the loss
    mse = tf.keras.losses.MeanSquaredError()
    global_step = tf.Variable(0)
    nb_samples, time_steps = train_input.shape[0].value, train_input.shape[1].value
    # Iterate over the epochs
    for epoch in range(num_epochs):
        print("Epoch : ", epoch)
        # Shuffle the samples
        indices = tf.range(start=0, limit=tf.shape(train_input)[0], dtype=tf.int32)
        shuffled_indices = tf.random.shuffle(indices)
        train_input = tf.gather(train_input, shuffled_indices)
        train_target = tf.gather(train_target, shuffled_indices)
        # Loop over the samples, taking batches of size 'batch_size'
        for b in range(0, nb_samples, batch_size):
            # For each batch, initialize the loss value to zero
            loss_value = 0
            for t in range(1, train_input.shape[1]):
                with tf.GradientTape() as tape:
                    if t == 1:
                        # Make a prediction at the current time step
                        output = model(train_input[b:b + batch_size, t],
                                       train_input[b:b + batch_size, t - 1],
                                       train_target[b:b + batch_size, t - 1])
                    else:
                        # Feed back the previous prediction
                        output = model(train_input[b:b + batch_size, t],
                                       train_input[b:b + batch_size, t - 1],
                                       output)
                    # Accumulate the loss
                    loss_value += mse(output, train_target[b:b + batch_size, t])
                # Update the gradients
                grads = tape.gradient(loss_value, model.trainable_variables)
                optimizer.apply_gradients(zip(grads, model.trainable_variables), global_step)
            loss_tab.append(loss_value / time_steps)


def train_model_multisteps(model, train_input, train_target, learning_rate,
                           num_epochs=25, batch_size=10, loss_tab=None):
    """
    Train a multi-step recurrent neural network with two steps backward in time.
    :param model: Model to train
    :param train_input: Training input tensor containing the input data
    :param train_target: Training target tensor containing the labels
    :param learning_rate: Learning rate used by the iterative optimization method
    :param num_epochs: Number of epochs used to train the network
    :param batch_size: Number of samples per batch
    :param loss_tab: List that will contain the loss values (averaged over time steps) for each batch iteration
    """
    if loss_tab is None:
        loss_tab = []
    # Define the optimization method
    optimizer = train.AdamOptimizer(learning_rate=learning_rate)
    # Define the loss
    mse = tf.keras.losses.MeanSquaredError()
    global_step = tf.Variable(0)
    nb_samples, time_steps = train_input.shape[0].value, train_input.shape[1].value
    # Iterate over the epochs
    for epoch in range(num_epochs):
        # Shuffle the samples
        indices = tf.range(start=0, limit=tf.shape(train_input)[0], dtype=tf.int32)
        shuffled_indices = tf.random.shuffle(indices)
        train_input = tf.gather(train_input, shuffled_indices)
        train_target = tf.gather(train_target, shuffled_indices)
        # Loop over the samples, taking batches of size 'batch_size'
        for b in range(0, nb_samples, batch_size):
            # For each batch, initialize the loss value to zero
            loss_value = 0
            for t in range(2, train_input.shape[1]):
                with tf.GradientTape() as tape:
                    # Make a prediction at the current time step
                    output = model(train_input[b:b + batch_size, t],
                                   train_input[b:b + batch_size, t - 1],
                                   train_input[b:b + batch_size, t - 2],
                                   train_target[b:b + batch_size, t - 1],
                                   train_target[b:b + batch_size, t - 2])
                    # Accumulate the loss
                    loss_value += mse(output, train_target[b:b + batch_size, t])
                # Update the gradients
                grads = tape.gradient(loss_value, model.trainable_variables)
                optimizer.apply_gradients(zip(grads, model.trainable_variables), global_step)
            loss_tab.append(loss_value / time_steps)
        print("Epoch : ", epoch)


def tempconvnet(offset, dim_input, dim_output, loss, optimizer):
    """
    Build a classic convolutional neural network (CNN).
    :param offset: Size of the window used to apply the convolution
    :param dim_input: Number of input signals per sample
    :param dim_output: Number of output parameters per sample
    :param loss: Loss used to train the network
    :param optimizer: Optimization method used to train the network
    :return: Convolutional neural network (CNN)
    """
    convmodel = keras.Sequential([
        Conv1D(64, 2, activation='relu', input_shape=(offset, dim_input)),
        Conv1D(32, 2, activation='relu'),
        GlobalAveragePooling1D(),
        Dropout(0.5),
        Dense(dim_output, activation='relu')])
    convmodel.compile(loss=loss, optimizer=optimizer, metrics=['accuracy'])
    return convmodel
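

# A minimal usage sketch for tempconvnet (the sizes, loss and optimizer below
# are hypothetical placeholders, not project settings):
def _example_tempconvnet():
    cnn = tempconvnet(offset=32, dim_input=4, dim_output=2,
                      loss='mse', optimizer='adam')
    cnn.summary()
    return cnn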


def tempconvnetdilated(offset, dim_input, dim_output, loss, optimizer):
    """
    Build a dilated causal convolutional neural network (DCCNN).
    :param offset: Size of the window used to apply the convolution
    :param dim_input: Number of input signals per sample
    :param dim_output: Number of output parameters per sample
    :param loss: Loss used to train the network
    :param optimizer: Optimization method used to train the network
    :return: Dilated causal convolutional neural network (DCCNN)
    """
    convmodel = keras.Sequential([
        Conv1D(32, 2, activation='relu', input_shape=(offset, dim_input),
               padding='causal', dilation_rate=2),
        # MaxPooling1D(2),
        Conv1D(32, 2, activation='relu', padding='causal', dilation_rate=4),
        Conv1D(32, 2, activation='relu', padding='causal', dilation_rate=8),
        Conv1D(32, 2, activation='relu', padding='causal', dilation_rate=16),
        Conv1D(32, 2, activation='relu', padding='causal', dilation_rate=32),
        Conv1D(32, 2, activation='relu', padding='causal', dilation_rate=64),
        GlobalAveragePooling1D(),
        Dense(128, activation='relu'),
        Dropout(0.5),
        Dense(dim_output, activation='relu')])
    convmodel.compile(loss=loss, optimizer=optimizer, metrics=['accuracy'])
    return convmodel
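

# Receptive field of the dilated stack above: with kernel size 2, each causal
# layer extends the history seen by (kernel_size - 1) * dilation_rate steps,
# so the final layer sees 1 + (2 + 4 + 8 + 16 + 32 + 64) = 127 time steps.
# 'offset' should therefore be of a comparable order for the deepest layers
# to contribute.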


def train_tempconvnet(convnet, train_input, train_target, time_steps, offset, batch_size, nb_epochs):
    """
    Train a 1D convolutional neural network.
    :param convnet: Neural network
    :param train_input: Input tensor
    :param train_target: Labels, target tensor
    :param time_steps: Number of time steps
    :param offset: Size of the window used to apply the convolution
    :param batch_size: Size of the batches used to train the network
    :param nb_epochs: Number of epochs used to train the network
    """
    # Slide a window of length 'offset' over the sequence and fit the network
    # to predict the target immediately following the window
    for t in range(time_steps - offset):
        convnet.fit(train_input[:, t:offset + t], train_target[:, offset + t],
                    batch_size=batch_size, epochs=nb_epochs)
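

# End-to-end sketch combining tempconvnet and train_tempconvnet (all sizes are
# hypothetical): each window of 'offset' steps is used to predict the target
# right after the window.
def _example_train_tempconvnet():
    import numpy as np  # local import: numpy is only needed for this sketch
    time_steps, offset = 100, 32
    x = np.random.randn(50, time_steps, 4).astype('float32')  # 50 samples, 4 input signals
    y = np.random.randn(50, time_steps, 2).astype('float32')  # 2 target parameters
    cnn = tempconvnet(offset=offset, dim_input=4, dim_output=2,
                      loss='mse', optimizer='adam')
    train_tempconvnet(cnn, x, y, time_steps, offset, batch_size=10, nb_epochs=1)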


def lstm_net(dim_input, dim_output, window):
    """
    Build a stacked LSTM network followed by dense layers.
    :param dim_input: Number of input signals per sample
    :param dim_output: Number of output parameters per sample
    :param window: Number of time steps per sample
    :return: LSTM network
    """
    model = keras.Sequential([
        # The first two LSTM layers must return the full sequence so that the
        # next LSTM layer receives a 3D (batch, time, features) input
        keras.layers.LSTM(100, input_shape=(window, dim_input), return_sequences=True),
        keras.layers.LSTM(200, return_sequences=True),
        keras.layers.LSTM(100),
        Dense(128, activation='sigmoid'),
        Dropout(0.3),
        Dense(dim_output, activation='sigmoid')])
    model.compile(optimizer='adam', loss='mse', metrics=['accuracy'])
    model.summary()
    return model
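

# Usage sketch for lstm_net (hypothetical sizes): the returned model consumes
# fixed-length windows of shape (window, dim_input), for instance built with
# a sliding window as in train_tempconvnet above.
#
#     model = lstm_net(dim_input=4, dim_output=2, window=32)
#     model.fit(x_windows, y_next, batch_size=10, epochs=5)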