LSTM.py (F80210255)
Attached to: rMLECMO Machine Learning ECMO
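LSTM.py loads synthetic input/target time series from text files, min-max normalizes them with training-set statistics, trains a stacked LSTM one time step at a time, and plots one predicted channel against its target and input.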
import numpy as np
import tensorflow as tf
from tensorflow import keras
import RNN_synthetic as frw
from matplotlib import pyplot as plt
from tensorflow.keras.layers import Dense, Flatten, Conv1D, MaxPooling1D, GlobalAveragePooling1D, Dropout, TimeDistributed, LSTM
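# Note: RNN_synthetic (aliased frw) and the Flatten, Conv1D, MaxPooling1D,
# GlobalAveragePooling1D, Dropout and TimeDistributed layers are imported but
# never used below.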
# Hyperparameters. batch_size, learning_rate, dim_recurrent and offset are
# defined here but never used by the code below (the training loop and the
# plot hard-code their own values).
nb_epochs, batch_size, nb_samples = 25, 50, 1000
dim_input, dim_recurrent, dim_output = 3, 32, 3
learning_rate = 1e-3
time_steps = 71
offset = 5

path = '../../synthetic_data/'
# Open the files
INPUT_TRAIN_FILE_NAMES = [path + 'files_' + str(dim_input) + '/train/input/file_' + str(i) + '.txt' for i in range(nb_samples)]
TARGET_TRAIN_FILE_NAMES = [path + 'files_' + str(dim_input) + '/train/target/file_' + str(i) + '.txt' for i in range(nb_samples)]
INPUT_TEST_FILE_NAMES = [path + 'files_' + str(dim_input) + '/test/input/file_' + str(i) + '.txt' for i in range(nb_samples)]
TARGET_TEST_FILE_NAMES = [path + 'files_' + str(dim_input) + '/test/target/file_' + str(i) + '.txt' for i in range(nb_samples)]
input_train_arrays, target_train_arrays = [], []
input_test_arrays, target_test_arrays = [], []
train_loss_tab = []
for i in range(nb_samples):
    input_train_arrays.append(np.loadtxt(INPUT_TRAIN_FILE_NAMES[i], dtype=np.float32))
    target_train_arrays.append(np.loadtxt(TARGET_TRAIN_FILE_NAMES[i], dtype=np.float32))
    input_test_arrays.append(np.loadtxt(INPUT_TEST_FILE_NAMES[i], dtype=np.float32))
    target_test_arrays.append(np.loadtxt(TARGET_TEST_FILE_NAMES[i], dtype=np.float32))
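# Each text file is assumed to hold one row per parameter and one column per
# time step, i.e. a (dim_input, time_steps) array for inputs and
# (dim_output, time_steps) for targets; the transpose below relies on this.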
# Stack the different samples
train_input = tf.stack([input_train_arrays[i] for i in range(nb_samples)])
train_target = tf.stack([target_train_arrays[i] for i in range(nb_samples)])
test_input = tf.stack([input_test_arrays[i] for i in range(nb_samples)])
test_target = tf.stack([target_test_arrays[i] for i in range(nb_samples)])
# Transpose the second and the third dimension in order to have (samples, time, parameters)
train_input = tf.transpose(train_input, perm=[0, 2, 1])
train_target = tf.transpose(train_target, perm=[0, 2, 1])
test_input = tf.transpose(test_input, perm=[0, 2, 1])
test_target = tf.transpose(test_target, perm=[0, 2, 1])
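# Minimal shape sanity check, assuming every file really holds time_steps
# columns (all names here come from the script above):
assert train_input.shape == (nb_samples, time_steps, dim_input)
assert train_target.shape == (nb_samples, time_steps, dim_output)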
################################################################################################
# NORMALIZATION
# Get the min and the max along the first dimension (samples), then reduce a
# second time along time, leaving one scalar per parameter.
# `axis` replaces the deprecated TF1 keyword `reduction_indices`, which TF2 rejects.
max_input = tf.reduce_max(train_input, axis=[0])
min_input = tf.reduce_min(train_input, axis=[0])
max_target = tf.reduce_max(train_target, axis=[0])
min_target = tf.reduce_min(train_target, axis=[0])
max_input = tf.reduce_max(max_input, axis=[0])
min_input = tf.reduce_min(min_input, axis=[0])
max_target = tf.reduce_max(max_target, axis=[0])
min_target = tf.reduce_min(min_target, axis=[0])
# Apply the normalization
train_input = tf.divide(train_input - min_input, max_input - min_input)
test_input = tf.divide(test_input - min_input, max_input - min_input)
train_target = tf.divide(train_target - min_target, max_target - min_target)
test_target = tf.divide(test_target - min_target, max_target - min_target)
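# The min/max statistics come from the training set only, so the test tensors
# are scaled with the same constants; the PLOT section below inverts exactly
# this mapping with x * (max - min) + min.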
################################################################################################
# TRAINING
# Define model
model = keras.Sequential()
model.add(LSTM(64, activation='relu', return_sequences=True, input_shape=(1, dim_input)))
model.add(LSTM(32, activation='relu', return_sequences=True))
model.add(LSTM(16, activation='relu', return_sequences=True))
model.add(Dense(dim_output))
model.compile(optimizer='adam', loss='mse')
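# Optional: print layer output shapes and parameter counts for the stack above.
model.summary()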
# Train model: one fit() call per time step, on (nb_samples, 1, dim_input) slices
for i in range(time_steps):
    model.fit(tf.reshape(train_input[:, i], shape=[nb_samples, 1, dim_input]),
              tf.reshape(train_target[:, i], shape=[nb_samples, 1, dim_output]),
              epochs=nb_epochs, batch_size=100, verbose=2)
    print(i)
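# This loop fits on isolated length-1 windows, so the LSTMs never see context
# across time steps. A minimal alternative sketch (not part of the original
# script): build the first LSTM with input_shape=(time_steps, dim_input) and
# train on whole sequences in a single call, e.g.
#   model.fit(train_input, train_target,
#             epochs=nb_epochs, batch_size=batch_size, verbose=2)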
# Make predictions
predictions = []
# Iterate over time
for t in range(time_steps):
    # Predict at time t from the test inputs (the model maps inputs to targets)
    prediction = model.predict(tf.reshape(test_input[:, t, :], [nb_samples, 1, dim_input]), verbose=2)
    predictions.append(prediction)

# Assemble the per-step outputs into (samples, time, parameters)
predictions = tf.stack(predictions, axis=1)
predictions = tf.reshape(predictions, [nb_samples, time_steps, dim_output])
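# A simple aggregate metric in normalized space (a sketch; the original script
# reports none). test_mse is a name introduced here for illustration.
test_mse = tf.reduce_mean(tf.square(predictions - test_target))
print('normalized test MSE:', float(test_mse))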
################################################################################################
# PLOT
# Undo the min-max normalization before plotting
predictions = tf.multiply(predictions, max_target - min_target) + min_target
test_target = tf.multiply(test_target, max_target - min_target) + min_target
test_input = tf.multiply(test_input, max_input - min_input) + min_input
# Time axis: time_steps points spanning 0 to 5 (presumably the offset constant defined above)
t = [(5 / (time_steps - 1)) * i for i in range(time_steps)]
# Plot the third channel of the first test sample
plt.plot(t, test_target[0, :, 2], color='green', label='target')
plt.plot(t, predictions[0, :, 2], color='red', label='prediction')
plt.plot(t, test_input[0, :, 2], color='blue', label='input')
plt.legend()
plt.show()