basicnet.cpp

#include <eddl/apis/eddl.h>
#include <eddl/apis/eddlT.h>
#include <eddl/tensor/tensor.h>
#include <vector>
#include "basicnet.h"
#define NB_CHNS 4
#define L2_K 0.1
#define L2_B 0.1
#define L2_A 0.0
#define DROPOUT_RATE 0.5
BasicNet::BasicNet()
{
using namespace eddl;
/* #########################################################
# Porting notes (Keras -> EDDL):
# - Zero padding is not available yet.
# - Regularizers are available, but only as standalone layers, not as the
#   kernel_regularizer and bias_regularizer parameters: where should those
#   layers go to have the same effect?
# - The training function changed substantially: EDDL's fit is much simpler
#   than the Keras one. A closer port may become possible with
#   "fine-grained training".
######################################################### */
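/* A possible answer to the regularizer question above (a sketch, not verified
   against the Keras semantics): EDDL exposes L2 as a wrapper layer, so the
   kernel_regularizer/bias_regularizer arguments of the commented-out Keras
   calls below would become something like
       l = L2(Conv(l, 16, {3, 5}, {1, 2}, "same"), L2_K);
   Note that this attaches a single penalty to all of the layer's parameters,
   so the separate kernel/bias coefficients (L2_K vs. L2_B) may not be
   expressible this way. Initializers are wrapper layers as well, e.g.
       l = GlorotUniform(l); */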
// Input: 1 x NB_CHNS x 1280, i.e. one window of 4-channel EEG
// (1280 samples = 5 s at the 256 Hz rate used in the metrics below)
layer in_ = Input({1, NB_CHNS, 1280});
layer l = in_;
// l = ZeroPadding2D(l, {1,2});
// l = Conv(l, 16, {3, 5}, data_format="channels_last", kernel_initialiser='glorot_uniform', bias_initialiser='zeros', kernel_regularizer=L2(l2_k), bias_regularizer=L2(l2_b))
l = Conv(l, 16, {3, 5}, {1,2}, "same"); // padding='valid'
l = BatchNormalization(l, 0.99, 0.001);
l = Activation(l, "relu");
l = MaxPool(l, {1,2}, {1,2}, "same"); // padding='valid'
// l = ZeroPadding2D(l, {1,1});
// l = Conv(l, 32, {3, 3}, {1,1}, data_format="channels_last", kernel_initialiser='glorot_uniform', bias_initialiser='zeros', kernel_regularizer=L2(l2_k), bias_regularizer=L2(l2_b));
l = Conv(l, 32, {3, 3}, {1,1}, "same"); // padding='valid'
l = BatchNormalization(l, 0.99, 0.001);
l = Activation(l, "relu");
l = MaxPool(l, {1,2}, {1,2}, "same"); // padding='valid'
// l = ZeroPadding2D(l, {1,1});
// l = Conv(l, 32, {3, 3}, {2,2}, data_format="channels_last", kernel_initialiser='glorot_uniform', bias_initialiser='zeros', kernel_regularizer=L2(l2_k), bias_regularizer=L2(l2_b));
l = Conv(l, 32, {3, 3}, {2,2}, "same"); // padding='valid'
l = BatchNormalization(l, 0.99, 0.001);
l = Activation(l, "relu");
l = Dropout(l, DROPOUT_RATE);
l = Flatten(l);
// l = Dense(l, 64, kernel_initialiser='glorot_uniform', bias_initialiser='zeros', kernel_regularizer=L2(l2_k), bias_regularizer=L2(l2_b));
l = Dense(l, 64);
l = Activation(l, "relu");
l = Dropout(l, DROPOUT_RATE);
// l = Dense(l, 1, kernel_initialiser='glorot_uniform', bias_initialiser='zeros');
l = Dense(l, 1);
l = Activation(l, "sigmoid");
layer out_ = l;
net = Model({in_}, {out_});
// SGD with Nesterov momentum; sigmoid output trained with soft cross-entropy
build(net,
      sgd(0.01, 0.9, 0.0, true),
      {"soft_cross_entropy"},
      {"categorical_accuracy"},
      CS_CPU(4, "full_mem"));
summary(net);
}
void BasicNet::fit(Tensor* x, Tensor* y, int batch_size, int epochs)
{
// EDDL expects channels-first input, so move the channel axis from
// position 3 (channels-last) to position 1
x = Tensor::moveaxis(x, 3, 1);
/* // If no pre-split validation set is provided, the split would be done
// automatically here, but that may result in class imbalance on small
// datasets. Original Keras logic:
if(val_data==None):
    eddl.fit(model, [x_train], [y_train], batch_size, epochs)
else:
    val_data = [val_data[0], val_data[1]]
    eddl.fit(model, [x_train], [y_train], batch_size, epochs)
return
*/
eddl::fit(net, {x}, {y}, batch_size, epochs);
}
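// A hedged sketch of the "fine-grained training" mentioned in the porting
// notes above: how a pre-split validation set could be monitored once per
// epoch with EDDL's reset_loss/train_batch/evaluate calls. The helper name
// fit_with_validation and the x_val/y_val tensors are assumptions, not part
// of the original interface.
static void fit_with_validation(eddl::model net,
                                Tensor* x_train, Tensor* y_train,
                                Tensor* x_val, Tensor* y_val,
                                int batch_size, int epochs)
{
    int n = x_train->shape[0];
    int num_batches = n / batch_size;
    for (int e = 0; e < epochs; e++) {
        eddl::reset_loss(net); // clear the running loss/metric accumulators
        for (int b = 0; b < num_batches; b++) {
            // Sequential mini-batch indices; shuffling is left out for brevity
            std::vector<int> indices(batch_size);
            for (int i = 0; i < batch_size; i++)
                indices[i] = b * batch_size + i;
            eddl::train_batch(net, {x_train}, {y_train}, indices);
        }
        // Report loss/metric on the held-out set after every epoch
        eddl::evaluate(net, {x_val}, {y_val});
    }
}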
void BasicNet::evaluate(Tensor* x, Tensor* y)
{
x = Tensor::moveaxis(x, 3, 1);
eddl::evaluate(net, {x}, {y});
}
/*
# Calculate accuracy on the test set
def compute_accuracy(y_pred_, y_true_):
    return 1 - np.sum(np.abs(np.max(np.round(y_pred_), axis=1) - np.array(y_true_)))/len(y_true_)

def false_positive_rate(y_test, y_pred, detect_rule):
    time_hr = len(y_pred)*1280/(256*60*60)  # recording length in hours (5 s segments at 256 Hz)
    preds = np.max(np.rint(y_pred), axis=1)
    fp = 0
    x_ = np.zeros(detect_rule[1])
    alarm_triggered = False
    counter_alarm = 23  # refractory period: needs 1 min between seizure onsets
    false_alarms = []
    for idx, x in enumerate(preds):
        if(counter_alarm == 0):
            alarm_triggered = False
        else:
            counter_alarm -= 1
        if(alarm_triggered == False):
            # Shift the detection window right by one position
            for j, y in enumerate(x_[::-1]):
                if(j == len(x_)-1):
                    x_[1] = x_[0]
                    x_[0] = 0
                else:
                    x_[len(x_)-1-j] = x_[len(x_)-2-j]
            if(x == 1):
                x_[0] = 1
            if(np.sum(x_) >= detect_rule[0]):
                fp += 1
                alarm_triggered = True
                counter_alarm = 23
                false_alarms.append(idx)
    fpr = fp/time_hr
    return fpr, fp, false_alarms

# Compute detection time: first segment at which detect_rule[0] of the last
# detect_rule[1] segments are classified as ictal (e.g. 2 out of 3)
def compute_detect_time(preds, test, detect_rule):
    x_ = np.zeros(detect_rule[1])
    for idx, x in enumerate(preds):
        # Shift the detection window right by one position
        for j, y in enumerate(x_[::-1]):
            if(j == len(x_)-1):
                x_[1] = x_[0]
                x_[0] = 0
            else:
                x_[len(x_)-1-j] = x_[len(x_)-2-j]
        if(x == 1):
            x_[0] = 1
        if(np.sum(x_) >= detect_rule[0]):
            return idx
    return -1
*/
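// A hedged C++ sketch of compute_detect_time above, assuming per-segment
// predictions already rounded to 0/1: return the index of the first segment
// at which at least detect_rule[0] of the last detect_rule[1] segments were
// classified as ictal, or -1 if the rule never fires.
static int compute_detect_time(const std::vector<int>& preds,
                               const std::vector<int>& detect_rule)
{
    std::vector<int> window(detect_rule[1], 0); // last n per-segment decisions
    for (std::size_t idx = 0; idx < preds.size(); idx++) {
        // Shift the window right by one and insert the newest decision
        for (std::size_t j = window.size() - 1; j > 0; j--)
            window[j] = window[j - 1];
        window[0] = preds[idx];
        int ictal = 0;
        for (int v : window) ictal += v;
        if (ictal >= detect_rule[0])
            return (int)idx; // detection time, in segments
    }
    return -1;
}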
