basicnet.cpp

#include <eddl/apis/eddl.h>
#include <eddl/apis/eddlT.h>
#include <vector>
#include "basicnet.h"
using namespace eddl;
using namespace std; // for the unqualified vector<Tensor*> in the method signatures below
#define NB_CHNS 4
#define L2_K 0.1
#define L2_B 0.1
#define L2_A 0.0
#define DROPOUT_RATE 0.5
BasicNet::BasicNet()
{
/* Porting notes (Keras -> EDDL):
 * - Zero padding is not available yet (the ZeroPadding2D calls are kept below as comments).
 * - Regularizers are available, but only as standalone layers, not as the
 *   kernel_regularizer / bias_regularizer parameters: where should those layers be
 *   placed to have the same effect?
 * - The train function underwent major (?) changes: EDDL's fit function is much
 *   simpler than the Keras one. It may be reimplemented in the future with
 *   "fine-grained training".
 */
layer in_ = Input({1, NB_CHNS, 1280});
layer l = in_;
// l = ZeroPadding2D(l, {1,2});
// l = Conv(l, 16, {3, 5}, data_format="channels_last", kernel_initialiser='glorot_uniform', bias_initialiser='zeros', kernel_regularizer=L2(l2_k), bias_regularizer=L2(l2_b))
l = Conv(l, 16, {3, 5}, {1,2}, "same"); // padding='valid'
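// Sketch for the open question in the notes above (assumption, not verified):
// if EDDL exposes regularizers as wrapper layers, e.g. layer L2(layer l, float l2),
// the Keras kernel_regularizer might be approximated by wrapping the layer whose
// parameters it should act on, for instance:
// l = L2(Conv(l, 16, {3, 5}, {1,2}, "same"), L2_K);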
l = BatchNormalization(l, 0.99, 0.001);
l = Activation(l, "relu");
l = MaxPool(l, {1,2}, {1,2}, "same"); // padding='valid'
// l = ZeroPadding2D(l, {1,1});
// l = Conv(l, 32, {3, 3}, {1,1}, data_format="channels_last", kernel_initialiser='glorot_uniform', bias_initialiser='zeros', kernel_regularizer=L2(l2_k), bias_regularizer=L2(l2_b));
l = Conv(l, 32, {3, 3}, {1,1}, "same"); // padding='valid'
l = BatchNormalization(l, 0.99, 0.001);
l = Activation(l, "relu");
l = MaxPool(l, {1,2}, {1,2}, "same"); // padding='valid'
// l = ZeroPadding2D(l, {1,1});
// l = Conv(l, 32, {3, 3}, {2,2}, data_format="channels_last", kernel_initialiser='glorot_uniform', bias_initialiser='zeros', kernel_regularizer=L2(l2_k), bias_regularizer=L2(l2_b));
l = Conv(l, 32, {3, 3}, {2,2}, "same"); // padding='valid'
l = BatchNormalization(l, 0.99, 0.001);
l = Activation(l, "relu");
l = Dropout(l, DROPOUT_RATE);
l = Flatten(l);
// l = Dense(l, 64, kernel_initialiser='glorot_uniform', bias_initialiser='zeros', kernel_regularizer=L2(l2_k), bias_regularizer=L2(l2_b));
l = Dense(l, 64);
l = Activation(l, "relu");
l = Dropout(l, DROPOUT_RATE);
// l = Dense(l, 1, kernel_initialiser='glorot_uniform', bias_initialiser='zeros');
l = Dense(l, 1);
l = Activation(l, "sigmoid");
layer out_ = l;
net = Model({in_}, {out_});
build(net,
sgd(0.01, 0.9, 0.0, true),
{"soft_cross_entropy"},
{"categorical_accuracy"},
CS_CPU(4, "full_mem"));
summary(net);
}
void BasicNet::train(const vector<Tensor*> x_train, const vector<Tensor*> y_train, int batch_size, int epochs)
{
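// Minimal sketch (assumption, untested): delegate to EDDL's high-level fit(),
// which takes the model, the input and target tensors, the batch size and the
// number of epochs.
fit(net, x_train, y_train, batch_size, epochs);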
}
void BasicNet::evaluate(const vector<Tensor*> x_test, const vector<Tensor*> y_test)
{
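// Minimal sketch (assumption, untested): EDDL's evaluate() runs the model over
// the test tensors and reports the loss/metric configured in build().
evaluate(net, x_test, y_test);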
}
