diff --git a/src/basicnet.cpp b/src/basicnet.cpp
index 9f0f5f5..c91f211 100644
--- a/src/basicnet.cpp
+++ b/src/basicnet.cpp
@@ -1,88 +1,104 @@
 #include <cstdio>
 #include <cstdlib>
 #include <iostream>
 #include "basicnet.h"
 
 using namespace eddl;
 
 #define NB_CHNS 4
 #define L2_K 0.1
 #define L2_B 0.1
 #define L2_A 0.0
 #define DROPOUT_RATE 0.5
 
 BasicNet::BasicNet()
 {
     /*
     #########################################################
     #########################################################
     #########################################################
     #########################################################
     # ZERO PADDING NOT AVAILABLE YET
     # REGULARIZERS ARE AVAILABLE, BUT AS SIMPLE LAYERS, NOT AS THE PARAMETERS KERNEL_REGULARIZER
     # AND BIAS_REGULARIZER: WHERE DO I PUT THE LAYERS FOR THEM TO HAVE THE SAME EFFECT?
     # FUNCTION TRAIN UNDERWENT MAJOR (?) CHANGES: the fit function is much simpler than the Keras one.
     # Maybe implemented in the future with "fine-grained training".
     */
 
     layer in_ = Input({1, NB_CHNS, 1280});
     layer l = in_;
 
     // l = ZeroPadding2D(l, {1,2});
     // l = Conv(l, 16, {3, 5}, data_format="channels_last", kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=L2(l2_k), bias_regularizer=L2(l2_b));
     l = Conv(l, 16, {3, 5}, {1,2}, "same");   // padding='valid'
     l = BatchNormalization(l, 0.99, 0.001);
     l = Activation(l, "relu");
     l = MaxPool(l, {1,2}, {1,2}, "same");     // padding='valid'
 
     // l = ZeroPadding2D(l, {1,1});
     // l = Conv(l, 32, {3, 3}, {1,1}, data_format="channels_last", kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=L2(l2_k), bias_regularizer=L2(l2_b));
     l = Conv(l, 32, {3, 3}, {1,1}, "same");   // padding='valid'
     l = BatchNormalization(l, 0.99, 0.001);
     l = Activation(l, "relu");
     l = MaxPool(l, {1,2}, {1,2}, "same");     // padding='valid'
 
     // l = ZeroPadding2D(l, {1,1});
     // l = Conv(l, 32, {3, 3}, {2,2}, data_format="channels_last", kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=L2(l2_k), bias_regularizer=L2(l2_b));
     l = Conv(l, 32, {3, 3}, {2,2}, "same");   // padding='valid'
     l = BatchNormalization(l, 0.99, 0.001);
     l = Activation(l, "relu");
 
     l = Dropout(l, DROPOUT_RATE);
     l = Flatten(l);
 
     // l = Dense(l, 64, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=L2(l2_k), bias_regularizer=L2(l2_b));
     l = Dense(l, 64);
     l = Activation(l, "relu");
     l = Dropout(l, DROPOUT_RATE);
 
     // l = Dense(l, 1, kernel_initializer='glorot_uniform', bias_initializer='zeros');
     l = Dense(l, 1);
     l = Activation(l, "sigmoid");
 
     layer out_ = l;
     net = Model({in_}, {out_});
 
     build(net,
           sgd(0.01, 0.9, 0.0, true),
           {"soft_cross_entropy"},
           {"categorical_accuracy"},
           CS_CPU(4, "full_mem"));
 
     summary(net);
 }
 
-void BasicNet::train(const std::vector<float> x_train, const std::vector<float> y_train, int batch_size, int epochs)
+void BasicNet::train(const std::vector<float> x, const std::vector<float> y, int batch_size, int epochs)
 {
+    //////////////////////////////////////////
+    //////////////////////////////////////////
+    //////////////////////////////////////////
+    // HERE SHOULD MOVE AXIS?????????????????? can be done with moveaxis (tensor.h ?)
+
+    // tensor x_train = eddlT::create(x);
+    // tensor y_train = eddlT::create(y);
+
+    // fit(model, {x_train}, {y_train}, batch_size, epochs);
 }
 
-void BasicNet::evaluate(const std::vector<float> x_test, const std::vector<float> y_test)
+void BasicNet::evaluate(const std::vector<float> x, const std::vector<float> y)
 {
+    //////////////////////////////////////////
+    //////////////////////////////////////////
+    //////////////////////////////////////////
+    // HERE SHOULD MOVE AXIS?????????????????? can be done with moveaxis (tensor.h ?)
+
+    // tensor x_test = eddlT::create(x);
+    // tensor y_test = eddlT::create(y);
+
+    // evaluate(model, {x_test}, {y_test});
 }
diff --git a/src/basicnet.h b/src/basicnet.h
index 907e340..f9dddf4 100644
--- a/src/basicnet.h
+++ b/src/basicnet.h
@@ -1,17 +1,17 @@
 #include <vector>
 #include <string>
 #include <eddl/apis/eddl.h>
 
 class BasicNet
 {
 public:
     BasicNet();
 
-    void train(const std::vector<float> x_train, const std::vector<float> y_train, int batch_size, int epochs);
+    void train(const std::vector<float> x, const std::vector<float> y, int batch_size, int epochs);
 
-    void evaluate(const std::vector<float> x_test, const std::vector<float> y_test);
+    void evaluate(const std::vector<float> x, const std::vector<float> y);
 
 private:
     eddl::model net;
 };
\ No newline at end of file
diff --git a/src/main.cpp b/src/main.cpp
index 9bf5f14..3019827 100644
--- a/src/main.cpp
+++ b/src/main.cpp
@@ -1,6 +1,56 @@
+#include <iostream>
+#include <string>
+#include <vector>
+
+
+#include <eddl/tensor/tensor.h>
+
 #include "basicnet.h"
+
+#define NB_CHNS 4
+
+// Dataset position and choice
+const std::string data_folder = "../dataset/";
+const std::string signal_length = "1mn";
+
+// Processing parameters
+const double validation_size = 0.15;
+const int test_stride = 640;   // Corresponds to 50% overlap; can be set to 1280 to have no overlap
+
+// Model parameters
+const double lr = 0.00075;
+const int epochs = 1;
+const int batch_size = 32;
+
+
 int main()
 {
+    Tensor* x_data = Tensor::load_from_txt(data_folder + "signal_mit_" + signal_length + ".csv", ',', 0);
+    Tensor* y_data = Tensor::load_from_txt(data_folder + "labels_mit_" + signal_length + ".txt", ' ', 0);
+
+    x_data->reshape_({x_data->shape[0], NB_CHNS, (int)(x_data->shape[1] / NB_CHNS)});
+
+/*
+Remaining Python (pandas) preprocessing still to be ported:
+
+info_data = np.loadtxt(data_folder+"infos_mit_"+signal_length+".txt", dtype="str")
+
+# Create the pandas df
+data = pd.concat([pd.Series(y_data), pd.Series([info[0] for info in info_data]), pd.Series([info[1] for info in info_data])], axis=1)
+data.columns = ["label", "patient", "file"]
+data["signal"] = ""
+data["signal"].astype(object)
+for i, sig in enumerate(x_data):
+    data.at[i, "signal"] = sig
+
+patients = np.unique(data.patient)
+data.sort_values(["patient", "file", "label"], inplace=True)
+data = data.reset_index(drop=True)
+
+# Load seizure times
+seizures = pd.read_csv(data_folder+"seizures.csv", delimiter='\t')
+seizures_ = seizures[seizures.Patient.isin(patients)]
+seizures_["length"] = seizures_.apply(lambda x: (x.end_seizure - x.start_seizure), axis=1)
+*/
 }
\ No newline at end of file
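
Note on the two stubbed bodies in basicnet.cpp: once the axis question is settled, the commented-out calls could be completed roughly as sketched below. This is a sketch only, not part of the patch. It assumes the flat std::vector<float> layout introduced by this diff (sample-major, NB_CHNS * 1280 floats per window, one label per window) and the EDDL calls Tensor(shape), eddl::fit and eddl::evaluate; the name SAMPLE_LEN is invented here for readability.

    // Sketch of the stubbed train/evaluate bodies -- assumptions as above.
    #include <algorithm>
    #include <vector>

    #include <eddl/apis/eddl.h>
    #include <eddl/tensor/tensor.h>

    #include "basicnet.h"

    #define NB_CHNS 4        // as in basicnet.cpp
    #define SAMPLE_LEN 1280  // hypothetical name for the 1280-sample window

    void BasicNet::train(const std::vector<float> x, const std::vector<float> y, int batch_size, int epochs)
    {
        int n = (int)(x.size() / (NB_CHNS * SAMPLE_LEN));   // number of windows

        // Allocate CPU tensors shaped the way the network's Input expects
        // ({N, 1, NB_CHNS, 1280}) and copy the raw buffers in.
        Tensor* x_train = new Tensor({n, 1, NB_CHNS, SAMPLE_LEN});
        Tensor* y_train = new Tensor({n, 1});
        std::copy(x.begin(), x.end(), x_train->ptr);
        std::copy(y.begin(), y.end(), y_train->ptr);

        eddl::fit(net, {x_train}, {y_train}, batch_size, epochs);
    }

    void BasicNet::evaluate(const std::vector<float> x, const std::vector<float> y)
    {
        int n = (int)(x.size() / (NB_CHNS * SAMPLE_LEN));

        Tensor* x_test = new Tensor({n, 1, NB_CHNS, SAMPLE_LEN});
        Tensor* y_test = new Tensor({n, 1});
        std::copy(x.begin(), x.end(), x_test->ptr);
        std::copy(y.begin(), y.end(), y_test->ptr);

        // Qualified call, so this does not recurse into BasicNet::evaluate.
        eddl::evaluate(net, {x_test}, {y_test});
    }

If the incoming buffers were instead time-major, Tensor::moveaxis (the function the moveaxis TODO comments point at) could reorder the axes after construction rather than relying on the copy order.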