diff --git a/Data_Manipulation.py b/Data_Manipulation.py
new file mode 100644
index 0000000..de4240e
--- /dev/null
+++ b/Data_Manipulation.py
@@ -0,0 +1,125 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Sat Feb 8 22:10:18 2020
+---------------------------------------------------------------------
+-- Author: Vigneashwara Pandiyan
+---------------------------------------------------------------------
+Data loading and preprocessing utilities for the LPBF dataset
+"""
+
+#%%
+import torch
+from torch.utils.data import Dataset, DataLoader
+import numpy as np
+import pandas as pd
+from sklearn.model_selection import train_test_split  # implements the train/test split
+
+#%% Normalize the dataset
+
+def normalize(Features):
+    """Load a .npy feature file and standardise each row to zero mean, unit variance."""
+    Features_1 = np.load(Features)
+    df = pd.DataFrame(Features_1)
+    df = df.apply(lambda x: (x - np.mean(x)) / np.std(x), axis=1)
+    df = df.to_numpy()
+    return df
+
+#%% Convert into torch tensors
+
+class Mechanism(Dataset):
+    """Wraps the four sensor channels of each window into a (4, window_length) tensor."""
+
+    def __init__(self, sequences):
+        self.sequences = sequences
+
+    def __len__(self):
+        return len(self.sequences)
+
+    def __getitem__(self, idx):
+        sequence_1, sequence_2, sequence_3, sequence_4, label = self.sequences[idx]
+
+        sequence_1 = torch.Tensor(sequence_1).view(1, -1)
+        sequence_2 = torch.Tensor(sequence_2).view(1, -1)
+        sequence_3 = torch.Tensor(sequence_3).view(1, -1)
+        sequence_4 = torch.Tensor(sequence_4).view(1, -1)
+
+        # Stack the four channels along dim 0 -> shape (4, window_length)
+        sequence = torch.cat((sequence_1, sequence_2, sequence_3, sequence_4), 0)
+        label = torch.tensor(label).long()
+
+        return sequence, label
+
+#%%
+def data_prep(Rawspace_1, Rawspace_2, Rawspace_3, Rawspace_4, classspace):
+    """Zip the per-channel rows and their labels into a list of tuples."""
+    sequences = []
+    for i in range(len(classspace)):
+        sequence_features_1 = Rawspace_1[i]
+        sequence_features_2 = Rawspace_2[i]
+        sequence_features_3 = Rawspace_3[i]
+        sequence_features_4 = Rawspace_4[i]
+        label = classspace[i]
+        sequences.append((sequence_features_1, sequence_features_2,
+                          sequence_features_3, sequence_features_4, label))
+    return sequences
+
+#%% Loading the data for training the model
+def dataloading_funtion(folder, window):
+
+    rawfile_1 = str(folder) + '/' + 'Channel0_' + str(window) + '.npy'
+    rawfile_2 = str(folder) + '/' + 'Channel1_' + str(window) + '.npy'
+    rawfile_3 = str(folder) + '/' + 'Channel2_' + str(window) + '.npy'
+    rawfile_4 = str(folder) + '/' + 'Channel3_' + str(window) + '.npy'
+    classfile = str(folder) + '/' + 'classspace_' + str(window) + '.npy'
+
+    Rawspace_1 = normalize(rawfile_1).astype(np.float64)
+    Rawspace_2 = normalize(rawfile_2).astype(np.float64)
+    Rawspace_3 = normalize(rawfile_3).astype(np.float64)
+    Rawspace_4 = normalize(rawfile_4).astype(np.float64)
+
+    classspace = np.load(classfile).astype(np.float64)
+    trainset, testset = data_batch_prep(Rawspace_1, Rawspace_2, Rawspace_3,
+                                        Rawspace_4, classspace)
+
+    return trainset, testset, classspace
+
+#%%
+
+def data_batch_prep(Rawspace_1, Rawspace_2, Rawspace_3, Rawspace_4, classspace):
+
+    sequences_batch = []
+    for i in range(len(classspace)):
+        sequence_features_1 = Rawspace_1[i]
+        sequence_features_2 = Rawspace_2[i]
+        sequence_features_3 = Rawspace_3[i]
+        sequence_features_4 = Rawspace_4[i]
+        label = classspace[i]
+        sequences_batch.append((sequence_features_1, sequence_features_2,
+                                sequence_features_3, sequence_features_4, label))
+
+    sequences_batch = Mechanism(sequences_batch)
+    train, test = train_test_split(sequences_batch, test_size=0.3, random_state=42)
+    trainset = DataLoader(train, batch_size=200, num_workers=0, shuffle=True)
+    testset = DataLoader(test, batch_size=200, num_workers=0, shuffle=True)
+
+    return trainset, testset
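For orientation, a minimal usage sketch of the loaders above. It assumes the window-2500 `.npy` files exist under `./data`; the label shape depends on how `classspace` is stored, so both possibilities are noted.

```python
# Minimal sketch, assuming the Channel*/classspace files for window 2500 sit under ./data
from Data_Manipulation import dataloading_funtion

trainset, testset, classspace = dataloading_funtion('data', 2500)
batch, labels = next(iter(trainset))
print(batch.shape)   # expected: torch.Size([200, 4, 2500]) -- four channels per window
print(labels.shape)  # torch.Size([200, 1]) or torch.Size([200]) depending on label storage
```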
diff --git a/Main_Compute_Saliency.py b/Main_Compute_Saliency.py
new file mode 100644
index 0000000..17a8ac4
--- /dev/null
+++ b/Main_Compute_Saliency.py
@@ -0,0 +1,85 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Sat Feb 8 22:10:18 2020
+---------------------------------------------------------------------
+-- Author: Vigneashwara Pandiyan
+---------------------------------------------------------------------
+Main file to compute and plot saliency maps
+"""
+
+#%%
+import torch
+torch.cuda.empty_cache()
+
+from Utils import *
+from Network import *
+from Data_Manipulation import *
+from Saliency_Utils import *
+
+#%%
+device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
+print(device)
+
+# Load the full model saved by Main_Variable_Timescales.py
+PATH = './CNN_LSTM_Multivariate' + '.pth'
+net = torch.load(PATH)
+torch.backends.cudnn.enabled = False
+net.to(device)
+
+#%% Test dataloaders with a batch size of 1
+
+testset_2500 = dataloading_funtion_saliency('data', 2500)
+torch.save(testset_2500, 'data/testset2500')
+print('done 2500')
+
+testset_5000 = dataloading_funtion_saliency('data', 5000)
+torch.save(testset_5000, 'data/testset5000')
+print('done 5000')
+
+testset_7500 = dataloading_funtion_saliency('data', 7500)
+torch.save(testset_7500, 'data/testset7500')
+print('done 7500')
+
+testset_10000 = dataloading_funtion_saliency('data', 10000)
+torch.save(testset_10000, 'data/testset10000')
+print('done 10000')
+
+#%% Compute saliencies across windows
+
+y_true2500, y_pred2500, saliencies2500 = window_saliency_results(testset_2500, net, device, '2500')
+y_true5000, y_pred5000, saliencies5000 = window_saliency_results(testset_5000, net, device, '5000')
+y_true7500, y_pred7500, saliencies7500 = window_saliency_results(testset_7500, net, device, '7500')
+y_true10000, y_pred10000, saliencies10000 = window_saliency_results(testset_10000, net, device, '10000')
+
+#%% Normalize across windows
+
+testset_2500 = torch.load('data/testset2500')
+testset_5000 = torch.load('data/testset5000')
+testset_7500 = torch.load('data/testset7500')
+testset_10000 = torch.load('data/testset10000')
+
+saliencies2500 = torch.cat(torch.load('data/salienciesNorm2500'), 0).cpu()
+saliencies5000 = torch.cat(torch.load('data/salienciesNorm5000'), 0).cpu()
+saliencies7500 = torch.cat(torch.load('data/salienciesNorm7500'), 0).cpu()
+saliencies10000 = torch.cat(torch.load('data/salienciesNorm10000'), 0).cpu()
+
+#%%
+
+saliencies2500PerCat = compute_normalize_window(saliencies2500, testset_2500, 'data', 2500)
+saliencies5000PerCat = compute_normalize_window(saliencies5000, testset_5000, 'data', 5000)
+saliencies7500PerCat = compute_normalize_window(saliencies7500, testset_7500, 'data', 7500)
+saliencies10000PerCat = compute_normalize_window(saliencies10000, testset_10000, 'data', 10000)
+
+# Window lengths of 2500/5000/7500/10000 samples correspond to 0.83/1.65/2.50/3.30 ms
+distribution_plot(saliencies2500PerCat, "0.83")
+distribution_plot(saliencies5000PerCat, "1.65")
+distribution_plot(saliencies7500PerCat, "2.50")
+distribution_plot(saliencies10000PerCat, "3.30")
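A quick, hedged way to sanity-check the per-sample predictions this script saves. File names follow the `y_trueNorm*`/`y_predNorm*` convention above; depending on your PyTorch version, `torch.load` may need `weights_only=False` to unpickle plain Python lists.

```python
# Hedged sketch: overall accuracy from the per-sample tensors saved above
import numpy as np
import torch

y_true = np.array(torch.load('data/y_trueNorm2500')).ravel()
y_pred = np.array(torch.load('data/y_predNorm2500')).ravel()
print('window 2500 accuracy: %.3f' % (y_true == y_pred).mean())
```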
diff --git a/Main_Variable_Timescales.py b/Main_Variable_Timescales.py
new file mode 100644
index 0000000..53af827
--- /dev/null
+++ b/Main_Variable_Timescales.py
@@ -0,0 +1,210 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Sat Feb 8 22:10:18 2020
+---------------------------------------------------------------------
+-- Author: Vigneashwara Pandiyan
+---------------------------------------------------------------------
+Main file to train and evaluate the model on the LPBF dataset
+"""
+
+#%%
+# Import libraries
+
+import torch
+from torch import nn
+from torch.optim.lr_scheduler import StepLR
+torch.cuda.empty_cache()
+
+import numpy as np
+import random
+
+from Utils import *
+from Network import *
+from Data_Manipulation import *
+from Saliency_Utils import *
+
+#%%
+# Data --> https://polybox.ethz.ch/index.php/s/MUmJXXXBxpK1Ejc
+# Place the downloaded .npy files inside the 'data' folder
+
+trainset_2500, testset_2500, classspace_2500 = dataloading_funtion('data', 2500)
+trainset_5000, testset_5000, classspace_5000 = dataloading_funtion('data', 5000)
+trainset_7500, testset_7500, classspace_7500 = dataloading_funtion('data', 7500)
+trainset_10000, testset_10000, classspace_10000 = dataloading_funtion('data', 10000)
+
+device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
+print(device)
+
+class_num = np.concatenate((classspace_2500, classspace_5000, classspace_7500, classspace_10000), axis=0)
+values, counts = np.unique(class_num, return_counts=True)
+class_weights = classweight(values, counts)
+class_weights = torch.from_numpy(class_weights)
+
+#%% Data loader
+
+data_2500 = [trainset_2500, testset_2500]
+data_5000 = [trainset_5000, testset_5000]
+data_7500 = [trainset_7500, testset_7500]
+data_10000 = [trainset_10000, testset_10000]
+
+Training_batch = [data_2500, data_5000, data_7500, data_10000]
+
+#%% Model initiation
+epochs = 10
+n_features = 10
+n_classes = 3
+net = CNN(n_features, n_classes)
+net.apply(initialize_weights)
+net.to(device)
+class_weights = class_weights.to(device, dtype=torch.float)
+print(class_weights.is_cuda)
+
+#%% Network training
+
+costFunc = torch.nn.CrossEntropyLoss(weight=class_weights)
+optimizer = torch.optim.SGD(net.parameters(), lr=0.01, momentum=0.9, weight_decay=0.003)
+scheduler = StepLR(optimizer, step_size=100, gamma=0.3)
+
+Loss_value = []
+Train_loss = []
+Iteration_count = 0
+iteration = []
+Epoch_count = 0
+Total_Epoch = []
+Accuracy = []
+Learning_rate = []
+Training_loss_mean = []
+Training_loss_std = []
+Times = []
+
+for epoch in range(epochs):
+    epoch_smoothing = []
+    learingrate_value = get_lr(optimizer)
+    Learning_rate.append(learingrate_value)
+    closs = 0
+
+    # Pick one of the four window lengths at random for this epoch
+    num = random.randint(0, 3)
+    print(num)
+    Times.append(num)
+    trainset, testset = Training_batch[num]
+
+    for i, batch in enumerate(trainset, 0):
+
+        data, output = batch
+        data, output = data.to(device, dtype=torch.float), output.to(device, dtype=torch.long)
+        prediction = net(data)
+        loss = costFunc(prediction, output.squeeze())  # (batch, 3) logits vs (batch,) labels
+
+        # L1 weight regularisation (size_average=False in older PyTorch = reduction='sum')
+        factor = 0.00005
+        l1_crit = nn.L1Loss(reduction='sum')
+        reg_loss = 0
+        for param in net.parameters():
+            reg_loss += l1_crit(param, target=torch.zeros_like(param))
+        loss += factor * reg_loss
+
+        closs += loss.item()  # accumulate so the per-epoch mean below is correct
+        optimizer.zero_grad()
+        loss.backward()
+        optimizer.step()
+        epoch_smoothing.append(loss.item())
+
+        if i % 100 == 0:
+            print('[%d %d] loss: %.4f' % (epoch + 1, i + 1, loss.item()))
+
+    # Step the LR schedule once per epoch, after the optimizer updates
+    scheduler.step()
+
+    loss_train = closs / len(trainset)
+    Loss_value.append(loss_train)
+
+    Training_loss_mean.append(np.mean(epoch_smoothing))
+    Training_loss_std.append(np.std(epoch_smoothing))
+
+    correctHits = 0
+    total = 0
+
+    net.eval()
+    for batches in testset:
+        data, output = batches
+        data, output = data.to(device, dtype=torch.float), output.to(device, dtype=torch.long)
+        output = output.squeeze()
+        prediction = net(data)
+        prediction = torch.argmax(prediction, dim=1)
+        total += output.size(0)
+        correctHits += (prediction == output).sum().item()
+    net.train()
+
+    Epoch_count = epoch + 1
+    Total_Epoch.append(Epoch_count)
+    Epoch_accuracy = (correctHits / total) * 100
+    Accuracy.append(Epoch_accuracy)
+    print('Accuracy on epoch [%d] window [%d] : %.3f' % (epoch + 1, data.shape[2], Epoch_accuracy))
+
+PATH = './CNN_LSTM_Multivariate' + '.pth'
+# Save the whole model object; Main_Compute_Saliency.py reloads it with torch.load.
+# (Saving the state_dict to the same path first would just be overwritten.)
+torch.save(net, PATH)
+model = torch.load(PATH)
+
+#%% Training plots
+
+iter_1 = '0.83 ms'
+iter_2 = '1.65 ms'
+iter_3 = '2.50 ms'
+iter_4 = '3.3 ms'
+class_names = [iter_1, iter_2, iter_3, iter_4]
+plots(iteration, Loss_value, Total_Epoch, Accuracy, Learning_rate,
+      Training_loss_mean, Training_loss_std, class_names, Times)
+count_parameters(net)
+
+#%% Confusion matrix
+
+windowresults(testset_2500, model, device, '2500')
+windowresults(testset_5000, model, device, '5000')
+windowresults(testset_7500, model, device, '7500')
+windowresults(testset_10000, model, device, '10000')
+
+#%% Validation on unknown time-scales
+
+# trainset_4000,testset_4000,_=dataloading_funtion('data',4000)
+# windowresults(testset_4000,model,device,'4000')
+
+# trainset_6000,testset_6000,_=dataloading_funtion('data',6000)
+# windowresults(testset_6000,model,device,'6000')
+
+# trainset_9000,testset_9000,_=dataloading_funtion('data',9000)
+# windowresults(testset_9000,model,device,'9000')
+
+# trainset_12000,testset_12000,_=dataloading_funtion('data',12000)
+# windowresults(testset_12000,model,device,'12000')
+
+# trainset_1500,testset_1500,_=dataloading_funtion('data',1500)
+# windowresults(testset_1500,model,device,'1500')
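The `classweight` helper used above down-weights frequent classes via weight = 1 − count/total. A quick hedged check with made-up counts:

```python
# Hedged example of the classweight() formula with hypothetical class counts
import numpy as np
from Utils import classweight

values = np.array([0., 1., 2.])
counts = np.array([1000, 3000, 1000])   # hypothetical: class 1 is 3x more frequent
print(classweight(values, counts))      # [0.8, 0.4, 0.8]: rarer classes weigh more
```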
diff --git a/Network.py b/Network.py
new file mode 100644
index 0000000..c42e600
--- /dev/null
+++ b/Network.py
@@ -0,0 +1,129 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Sat Feb 8 22:10:18 2020
+---------------------------------------------------------------------
+-- Author: Vigneashwara Pandiyan
+---------------------------------------------------------------------
+Network model
+"""
+
+#%%
+
+from torch import nn, optim
+from torch.nn import functional as F
+from prettytable import PrettyTable
+
+class CNN(nn.Module):
+    """Five 1-D conv blocks followed by an LSTM over the remaining time axis."""
+
+    def __init__(self, n_features, n_classes, n_hidden=90, n_layers=1):
+        super().__init__()
+
+        self.conv1 = nn.Conv1d(in_channels=4, out_channels=8, kernel_size=16)
+        self.bn1 = nn.BatchNorm1d(8)
+
+        self.conv2 = nn.Conv1d(in_channels=8, out_channels=16, kernel_size=16)
+        self.bn2 = nn.BatchNorm1d(16)
+
+        self.conv3 = nn.Conv1d(in_channels=16, out_channels=32, kernel_size=16)
+        self.bn3 = nn.BatchNorm1d(32)
+
+        self.conv4 = nn.Conv1d(32, 64, kernel_size=16)
+        self.bn4 = nn.BatchNorm1d(64)
+
+        self.conv5 = nn.Conv1d(64, out_channels=n_features, kernel_size=16)
+        self.bn5 = nn.BatchNorm1d(n_features)
+
+        self.pool = nn.MaxPool1d(2)
+        self.dropout = nn.Dropout(0.025)
+
+        # Note: nn.LSTM dropout only applies between stacked layers,
+        # so it is inactive here with n_layers=1.
+        self.lstm = nn.LSTM(input_size=n_features,
+                            hidden_size=n_hidden,
+                            num_layers=n_layers,
+                            batch_first=True,
+                            dropout=0.2)
+        self.classifier = nn.Linear(n_hidden, n_classes)
+
+    def forward(self, x):
+        # x: (batch, 4, window_length)
+        x = F.relu(self.bn1(self.conv1(x)))
+        x = self.dropout(x)
+        x = self.pool(x)
+
+        x = F.relu(self.bn2(self.conv2(x)))
+        x = self.dropout(x)
+        x = self.pool(x)
+
+        x = F.relu(self.bn3(self.conv3(x)))
+        x = self.dropout(x)
+        x = self.pool(x)
+
+        x = F.relu(self.bn4(self.conv4(x)))
+        x = self.dropout(x)
+        x = self.pool(x)
+
+        x = F.relu(self.bn5(self.conv5(x)))
+        x = self.pool(x)
+
+        # (1, 4, 5000)  -> (1, n_features, 141)
+        # (1, 4, 10000) -> (1, n_features, 297)
+        x = x.permute(0, 2, 1)  # (batch, time, n_features) for the batch_first LSTM
+
+        # nn.LSTM returns (output, (h_n, c_n)); classify from the hidden state of
+        # the last layer (the earlier unpacking grabbed the cell state instead).
+        x, (hidden, _) = self.lstm(x)
+        out = hidden[-1]
+
+        return self.classifier(out)
+
+#%%
+
+def initialize_weights(m):
+    if isinstance(m, nn.Conv1d):
+        nn.init.kaiming_uniform_(m.weight.data, nonlinearity='relu')
+        if m.bias is not None:
+            nn.init.constant_(m.bias.data, 0)
+    elif isinstance(m, nn.BatchNorm1d):
+        nn.init.constant_(m.weight.data, 1)
+        nn.init.constant_(m.bias.data, 0)
+    elif isinstance(m, nn.Linear):
+        nn.init.kaiming_uniform_(m.weight.data)
+        nn.init.constant_(m.bias.data, 0)
+
+#%%
+
+def get_lr(optimizer):
+    for param_group in optimizer.param_groups:
+        print('Learning rate =', param_group['lr'])
+        return param_group['lr']
+
+#%%
+def count_parameters(model):
+    table = PrettyTable(["Modules", "Parameters"])
+    total_params = 0
+    for name, parameter in model.named_parameters():
+        if not parameter.requires_grad:
+            continue
+        param = parameter.numel()
+        table.add_row([name, param])
+        total_params += param
+    print(table)
+    print(f"Total Trainable Params: {total_params}")
+    return total_params
\ No newline at end of file
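A hedged smoke test of the architecture's shape bookkeeping, using random weights and a random input (values are meaningless; only the shapes matter):

```python
# Dummy forward pass: 2 windows of 5000 samples, 4 sensor channels each
import torch
from Network import CNN

net = CNN(n_features=10, n_classes=3)
x = torch.randn(2, 4, 5000)   # (batch, channels, window_length)
print(net(x).shape)           # expected: torch.Size([2, 3]) -- one logit per class
```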
diff --git a/Saliency_Utils.py b/Saliency_Utils.py
new file mode 100644
index 0000000..3e20be6
--- /dev/null
+++ b/Saliency_Utils.py
@@ -0,0 +1,180 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Sat Feb 8 22:10:18 2020
+---------------------------------------------------------------------
+-- Author: Vigneashwara Pandiyan
+---------------------------------------------------------------------
+Utils file for saliency
+"""
+
+#%%
+import torch
+import matplotlib.pyplot as plt
+import numpy as np
+import pandas as pd
+from Data_Manipulation import *
+
+#%% Loading the data for the saliency computation
+def dataloading_funtion_saliency(folder, window):
+
+    rawfile_1 = str(folder) + '/' + 'Channel0_' + str(window) + '.npy'
+    rawfile_2 = str(folder) + '/' + 'Channel1_' + str(window) + '.npy'
+    rawfile_3 = str(folder) + '/' + 'Channel2_' + str(window) + '.npy'
+    rawfile_4 = str(folder) + '/' + 'Channel3_' + str(window) + '.npy'
+    classfile = str(folder) + '/' + 'classspace_' + str(window) + '.npy'
+
+    Rawspace_1 = normalize(rawfile_1).astype(np.float64)
+    Rawspace_2 = normalize(rawfile_2).astype(np.float64)
+    Rawspace_3 = normalize(rawfile_3).astype(np.float64)
+    Rawspace_4 = normalize(rawfile_4).astype(np.float64)
+
+    classspace = np.load(classfile).astype(np.float64)
+
+    trainset, testset = data_batch_prep_saliency(Rawspace_1, Rawspace_2,
+                                                 Rawspace_3, Rawspace_4, classspace)
+
+    return testset
+
+#%%
+
+def data_batch_prep_saliency(Rawspace_1, Rawspace_2, Rawspace_3, Rawspace_4, classspace):
+    """Same split as data_batch_prep, but the test loader uses batch size 1
+    so that saliency can be computed per sample."""
+
+    sequences_batch = []
+    for i in range(len(classspace)):
+        sequence_features_1 = Rawspace_1[i]
+        sequence_features_2 = Rawspace_2[i]
+        sequence_features_3 = Rawspace_3[i]
+        sequence_features_4 = Rawspace_4[i]
+        label = classspace[i]
+        sequences_batch.append((sequence_features_1, sequence_features_2,
+                                sequence_features_3, sequence_features_4, label))
+
+    sequences_batch = Mechanism(sequences_batch)
+    train, test = train_test_split(sequences_batch, test_size=0.3, random_state=42)
+
+    trainset = torch.utils.data.DataLoader(train, batch_size=200, num_workers=0, shuffle=True)
+    testset = torch.utils.data.DataLoader(test, batch_size=1, num_workers=0, shuffle=True)
+
+    return trainset, testset
+
+#%%
+
+def compute_saliency_time(input_sample, model, relative=False):
+    """Gradient saliency for a single sample (assumes batch size 1)."""
+    # Move the input to the GPU first, then track gradients w.r.t. it
+    if torch.cuda.is_available():
+        input_sample = input_sample.cuda()
+    input_sample.requires_grad_()
+    input_sample.retain_grad()
+
+    output = model(input_sample)
+    # The logit of the winning class is the quantity we differentiate
+    output_max = output.max(1)[0]
+    output_max.backward()
+
+    # Saliency = |d logit / d input|
+    grad = input_sample.grad
+    slc = torch.abs(grad)
+
+    if relative:
+        # Scale by the input magnitude for a relative saliency
+        eps = 1e-5
+        slc = slc / (torch.abs(input_sample) + eps)
+
+    saliency = slc.detach()
+    input_sample.grad = torch.zeros_like(grad)
+    return saliency
+
+
+def window_saliency_results(testset, model, device, window):
+
+    y_pred = []
+    y_true = []
+    saliencies = []
+    model.eval()
+    count = 0
+    # Iterate over the test data one sample at a time
+    for batches in testset:
+
+        data, output = batches
+        data, output = data.to(device, dtype=torch.float), output.to(device, dtype=torch.long)
+        output = output.squeeze()
+
+        if count % 100 == 0:
+            print('Computing prediction for sample: ', count)
+
+        prediction = model(data)
+        prediction = torch.argmax(prediction, dim=1)
+
+        prediction = prediction.data.cpu().numpy()
+        output = output.data.cpu().numpy()
+
+        y_true.append(output)      # save ground truth
+        y_pred.append(prediction)  # save prediction
+
+        if count % 100 == 0:
+            print('Computing saliency for sample: ', count)
+
+        saliency = compute_saliency_time(data, model, relative=True)
+        saliencies.append(saliency)
+        count += 1
+
+    torch.save(y_true, 'data/y_trueNorm' + str(window))
+    torch.save(y_pred, 'data/y_predNorm' + str(window))
+    torch.save(saliencies, 'data/salienciesNorm' + str(window))
+
+    return y_true, y_pred, saliencies
+
+
+def compute_normalize_window(saliences, testset, folder, window):
+    # Median absolute amplitude of every test window, per channel
+    medianAmpl = []
+    for datum in testset:
+        singleDatum, _ = datum
+        medianAmpl.append(torch.median(torch.abs(singleDatum), dim=2, keepdim=True)[0])
+    medianAmpl = torch.cat(medianAmpl, dim=0)
+
+    # Normalise each saliency map by its window's amplitude, then take the
+    # median over time -> one value per channel and sample
+    saliences = saliences / medianAmpl
+    salienceCat = torch.median(saliences, dim=2, keepdim=False)[0]
+
+    title = str(folder) + '/' + 'saliencies' + str(window) + 'PerCat'
+    torch.save(salienceCat, title)
+
+    return salienceCat
\ No newline at end of file
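A minimal hedged sketch of the saliency call in isolation, using a random stand-in for one real test window (a trained checkpoint is assumed; on newer PyTorch, loading a full model may need `weights_only=False`):

```python
# Saliency for a single (1, 4, window_length) input, assuming the trained checkpoint exists
import torch
from Network import CNN  # needed so torch.load can unpickle the model class
from Saliency_Utils import compute_saliency_time

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = torch.load('./CNN_LSTM_Multivariate.pth').to(device).eval()

x = torch.randn(1, 4, 2500, device=device)    # stand-in for one real test window
sal = compute_saliency_time(x, model, relative=True)
print(sal.shape)                              # (1, 4, 2500): |d logit / d input| per channel
```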
diff --git a/Utils.py b/Utils.py
new file mode 100644
index 0000000..f758189
--- /dev/null
+++ b/Utils.py
@@ -0,0 +1,176 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Sat Feb 8 22:10:18 2020
+---------------------------------------------------------------------
+-- Author: Vigneashwara Pandiyan
+---------------------------------------------------------------------
+Utils file for visualization/plots
+"""
+
+#%%
+
+import numpy as np
+import matplotlib.pyplot as plt
+import torch
+from sklearn.metrics import confusion_matrix
+import seaborn as sns
+import pandas as pd
+
+#%%
+
+def plot_confusion_matrix(y_true, y_pred, classes, plotname):
+
+    # Build the confusion matrix and normalise each row to percentages
+    cm = confusion_matrix(y_true, y_pred)
+    cmn = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
+    cmn = cmn * 100
+
+    fig, ax = plt.subplots(figsize=(12, 9))
+    sns.set(font_scale=3)
+    sns.heatmap(cmn, annot=True, fmt='.1f', xticklabels=classes, yticklabels=classes,
+                cmap="coolwarm", linewidths=0.1, annot_kws={"size": 25},
+                cbar_kws={'label': 'Classification Accuracy %'})
+    for t in ax.texts:
+        t.set_text(t.get_text() + " %")
+    plt.ylabel('Actual', fontsize=25)
+    plt.xlabel('Predicted', fontsize=25)
+    plt.margins(0.2)
+    ax.set_yticklabels(ax.get_yticklabels(), rotation=90, va="center", fontsize=20)
+    ax.set_xticklabels(ax.get_xticklabels(), va="center", fontsize=20)
+    plt.savefig(str(plotname), bbox_inches='tight')
+    plt.show()
+    plt.clf()
+#%%
+
+def plots(iteration, Loss_value, Total_Epoch, Accuracy, Learning_rate,
+          Training_loss_mean, Training_loss_std, class_names, Times):
+
+    iteration = np.array(iteration)
+    Loss_value = np.array(Loss_value)
+    Total_Epoch = np.array(Total_Epoch)
+    Accuracy = np.array(Accuracy)
+    Learning_rate = np.array(Learning_rate)
+    Training_loss_mean = np.array(Training_loss_mean)
+    Training_loss_std = np.array(Training_loss_std)
+    Times = np.array(Times)
+
+    Accuracyfile = 'Accuracy' + '.npy'
+    Lossfile = 'Loss_value' + '.npy'
+    Timesfile = 'Times' + '.npy'
+
+    np.save(Timesfile, Times, allow_pickle=True)
+    np.save(Accuracyfile, Accuracy, allow_pickle=True)
+    np.save(Lossfile, Loss_value, allow_pickle=True)
+
+    fig, ax = plt.subplots()
+    plt.plot(Loss_value, 'r', linewidth=2.0)
+    plt.title('Iteration vs Loss_Value')
+    plt.xlabel('Iteration')
+    plt.ylabel('Loss_Value')
+    plot_1 = 'Loss_value_' + '.png'
+    plt.savefig(plot_1, dpi=600, bbox_inches='tight')
+    plt.show()
+    plt.clf()
+
+    plt.figure(2)
+    plt.plot(Total_Epoch, Accuracy, 'g', linewidth=2.0)
+    plt.title('Total_Epoch vs Accuracy')
+    plt.xlabel('Epochs')
+    plt.ylabel('Accuracy')
+    plot_2 = 'Accuracy_' + '.png'
+    plt.savefig(plot_2, dpi=600, bbox_inches='tight')
+    plt.show()
+
+    plt.figure(3)
+    plt.plot(Total_Epoch, Learning_rate, 'b', linewidth=2.0)
+    plt.title('Total_Epoch vs Learning_Rate')
+    plt.xlabel('Epochs')
+    plt.ylabel('Learning_Rate')
+    plot_3 = 'Learning_rate_' + '.png'
+    plt.savefig(plot_3, dpi=600, bbox_inches='tight')
+    plt.show()
+
+    # How often each window length was drawn during training
+    graphname = 'Iteration' + '_weightage' + '.png'
+    fig, ax = plt.subplots(figsize=(7, 5), dpi=100)
+    ax = sns.countplot(x=Times, palette=["#fbab17", "#0515bf", "#10a310", "#e9150d"])
+    ax.set_xticklabels(class_names)
+    ax.xaxis.label.set_size(10)
+    plt.savefig(graphname, bbox_inches='tight', pad_inches=0.1, dpi=800)
+    plt.show()
+    plt.clf()
+
+#%%
+
+def classweight(values, counts):
+    """Inverse-frequency class weights: weight_i = 1 - count_i / total."""
+    class_weight = []
+    tot = sum(counts)
+    for group in counts:
+        value = 1 - (group / tot)
+        print(value)
+        class_weight.append(value)
+    return np.array(class_weight)
+#%%
+
+def windowresults(testset, model, device, window):
+
+    y_pred = []
+    y_true = []
+    model.eval()
+    # Iterate over the test data
+    for batches in testset:
+
+        data, output = batches
+        data, output = data.to(device, dtype=torch.float), output.to(device, dtype=torch.long)
+        output = output.squeeze()
+        prediction = model(data)
+        prediction = torch.argmax(prediction, dim=1)
+
+        prediction = prediction.data.cpu().numpy()
+        output = output.data.cpu().numpy()
+
+        y_true.extend(output)      # save ground truth
+        y_pred.extend(prediction)  # save prediction
+
+    classes = ('LoF pores', 'Conduction mode', 'Keyhole pores')
+    plotname = 'CNN_LSTM_Multivariate_' + str(window) + '_confusion_matrix' + '.png'
+
+    plt.figure()
+    plot_confusion_matrix(y_true, y_pred, classes, plotname)
+
+#%%
+
+def distribution_plot(data, window_length):
+
+    data = data.cpu().detach().numpy()
+    df = pd.DataFrame(data, columns=['Back reflection', 'Infra-red',
+                                     'Visible', 'Acoustic signal'])
+    sns.set(style="white")
+    fig, ax = plt.subplots(figsize=(5, 3), dpi=800)
+
+    # fill=True replaces the shade=True argument deprecated in newer seaborn
+    sns.kdeplot(df['Back reflection'], fill=True, alpha=.5, color="red", ax=ax)
+    sns.kdeplot(df['Infra-red'], fill=True, alpha=.5, color="green", ax=ax)
+    sns.kdeplot(df['Visible'], fill=True, alpha=.5, color="#0000FF", ax=ax)
+    sns.kdeplot(df['Acoustic signal'], fill=True, alpha=.5, color="#FFD700", ax=ax)
+    plt.title("Saliency distribution across sensors \n for a window length of "
+              + str(window_length) + " ms")
+    plt.legend(labels=["Back reflection", "Infra-red", "Visible", "Acoustic signal"])
+    plt.xlim([0.0, 0.35])
+    plt.ylim([0.0, 100])
+    plt.xlabel('Derivative relative amplitude (r.u)')
+    title = str(window_length) + '_' + '.png'
+    plt.savefig(title, bbox_inches='tight')
+    plt.show()
\ No newline at end of file
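Finally, a hedged smoke test for the confusion-matrix helper with made-up labels (the class names follow the tuple used in `windowresults`; the output file name is hypothetical):

```python
# Quick check of plot_confusion_matrix with fabricated predictions
import numpy as np
from Utils import plot_confusion_matrix

y_true = np.array([0, 0, 1, 1, 2, 2])
y_pred = np.array([0, 1, 1, 1, 2, 0])
classes = ('LoF pores', 'Conduction mode', 'Keyhole pores')
plot_confusion_matrix(y_true, y_pred, classes, 'demo_confusion_matrix.png')
```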