diff --git a/EventDetection.py b/EventDetection.py index 534d2c2..d71e97c 100644 --- a/EventDetection.py +++ b/EventDetection.py @@ -1,306 +1,316 @@ import NanoporeClasses as NC import shelve import os import glob from time import sleep import Functions from pprint import pprint import matplotlib.pyplot as plt from matplotlib.ticker import EngFormatter import numpy as np import sys import argparse import datetime from tkinter.filedialog import askopenfilenames,askdirectory import timeit import LoadData timeInSec= EngFormatter(unit='s', places=2) #Default parameters extension = '*.log' coefficients = {'a': 0.999, 'E': 0, 'S': 5, - 'eventlengthLimit': 200e-3, + 'maxEventLength': 200e-3, 'minEventLength': 500e-6, 'hbook': 1, 'delta': 0.2e-9, 'ChimeraLowPass': 10e3} +def GetParameters(): + print("Usage:") + print("run(inputData, newExtension=None, newCoefficients={}, outputFile=None, force=False, cut=False, verbose=False)") + print() + print("Default extension:") + print(extension) + print() + print("Default coefficients:") + pprint(coefficients) + def batcheventdetection(folder,extension,coefficients, verbose=True, forceRun=False, CutTraces=False): #Create new class that contains all events AllEvents=NC.AllEvents() print('found ' + str(len(glob.glob(os.path.join(folder,extension)))) + ' files.') #Loop over all files in folder for fullfilename in glob.glob(os.path.join(folder,extension)): #Extract filename and generate filename to save located events if verbose: print('analysing '+fullfilename) filename, file_extension = os.path.splitext(os.path.basename(fullfilename)) directory=os.path.dirname(fullfilename) + os.sep + 'analysisfiles' if not os.path.exists(directory): os.makedirs(directory,exist_ok=True) savefilename=directory + os.sep + filename + 'data' shelfFile = shelve.open(savefilename) if ~forceRun: try: #Check if file can be loaded coefficientsloaded=shelfFile['coefficients'] tEL=shelfFile['translocationEvents'] # If coefficients before are not identical, 
analysis needs to run again if (coefficientsloaded==coefficients): if verbose: print('loaded from file') else: forceRun = True except: #Except if cannot be loaded, analysis needs to run forceRun=True pass if forceRun: #Extract list of events for this file tEL=eventdetection(fullfilename,coefficients,verbose, CutTraces) if verbose: print('Saved {} events'.format(len(tEL.events))) #Open savefile and save events for this file shelfFile['translocationEvents']=tEL shelfFile['coefficients']=coefficients if verbose: print('saved to file') shelfFile.close() #Add events to the initially created class that contains all events AllEvents.AddEvent(tEL) return AllEvents def eventdetection(fullfilename, coefficients, verbose=True, CutTraces=False, showFigures = False): if 'ChimeraLowPass' in coefficients: ChimeraLowPass=coefficients['ChimeraLowPass'] else: ChimeraLowPass=None loadedData=LoadData.OpenFile(fullfilename, ChimeraLowPass, True, CutTraces) minTime=coefficients['minEventLength'] IncludedBaseline = int(1e-2 * loadedData['samplerate']) delta=coefficients['delta'] hbook=coefficients['hbook'] if verbose: print('Recursive lowpass...', end='') #Call RecursiveLowPassFast to detect events in current trace start_time = timeit.default_timer() if 'i1Cut' in loadedData: events=[] for cutTrace in loadedData['i1Cut']: events.extend(Functions.RecursiveLowPassFast(cutTrace,coefficients,loadedData['samplerate'])) else: events=Functions.RecursiveLowPassFast(loadedData['i1'],coefficients,loadedData['samplerate']) if verbose: print('done. 
Calculation took {}'.format(timeInSec.format_data(timeit.default_timer() - start_time))) print('nr events: {}'.format(len(events))) #CusumParameters = [delta sigma Normalize ImpulsionLimit IncludedBaseline TooMuchLevels IncludedBaselinePlots 1 # SamplingFrequency]; #[EventDatabase] = event_detection(RawSignal, CusumParameters, RoughEventLocations, lowpasscutoff); #Make a new class translocationEventList that contains all the found events in this file translocationEventList=NC.AllEvents() #Plot current file if showFigures: plt.figure(1) plt.cla() timeVals=np.linspace(0, len(loadedData['i1'])/loadedData['samplerate'], num=len(loadedData['i1'])) plt.plot(timeVals,loadedData['i1']) plt.draw() plt.pause(0.001) if verbose: print('CUSUM fitting...', end='') #Call RecursiveLowPassFast to detect events in current trace start_time = timeit.default_timer() cusumEvents = 0 #Loop over all detected events for event in events: beginEvent = event[0] endEvent = event[1] localBaseline = event[2] stdEvent = event[3] lengthevent=endEvent-beginEvent # Check if the start of the event is > IncludedBaseline then compare eventtime to minimal and maxima; if beginEvent > IncludedBaseline: # Extract trace and extract baseline Trace = loadedData['i1'][int(beginEvent):int(endEvent)] traceBefore = loadedData['i1'][int(beginEvent) - IncludedBaseline:int(beginEvent)] traceAfter = loadedData['i1'][int(endEvent):int(endEvent) + IncludedBaseline] if lengthevent<=(minTime*loadedData['samplerate']) and lengthevent>3: newEvent = NC.TranslocationEvent(fullfilename,'Impulse') # Add Trace, mean of the event,the samplerate, coefficients and baseline to the New Event class newEvent.SetEvent(Trace, beginEvent,localBaseline, loadedData['samplerate']) if 'blockLength' in loadedData: voltI = int(beginEvent/loadedData['blockLength']) else: voltI = int(0) newEvent.SetCoefficients(coefficients, loadedData['v1'][voltI]) newEvent.SetBaselineTrace(traceBefore, traceAfter) # Add event to TranslocationList 
translocationEventList.AddEvent(newEvent) - elif lengthevent>(minTime*loadedData['samplerate']) and lengthevent<(coefficients['eventlengthLimit']*loadedData['samplerate']): + elif lengthevent>(minTime*loadedData['samplerate']) and lengthevent<(coefficients['maxEventLength']*loadedData['samplerate']): #Make new event class #CUSUM fit sigma = np.sqrt(stdEvent) h = hbook * delta / sigma (mc, kd, krmv)= Functions.CUSUM(loadedData['i1'][int(beginEvent) - IncludedBaseline: int(endEvent) + IncludedBaseline], delta, h) krmv = [krmvVal + int(beginEvent) - IncludedBaseline + 1 for krmvVal in krmv] #Add Trace, mean of the event,the samplerate, coefficients and baseline to the New Event class if len(krmv)>2: newEvent = NC.TranslocationEvent(fullfilename, 'Real') Trace=loadedData['i1'][int(krmv[0]):int(krmv[-1])] traceBefore = loadedData['i1'][int(krmv[0]) - IncludedBaseline:int(krmv[0])] traceAfter = loadedData['i1'][int(krmv[-1]):int(krmv[-1]) + IncludedBaseline] beginEvent=krmv[0] else: newEvent = NC.TranslocationEvent(fullfilename, 'Rough') newEvent.SetEvent(Trace,beginEvent,localBaseline,loadedData['samplerate']) newEvent.SetBaselineTrace(traceBefore, traceAfter) newEvent.SetCUSUMVariables(mc, kd, krmv) cusumEvents+=1 if 'blockLength' in loadedData: voltI = int(beginEvent/loadedData['blockLength']) else: voltI = int(0) newEvent.SetCoefficients(coefficients,loadedData['v1'][voltI]) #Add event to TranslocationList translocationEventList.AddEvent(newEvent) if verbose: print('done. 
Calculation took {}'.format(timeInSec.format_data(timeit.default_timer() - start_time))) print('{} events fitted'.format(cusumEvents)) #Plot events if True if showFigures: minVal=np.max(loadedData['i1'])#-1.05e-9 #np.min(loadedData['i1']) maxVal=np.max(loadedData['i1'])+0.05e-9#-1.05e-9 #np.min(loadedData['i1']) for i in range(len(events)): beginEvent=events[i][0] endEvent = events[i][1] if beginEvent>100 and (endEvent - beginEvent) >= (minTime * loadedData['samplerate']) and (endEvent - beginEvent) < ( - coefficients['eventlengthLimit'] * loadedData['samplerate']): + coefficients['maxEventLength'] * loadedData['samplerate']): plt.plot([beginEvent/loadedData['samplerate'], beginEvent/loadedData['samplerate']], [minVal, maxVal], 'y-', lw=1) #plt.draw() plt.show() #plt.pause(1) return translocationEventList def LoadEvents(loadname): #Check if loadname is a directory or not if os.path.isdir(loadname): #create standard filename for loading loadFile = os.path.join(loadname,os.path.basename(loadname)+'_Events') else: loadFile, file_extension = os.path.splitext(loadname) #Open file and extract TranslocationEvents shelfFile=shelve.open(loadFile) TranslocationEvents=shelfFile['TranslocationEvents'] shelfFile.close() AllEvents=NC.AllEvents() AllEvents.AddEvent(TranslocationEvents) AllEvents.SetFolder(loadname) return AllEvents def run(inputData, newExtension=None, newCoefficients={}, outputFile=None, force=False, cut=False, verbose=False): if newExtension is None: newExtension = extension for key in newCoefficients: coefficients[key]=newCoefficients[key] if os.path.isdir(inputData): TranslocationEvents=batcheventdetection(inputData, newExtension, coefficients, verbose, force, cut) else: TranslocationEvents=eventdetection(inputData,coefficients, verbose, cut) #Check if list is empty if outputFile is not None and TranslocationEvents.events: if os.path.isdir(inputData): outputData = inputData + os.sep + 'Data' + os.sep + 'Data' + datetime.date.today().strftime("%Y%m%d") 
LoadData.SaveVariables(outputFile,TranslocationEvents=TranslocationEvents) return True else: return TranslocationEvents if __name__=='__main__': parser = argparse.ArgumentParser() parser.add_argument('-i', '--input', help='Input directory or file') parser.add_argument('-e', '--ext', help='Extension for input directory') parser.add_argument('-o', '--output', help='Output file for saving') parser.add_argument('-c', '--coeff', help='Coefficients for selecting events [-C filter E standarddev maxlength minlength', nargs = '+') parser.add_argument('-u', '--cut', help='Cut Traces before detecting event, prevent detecting appended chunks as event', action='store_true') parser.add_argument('-f', '--force', help='Force analysis to run (don''t load from file', action='store_true') args = parser.parse_args() inputData=args.input if inputData==None: inputData=askdirectory() outputData=args.output if outputData==None: if os.path.isdir(inputData): outputData = inputData + os.sep + 'Data' + os.sep + 'Data' + datetime.date.today().strftime("%Y%m%d") else: outputData=os.path.dirname(inputData) + os.sep + 'Data' + os.sep + 'Data' + datetime.date.today().strftime("%Y%m%d") if args.coeff is not None: if len(args.coeff) % 2 == 0: for i in range(0, len(args.coeff), 2): if i <= len(args.coeff): coefficients[str(args.coeff[i])]=float(args.coeff[i+1]) if args.ext is not None: extension = args.ext print('Loading from: ' + inputData) print('Saving to: ' + outputData) print('Coefficients are: ', end='') pprint(coefficients) if os.path.isdir(inputData): print('extension is: ' + extension +'\nStarting.... \n') TranslocationEvents=batcheventdetection(inputData, extension, coefficients, args.force, args.cut) else: print('Starting.... 
\n') TranslocationEvents=eventdetection(inputData,coefficients, args.cut) #Check if list is empty if TranslocationEvents.events: LoadData.SaveVariables(outputData,TranslocationEvents=TranslocationEvents) diff --git a/Functions.py b/Functions.py index a680a41..7f24445 100644 --- a/Functions.py +++ b/Functions.py @@ -1,889 +1,889 @@ # -*- coding: utf-8 -*- import math import os import pickle as pkl from scipy import signal import matplotlib.pyplot as plt import numpy as np import pandas as pd import scipy import scipy.signal as sig from matplotlib.ticker import EngFormatter from numpy import linalg as lin from scipy import constants as cst from scipy.optimize import curve_fit def LowPass(data, samplerate, lowPass): Wn = round(2 * lowPass / samplerate, 4) # [0,1] nyquist frequency b, a = signal.bessel(4, Wn, btype='low', analog=False) # 4-th order digital filter z, p, k = signal.tf2zpk(b, a) eps = 1e-9 r = np.max(np.abs(p)) approx_impulse_len = int(np.ceil(np.log(eps) / np.log(r))) Filt_sig = (signal.filtfilt(b, a, data, method='gust', irlen=approx_impulse_len)) ds_factor = np.ceil(samplerate / (2 * lowPass)) output = {} output['data'] = scipy.signal.resample(Filt_sig, int(len(data) / ds_factor)) output['samplerate'] = samplerate / ds_factor return output def GetKClConductivity(Conc, Temp): p = pkl.load(open('KCl_ConductivityValues.p', 'rb')) if Conc == 1.0: Conc = np.uint(1) return np.polyval(p[str(Conc)], Temp) def GetTempFromKClConductivity(Conc, Cond): p = pkl.load(open('KCl_ConductivityValues.p', 'rb')) if Conc == 1.0: Conc = np.uint(1) return (Cond - p[str(Conc)][1]) / p[str(Conc)][0] def RecursiveLowPassFast(signal, coeff, samplerate): padlen = np.uint64(samplerate) prepadded = np.ones(padlen) * np.mean(signal[0:1000]) signaltofilter = np.concatenate((prepadded, signal)) mltemp = scipy.signal.lfilter([1 - coeff['a'], 0], [1, -coeff['a']], signaltofilter) vltemp = scipy.signal.lfilter([1 - coeff['a'], 0], [1, -coeff['a']], np.square(signaltofilter - mltemp)) ml 
= np.delete(mltemp, np.arange(padlen)) vl = np.delete(vltemp, np.arange(padlen)) sl = ml - coeff['S'] * np.sqrt(vl) Ni = len(signal) points = np.array(np.where(signal <= sl)[0]) to_pop = np.array([]) for i in range(1, len(points)): if points[i] - points[i - 1] == 1: to_pop = np.append(to_pop, i) points = np.unique(np.delete(points, to_pop)) RoughEventLocations = [] NumberOfEvents = 0 for i in points: if NumberOfEvents is not 0: if i >= RoughEventLocations[NumberOfEvents - 1][0] and i <= RoughEventLocations[NumberOfEvents - 1][1]: continue NumberOfEvents += 1 start = i El = ml[i] + coeff['E'] * np.sqrt(vl[i]) Mm = ml[i] Vv = vl[i] duration = 0 endp = start if (endp + 1) < len(signal): - while signal[endp + 1] < El and endp < (Ni - 2): # and duration < coeff['eventlengthLimit']*samplerate: + while signal[endp + 1] < El and endp < (Ni - 2): # and duration < coeff['maxEventLength']*samplerate: duration += 1 endp += 1 - if duration >= coeff['eventlengthLimit'] * samplerate or endp > ( + if duration >= coeff['maxEventLength'] * samplerate or endp > ( Ni - 10): # or duration <= coeff['minEventLength'] * samplerate: NumberOfEvents -= 1 continue else: k = start while signal[k] < Mm and k > 1: k -= 1 start = k - 1 k2 = i + 1 # while signal[k2] > Mm: # k2 -= 1 # endp = k2 if start < 0: start = 0 RoughEventLocations.append((start, endp, ml[start], vl[start])) return np.array(RoughEventLocations) # , ml, vl, sl def RecursiveLowPassFastUp(signal, coeff, samplerate): ml = scipy.signal.lfilter([1 - coeff['a'], 0], [1, -coeff['a']], signal) vl = scipy.signal.lfilter([1 - coeff['a'], 0], [1, -coeff['a']], np.square(signal - ml)) sl = ml + coeff['S'] * np.sqrt(vl) Ni = len(signal) points = np.array(np.where(signal >= sl)[0]) to_pop = np.array([]) for i in range(1, len(points)): if points[i] - points[i - 1] == 1: to_pop = np.append(to_pop, i) points = np.delete(points, to_pop) points = np.delete(points, np.array(np.where(points == 0)[0])) RoughEventLocations = [] NumberOfEvents = 0 
for i in points: if NumberOfEvents is not 0: if i >= RoughEventLocations[NumberOfEvents - 1][0] and i <= RoughEventLocations[NumberOfEvents - 1][1]: continue NumberOfEvents += 1 start = i El = ml[i] + coeff['E'] * np.sqrt(vl[i]) Mm = ml[i] duration = 0 - while signal[i + 1] > El and i < (Ni - 2) and duration < coeff['eventlengthLimit'] * samplerate: + while signal[i + 1] > El and i < (Ni - 2) and duration < coeff['maxEventLength'] * samplerate: duration += 1 i += 1 - if duration >= coeff['eventlengthLimit'] * samplerate or i > (Ni - 10): + if duration >= coeff['maxEventLength'] * samplerate or i > (Ni - 10): NumberOfEvents -= 1 else: k = start while signal[k] > Mm and k > 2: k -= 1 start = k - 1 k2 = i + 1 while signal[k2] > Mm: k2 -= 1 endp = k2 RoughEventLocations.append((start, endp, ml[start], vl[start])) return np.array(RoughEventLocations) def Reshape1DTo2D(inputarray, buffersize): npieces = int(len(inputarray) / buffersize) voltages = np.array([], dtype=np.float64) currents = np.array([], dtype=np.float64) for i in range(1, npieces + 1): if i % 2 == 1: currents = np.append(currents, inputarray[(i - 1) * buffersize:i * buffersize - 1], axis=0) # print('Length Currents: {}'.format(len(currents))) else: voltages = np.append(voltages, inputarray[(i - 1) * buffersize:i * buffersize - 1], axis=0) # print('Length Voltages: {}'.format(len(voltages))) v1 = np.ones((len(voltages)), dtype=np.float64) i1 = np.ones((len(currents)), dtype=np.float64) v1[:] = voltages i1[:] = currents out = {'v1': v1, 'i1': i1} print('Currents:' + str(v1.shape)) print('Voltages:' + str(i1.shape)) return out def MakeIVData(output, approach='mean', delay=0.7, UseVoltageRange=0, verbose=False): (ChangePoints, sweepedChannel) = CutDataIntoVoltageSegments(output, verbose) if ChangePoints is 0: return 0 if output['graphene']: currents = ['i1', 'i2'] else: currents = ['i1'] Values = output[sweepedChannel][ChangePoints] Values = np.append(Values, output[sweepedChannel][::-1][0]) delayinpoints = 
np.int64(delay * output['samplerate']) if delayinpoints > ChangePoints[0]: raise ValueError("Delay is longer than Changepoint") # Store All Data All = {} for current in currents: Item = {} ItemExp = {} l = len(Values) Item['Voltage'] = np.zeros(l) Item['StartPoint'] = np.zeros(l, dtype=np.uint64) Item['EndPoint'] = np.zeros(l, dtype=np.uint64) Item['Mean'] = np.zeros(l) Item['STD'] = np.zeros(l) Item['SE'] = np.zeros(l) Item['SweepedChannel'] = sweepedChannel ItemExp['SweepedChannel'] = sweepedChannel Item['YorkFitValues'] = {} ItemExp['YorkFitValues'] = {} ### MEAN METHOD### # First item Item['Voltage'][0] = Values[0] trace = output[current][0 + delayinpoints:ChangePoints[0]] Item['StartPoint'][0] = 0 + delayinpoints Item['EndPoint'][0] = ChangePoints[0] Item['Mean'][0] = np.mean(trace) isProblematic = math.isclose(Values[0], 0, rel_tol=1e-3) or math.isclose(Values[0], np.min(Values), rel_tol=1e-3) or math.isclose( Values[0], np.max(Values), rel_tol=1e-3) if sweepedChannel is 'v2' and isProblematic: print('In Problematic If for Values: {}, index {}'.format(Values[0], 0)) Item['STD'][0] = np.std(trace[-np.int64(output['samplerate']):]) else: Item['STD'][0] = np.std(trace) Item['SE'][0] = Item['STD'][0] / np.sqrt(len(trace)) for i in range(1, len(Values) - 1): trace = output[current][ChangePoints[i - 1] + delayinpoints: ChangePoints[i]] Item['Voltage'][i] = Values[i] Item['StartPoint'][i] = ChangePoints[i - 1] + delayinpoints Item['EndPoint'][i] = ChangePoints[i] if len(trace) > 0: Item['Mean'][i] = np.mean(trace) isProblematic = math.isclose(Values[i], 0, rel_tol=1e-3) or math.isclose(Values[i], np.min(Values), rel_tol=1e-3) or math.isclose( Values[i], np.max(Values), rel_tol=1e-3) if sweepedChannel is 'v2' and isProblematic: print('In Problematic If for Values: {}, index {}'.format(Values[i], i)) Item['STD'][i] = np.std(trace[-np.int64(output['samplerate']):]) else: Item['STD'][i] = np.std(trace) Item['SE'][i] = Item['STD'][i] / np.sqrt(len(trace)) if verbose: 
print('{}, {},{}'.format(i, ChangePoints[i - 1] + delayinpoints, ChangePoints[i])) else: Item['Mean'][i] = np.NaN Item['STD'][i] = np.NaN # Last if 1: trace = output[current][ChangePoints[len(ChangePoints) - 1] + delayinpoints: len(output[current]) - 1] Item['Voltage'][-1:] = Values[len(Values) - 1] Item['StartPoint'][-1:] = ChangePoints[len(ChangePoints) - 1] + delayinpoints Item['EndPoint'][-1:] = len(output[current]) - 1 if len(trace) > 0: Item['Mean'][-1:] = np.mean(trace) isProblematic = math.isclose(Values[-1:], 0, rel_tol=1e-3) or math.isclose(Values[-1:], np.min(Values), rel_tol=1e-3) or math.isclose( Values[-1:], np.max(Values), rel_tol=1e-3) if sweepedChannel is 'v2' and isProblematic: print('In Problematic If for Values: {}, index {}'.format(Values[-1:], i)) Item['STD'][-1:] = np.std(trace[-np.int64(output['samplerate']):]) else: Item['STD'][-1:] = np.std(trace) else: Item['Mean'][-1:] = np.NaN Item['STD'][-1:] = np.NaN Item['SE'][-1:] = Item['STD'][-1:] / np.sqrt(len(trace)) ## GET RECTIFICATION FACTOR parts = {'pos': Item['Voltage'] > 0, 'neg': Item['Voltage'] < 0} a = {} b = {} sigma_a = {} sigma_b = {} b_save = {} x_values = {} for part in parts: (a[part], b[part], sigma_a[part], sigma_b[part], b_save[part]) = YorkFit( Item['Voltage'][parts[part]].flatten(), Item['Mean'][parts[part]].flatten(), 1e-12 * np.ones(len(Item['Voltage'][parts[part]].flatten())), Item['STD'][parts[part]].flatten()) factor = b['neg'] / b['pos'] Item['Rectification'] = factor ItemExp['Rectification'] = factor ###EXPONENTIAL METHOD### if approach is not 'mean': l = 1000 ItemExp['StartPoint'] = np.zeros(l, dtype=np.uint64) ItemExp['EndPoint'] = np.zeros(l, dtype=np.uint64) baselinestd = np.std(output[current][0:np.int64(1 * output['samplerate'])]) movingstd = pd.rolling_std(output[current], 10) # Extract The Desired Parts ind = (np.abs(movingstd - baselinestd) < 1e-9) & (np.abs(output[current]) < np.max(output[current]) / 2) # How Many Parts? 
restart = True counter = 0 for i, value in enumerate(ind): if value and restart: ItemExp['StartPoint'][counter] = i print('Start: {}\t'.format(i)) restart = False elif not value and not restart: if ((i - 1) - ItemExp['StartPoint'][counter]) > 1 * output['samplerate']: ItemExp['EndPoint'][counter] = i - 1 print('End: {}'.format(i - 1)) restart = True counter += 1 else: restart = True if not restart: counter += 1 ItemExp['EndPoint'][counter] = len(ind) ItemExp['StartPoint'] = ItemExp['StartPoint'][np.where(ItemExp['StartPoint'])] ItemExp['EndPoint'] = ItemExp['EndPoint'][np.where(ItemExp['EndPoint'])] NumberOfPieces = len(ItemExp['StartPoint']) ItemExp['Voltage'] = np.zeros(NumberOfPieces) ItemExp['ExpValues'] = np.zeros((3, NumberOfPieces)) ItemExp['STD'] = np.zeros(NumberOfPieces) ItemExp['Mean'] = np.zeros(NumberOfPieces) # Make It Piecewise for i in np.arange(0, NumberOfPieces): trace = output[current][ItemExp['StartPoint'][i]:ItemExp['EndPoint'][i]] popt, pcov = MakeExponentialFit(np.arange(len(trace)) / output['samplerate'], trace) ItemExp['Voltage'][i] = output[sweepedChannel][ItemExp['StartPoint'][i]] if popt[0]: ItemExp['ExpValues'][:, i] = popt ItemExp['Mean'][i] = popt[2] try: ItemExp['STD'][i] = np.sqrt(np.diag(pcov))[2] except RuntimeWarning: ItemExp['STD'][i] = np.std(trace) / np.sqrt(len(trace)) print('ExpFit: STD of voltage {} failed calculating...'.format(ItemExp['Voltage'][i])) else: print('Exponential Fit on for ' + current + ' failed at V=' + str(ItemExp['Voltage'][0])) ItemExp['Mean'][i] = np.mean(trace) ItemExp['STD'][i] = np.std(trace) ## FIT THE EXP CALCULATED VALUES ### (a, b, sigma_a, sigma_b, b_save) = YorkFit(ItemExp['Voltage'], ItemExp['Mean'], 1e-12 * np.ones(len(ItemExp['Voltage'])), ItemExp['STD']) x_fit = np.linspace(min(Item['Voltage']), max(Item['Voltage']), 1000) y_fit = scipy.polyval([b, a], x_fit) ItemExp['YorkFitValues'] = {'x_fit': x_fit, 'y_fit': y_fit, 'Yintercept': a, 'Slope': b, 'Sigma_Yintercept': sigma_a, 'Sigma_Slope': 
sigma_b, 'Parameter': b_save} All[current + '_Exp'] = ItemExp ##Remove NaNs nans = np.isnan(Item['Mean'][:]) nans = np.logical_not(nans) Item['Voltage'] = Item['Voltage'][nans] Item['StartPoint'] = Item['StartPoint'][nans] Item['EndPoint'] = Item['EndPoint'][nans] Item['Mean'] = Item['Mean'][nans] Item['STD'] = Item['STD'][nans] Item['SE'] = Item['SE'][nans] ## Restrict to set voltage Range if UseVoltageRange is not 0: print('Voltages Cut') ind = np.argwhere((Item['Voltage'] >= UseVoltageRange[0]) & (Item['Voltage'] <= UseVoltageRange[1])) Item['Voltage'] = Item['Voltage'][ind].flatten() Item['StartPoint'] = Item['StartPoint'][ind].flatten() Item['EndPoint'] = Item['EndPoint'][ind].flatten() Item['Mean'] = Item['Mean'][ind].flatten() Item['STD'] = Item['STD'][ind].flatten() Item['SE'] = Item['SE'][ind].flatten() ## FIT THE MEAN CALCULATED VALUES ### # Yorkfit (a, b, sigma_a, sigma_b, b_save) = YorkFit(Item['Voltage'].flatten(), Item['Mean'].flatten(), 1e-9 * np.ones(len(Item['Voltage'])), Item['STD'].flatten()) x_fit = np.linspace(min(Item['Voltage']), max(Item['Voltage']), 1000) y_fit = scipy.polyval([b, a], x_fit) Item['YorkFit'] = {'x_fit': x_fit, 'y_fit': y_fit, 'Yintercept': a, 'Slope': b, 'Sigma_Yintercept': sigma_a, 'Sigma_Slope': sigma_b, 'Parameter': b_save} # Polyfit p = np.polyfit(Item['Voltage'].flatten(), Item['Mean'].flatten(), 1) x_fit2 = np.linspace(min(Item['Voltage']), max(Item['Voltage']), 1000) y_fit2 = scipy.polyval(p, x_fit) Item['PolyFit'] = {'x_fit': x_fit2, 'y_fit': y_fit2, 'Yintercept': p[1], 'Slope': p[0]} All[current] = Item All['Currents'] = currents return All def ExpFunc(x, a, b, c): return a * np.exp(-b * x) + c def YorkFit(X, Y, sigma_X, sigma_Y, r=0): N_itermax = 10 # maximum number of interations tol = 1e-15 # relative tolerance to stop at N = len(X) temp = np.matrix([X, np.ones(N)]) # make initial guess at b using linear squares tmp = np.matrix(Y) * lin.pinv(temp) b_lse = np.array(tmp)[0][0] # a_lse=tmp(2); b = b_lse # initial 
guess omega_X = np.true_divide(1, np.power(sigma_X, 2)) omega_Y = np.true_divide(1, np.power(sigma_Y, 2)) alpha = np.sqrt(omega_X * omega_Y) b_save = np.zeros(N_itermax + 1) # vector to save b iterations in b_save[0] = b for i in np.arange(N_itermax): W = omega_X * omega_Y / (omega_X + b * b * omega_Y - 2 * b * r * alpha) X_bar = np.sum(W * X) / np.sum(W) Y_bar = np.sum(W * Y) / np.sum(W) U = X - X_bar V = Y - Y_bar beta = W * (U / omega_Y + b * V / omega_X - (b * U + V) * r / alpha) b = sum(W * beta * V) / sum(W * beta * U) b_save[i + 1] = b if np.abs((b_save[i + 1] - b_save[i]) / b_save[i + 1]) < tol: break a = Y_bar - b * X_bar x = X_bar + beta y = Y_bar + b * beta x_bar = sum(W * x) / sum(W) y_bar = sum(W * y) / sum(W) u = x - x_bar # %v=y-y_bar sigma_b = np.sqrt(1 / sum(W * u * u)) sigma_a = np.sqrt(1. / sum(W) + x_bar * x_bar * sigma_b * sigma_b) return (a, b, sigma_a, sigma_b, b_save) def DoubleExpFunc(x, Y0, percentFast, plateau, Kfast, Kslow): SpanFast = (Y0 - plateau) * percentFast * 0.01 SpanSlow = (Y0 - plateau) * (100 - percentFast) * 0.01 return plateau + ((Y0 - plateau) * percentFast * 0.01) * np.exp(-Kfast * x) + ( (Y0 - plateau) * (100 - percentFast) * 0.01) * np.exp(-Kslow * x) def MakeExponentialFit(xdata, ydata): try: popt, pcov = curve_fit(ExpFunc, xdata, ydata, method='lm', xtol=1e-20, ftol=1e-12, gtol=1e-20) # popt, pcov = curve_fit(DoubleExpFunc, xdata, ydata, p0 = (ydata[0], 50, ydata[-1], 1 / 15, 1 / 60)) return (popt, pcov) except RuntimeError: popt = (0, 0, 0) pcov = 0 return (popt, pcov) def CutDataIntoVoltageSegments(output, verbose=False): sweepedChannel = '' if output['type'] == 'ChimeraNotRaw' or (output['type'] == 'Axopatch' and not output['graphene']): ChangePoints = np.where(np.diff(output['v1']))[0] sweepedChannel = 'v1' if len(ChangePoints) is 0: print('No voltage sweeps in this file:\n{}'.format(output['filename'])) return (0, 0) elif (output['type'] == 'Axopatch' and output['graphene']): ChangePoints = 
np.where(np.diff(output['v1']))[0] sweepedChannel = 'v1' if len(ChangePoints) is 0: ChangePoints = np.where(np.diff(output['v2']))[0] if len(ChangePoints) is 0: print('No voltage sweeps in this file') return (0, 0) else: sweepedChannel = 'v2' if verbose: print('Cutting into Segments...\n{} change points detected in channel {}...'.format(len(ChangePoints), sweepedChannel)) return (ChangePoints, sweepedChannel) def CalculatePoreSize(G, L, s): # https://doi.org/10.1088/0957-4484/22/31/315101 return (G + np.sqrt(G * (G + 16 * L * s / np.pi))) / (2 * s) def CalculateCapillarySize(G, D=0.3e-3, t=3.3e-3, s=10.5): # DOI: 10.1021/nn405029j # s=conductance (10.5 S/m at 1M KCl) # t=taper length (3.3 mm) # D=diameter nanocapillary at the shaft (0.3 mm) return G * (4 * t / np.pi + 0.5 * D) / (s * D - 0.5 * G) def CalculateShrunkenCapillarySize(G, D=0.3e-3, t=3.3e-3, s=10.5, ts=500e-9, Ds=500e-9): # DOI: 10.1021/nn405029j # s=conductance (10.5 S/m at 1M KCl) # t=taper length (3.3 mm) # D=diameter nanocapillary at the shaft (0.3 mm) # ts=taper length of the shrunken part (543nm fit) # Ds=diameter nanocapillary at the shrunken part (514nm fit) return G * D * (8 * ts / np.pi + Ds) / ((2 * s * D * Ds) - (G * (8 * t / np.pi + Ds))) def CalculateConductance(L, s, d): return s * 1 / (4 * L / (np.pi * d ** 2) + 1 / d) def GetSurfaceChargeFromLeeEquation(s, c, D, G, L, A, B, version=1): lDu = 1 / (8 * np.pi * B * D * s) * ( version * np.sqrt( (4 * np.pi * A * D ** 2 * s + np.pi * B * D ** 2 * s - 4 * B * G * L - 8 * np.pi * D * G) ** 2 - 16 * np.pi * B * D * s * (np.pi * A * D ** 3 * s - 4 * A * D * G * L - 2 * np.pi * D ** 2 * G)) - 4 * np.pi * A * D ** 2 * s - np.pi * B * D ** 2 * s + 4 * B * G * L + 8 * np.pi * D * G ) e = cst.elementary_charge Na = cst.Avogadro return lDu * (2 * c * Na * 1e3 * e) def ConductivityFromConductanceModel(L, d, G): return G * (4 * L / (np.pi * d ** 2) + 1 / d) def RefinedEventDetection(out, AnalysisResults, signals, limit): for sig in signals: # Choose 
correct reference if sig is 'i1_Up': sig1 = 'i1' elif sig is 'i2_Up': sig1 = 'i2' else: sig1 = sig if sig is 'i1' or 'i1_Up': volt = 'v1' elif sig is 'i2' or 'i2_Up': volt = 'v2' if len(AnalysisResults[sig]['RoughEventLocations']) > 1: startpoints = np.uint64(AnalysisResults[sig]['RoughEventLocations'][:, 0]) endpoints = np.uint64(AnalysisResults[sig]['RoughEventLocations'][:, 1]) localBaseline = AnalysisResults[sig]['RoughEventLocations'][:, 2] localVariance = AnalysisResults[sig]['RoughEventLocations'][:, 3] CusumBaseline = 500 numberofevents = len(startpoints) AnalysisResults[sig]['StartPoints'] = startpoints AnalysisResults[sig]['EndPoints'] = endpoints AnalysisResults[sig]['LocalBaseline'] = localBaseline AnalysisResults[sig]['LocalVariance'] = localVariance AnalysisResults[sig]['NumberOfEvents'] = len(startpoints) deli = np.zeros(numberofevents) dwell = np.zeros(numberofevents) fitvalue = np.zeros(numberofevents) AllEvents = [] #####FIX THE UP AND DOWN STORY!! ALL THE PROBLEMS ARE IN HERE... 
for i in range(numberofevents): length = endpoints[i] - startpoints[i] if out[sig1][startpoints[i]] < 0 and (sig == 'i1_Up' or sig == 'i2_Up'): if length <= limit and length > 3: # Impulsion Fit to minimal value fitvalue[i] = np.max(out[sig1][startpoints[i] + np.uint(1):endpoints[i] - np.uint(1)]) elif length > limit: fitvalue[i] = np.mean(out[sig1][startpoints[i] + np.uint(5):endpoints[i] - np.uint(5)]) else: fitvalue[i] = np.max(out[sig1][startpoints[i]:endpoints[i]]) deli[i] = -(localBaseline[i] - fitvalue[i]) elif out[sig1][startpoints[i]] < 0 and (sig == 'i1' or sig == 'i2'): if length <= limit and length > 3: # Impulsion Fit to minimal value fitvalue[i] = np.min(out[sig1][startpoints[i] + np.uint(1):endpoints[i] - np.uint(1)]) elif length > limit: fitvalue[i] = np.mean(out[sig1][startpoints[i] + np.uint(5):endpoints[i] - np.uint(5)]) else: fitvalue[i] = np.min(out[sig1][startpoints[i]:endpoints[i]]) deli[i] = -(localBaseline[i] - fitvalue[i]) elif out[sig1][startpoints[i]] > 0 and (sig == 'i1_Up' or sig == 'i2_Up'): if length <= limit and length > 3: # Impulsion Fit to minimal value fitvalue[i] = np.max(out[sig1][startpoints[i] + np.uint(1):endpoints[i] - np.uint(1)]) elif length > limit: fitvalue[i] = np.mean(out[sig1][startpoints[i] + np.uint(5):endpoints[i] - np.uint(5)]) else: fitvalue[i] = np.max(out[sig1][startpoints[i]:endpoints[i]]) deli[i] = (localBaseline[i] - fitvalue[i]) elif out[sig1][startpoints[i]] > 0 and (sig == 'i1' or sig == 'i2'): if length <= limit and length > 3: # Impulsion Fit to minimal value fitvalue[i] = np.min(out[sig1][startpoints[i] + np.uint(1):endpoints[i] - np.uint(1)]) elif length > limit: fitvalue[i] = np.mean(out[sig1][startpoints[i] + np.uint(5):endpoints[i] - np.uint(5)]) else: fitvalue[i] = np.min(out[sig1][startpoints[i]:endpoints[i]]) deli[i] = (localBaseline[i] - fitvalue[i]) else: print( 'Strange event that has to come from a neighboring universe...Computer will self-destruct in 3s!') dwell[i] = (endpoints[i] - 
def MakeIV(filenames, directory, conductance=10, title=''):
    """Fit and save an IV figure for every file in *filenames*.

    Parameters
    ----------
    filenames : sequence of str
        Data files to process (opened via LoadData.OpenFile).
    directory : str
        Output directory for the .png/.eps figures (created if missing).
    conductance : float or sequence, optional
        Specific conductance used for the pore-size annotation. A scalar
        (or length-1 sequence) is broadcast to one entry per file.
    title : str or sequence of str, optional
        Per-file figure title; a single string is broadcast to all files.

    Side effects: writes figure files into *directory*; no return value.
    """
    # BUGFIX: the original did `len(conductance) is 1`, which raises
    # TypeError for the scalar default (len of int) and compares with `is`.
    if np.isscalar(conductance) or len(conductance) == 1:
        conductance = np.ones(len(filenames)) * conductance
    # BUGFIX: with the default title='' the lookup title[idx] raised
    # IndexError; broadcast a single string to all files.
    if isinstance(title, str):
        title = [title] * len(filenames)
    for idx, filename in enumerate(filenames):
        print(filename)
        output = LoadData.OpenFile(filename)
        if not os.path.exists(directory):
            os.makedirs(directory)
        AllData = MakeIVData(output, delay=2)
        if AllData == 0:
            print('!!!! No Sweep in: ' + filename)
            continue
        # Transverse (graphene) channel IV, when the file has one.
        if output['graphene']:
            figIV2 = plt.figure(3)
            ax2IV = figIV2.add_subplot(111)
            ax2IV = PlotIV(output, AllData, current='i2', unit=1e9, axis=ax2IV, WithFit=1, title=title[idx])
            figIV2.tight_layout()
            figIV2.savefig(directory + os.sep + str(os.path.split(filename)[1]) + '_IV_i2.png', dpi=300)
            figIV2.savefig(directory + os.sep + str(os.path.split(filename)[1]) + '_IV_i2.eps')
            figIV2.clear()
        # Ionic channel IV with pore-size annotation (20 nm length assumed
        # by the original code — TODO confirm this hard-coded value).
        figIV = plt.figure(2)
        ax1IV = figIV.add_subplot(111)
        ax1IV = PlotIV(output, AllData, current='i1', unit=1e9, axis=ax1IV, WithFit=1,
                       PoreSize=[conductance[idx], 20e-9], title=title[idx])
        figIV.tight_layout()
        figIV.savefig(directory + os.sep + str(os.path.split(filename)[1]) + 'IV_i1.png', dpi=300)
        figIV.savefig(directory + os.sep + str(os.path.split(filename)[1]) + 'IV_i1.eps')
        figIV.clear()
def PlotExtractedPart(output, AllData, current='i1', unit=1e9, axis='', axis2=''):
    """Plot the full time trace on *axis*, highlight the extracted IV sweep
    segments in red, and plot the swept-voltage channel on *axis2*.

    output:  trace dict providing the current array, 'samplerate' and 'filename'.
    AllData: per-channel dict with 'StartPoint'/'EndPoint' index arrays and the
             name of the 'SweepedChannel'.
    current: key of the current channel to plot ('i1' or 'i2').
    unit:    multiplier applied to the current before plotting (1e9 -> nA).
    axis, axis2: matplotlib axes to draw on. NOTE(review): despite the ''
             defaults, real axes must be passed — '' has no .plot method.
    Returns (axis, axis2).
    """
    # Time base reconstructed from the sample rate.
    time = np.arange(0, len(output[current])) / output['samplerate']
    axis.plot(time, output[current] * unit, 'b', label=str(os.path.split(output['filename'])[1])[:-4])
    # NOTE(review): the '[nA]' label assumes the default unit=1e9.
    axis.set_ylabel('Current ' + current + ' [nA]')
    axis.set_title('Time Trace')
    # Overlay each extracted sweep segment in red.
    for i in range(0, len(AllData[current]['StartPoint'])):
        axis.plot(time[AllData[current]['StartPoint'][i]:AllData[current]['EndPoint'][i]],
                  output[current][AllData[current]['StartPoint'][i]:AllData[current]['EndPoint'][i]] * unit, 'r')
    # Voltage channel that was swept during the IV measurement.
    axis2.plot(time, output[AllData[current]['SweepedChannel']], 'b',
               label=str(os.path.split(output['filename'])[1])[:-4])
    axis2.set_ylabel('Voltage ' + AllData[current]['SweepedChannel'] + ' [V]')
    axis2.set_xlabel('Time')
    return (axis, axis2)
def CUSUM(input, delta, h):
    """Two-sided CUSUM change-point detection on a 1-D signal.

    input : 1-D array-like signal (NOTE(review): parameter shadows the
            builtin `input`; kept for interface compatibility).
    delta : expected jump magnitude between levels.
    h     : decision threshold on the cumulative statistics gp/gn.

    Returns (mc, kd, krmv):
      mc   - piecewise-constant fit (one level value per sample),
      kd   - indices where a change was *detected* (threshold crossed),
      krmv - estimated change *locations* (argmin of the cumulative sums).
    """
    # Nd: number of detected changes; k0: start of the current segment.
    Nd = k0 = 0
    kd = []
    krmv = []
    k = 1
    l = len(input)
    # Running per-segment mean/variance and the positive/negative
    # instantaneous (sp/sn), cumulative (Sp/Sn) and decision (gp/gn) sums.
    m = np.zeros(l)
    m[k0] = input[k0]
    v = np.zeros(l)
    sp = np.zeros(l)
    Sp = np.zeros(l)
    gp = np.zeros(l)
    sn = np.zeros(l)
    Sn = np.zeros(l)
    gn = np.zeros(l)
    while k < l:
        # Segment statistics from the last change point up to sample k.
        m[k] = np.mean(input[k0:k + 1])
        v[k] = np.var(input[k0:k + 1])
        # Log-likelihood-ratio increments for an upward/downward jump of
        # size delta (NOTE(review): divides by v[k] — zero variance at the
        # start of a segment would divide by zero).
        sp[k] = delta / v[k] * (input[k] - m[k] - delta / 2)
        sn[k] = -delta / v[k] * (input[k] - m[k] + delta / 2)
        Sp[k] = Sp[k - 1] + sp[k]
        Sn[k] = Sn[k - 1] + sn[k]
        # Decision functions are the running sums clipped at zero.
        gp[k] = np.max([gp[k - 1] + sp[k], 0])
        gn[k] = np.max([gn[k - 1] + sn[k], 0])
        if gp[k] > h or gn[k] > h:
            kd.append(k)
            # The change location is the argmin of the cumulative sum over
            # the current segment, offset back to absolute indices.
            if gp[k] > h:
                kmin = np.argmin(Sp[k0:k + 1])
                krmv.append(kmin + k0)
            else:
                kmin = np.argmin(Sn[k0:k + 1])
                krmv.append(kmin + k0)
            # Re-initialize all statistics for the next segment.
            k0 = k
            m[k0] = input[k0]
            v[k0] = sp[k0] = Sp[k0] = gp[k0] = sn[k0] = Sn[k0] = gn[k0] = 0
            Nd = Nd + 1
        k += 1
    # Build the piecewise-constant reconstruction from the change points.
    if Nd == 0:
        mc = np.mean(input) * np.ones(k)
    elif Nd == 1:
        mc = np.append(m[krmv[0]] * np.ones(krmv[0]), m[k - 1] * np.ones(k - krmv[0]))
    else:
        mc = m[krmv[0]] * np.ones(krmv[0])
        for ii in range(1, Nd):
            mc = np.append(mc, m[krmv[ii]] * np.ones(krmv[ii] - krmv[ii - 1]))
        mc = np.append(mc, m[k - 1] * np.ones(k - krmv[Nd - 1]))
    return (mc, kd, krmv)


def CondPoreSizeTick(x, pos):
    """Matplotlib tick formatter: show a conductance value together with the
    pore diameter it implies (CalculatePoreSize with L=1e-9 m, s=10 S/m
    hard-coded — TODO confirm these defaults match the plots using it)."""
    formCond = EngFormatter(unit='S')
    formPoreSize = EngFormatter(unit='m')
    outstring = '{}, {}'.format(formCond.format_eng(x), formPoreSize.format_eng(CalculatePoreSize(x, 1e-9, 10)))
    return outstring
## Valid Only for same valency and c1 = c2 epsilon_r = 87.740 - 0.40008 * T + 9.398e-4 * T ** 2 - 1.410e-6 * T ** 3 print('Dielectric of Water is {}'.format(epsilon_r)) n = c * 1e3 * cst.Avogadro formCond = EngFormatter(unit='m') k = np.sqrt((cst.e ** 2 * 2 * n) / (cst.k * (T + 273.15) * cst.epsilon_0 * epsilon_r)) return [k, 1 / k] # , '{}'.format(formCond.format_eng(1/k))] def ElectrostaticPotential(T, sigma, Tau=5e18, pK=7.9, pH=11): return cst.k * T / cst.e * (np.log(-sigma / (cst.e * Tau + sigma)) + (pK - pH) * np.log(10)) def GetTempRedoxPotential(Cdiff=1e-3 / 1e-1, T=293.15): return cst.R * T / cst.physical_constants['Faraday constant'][0] * np.log(Cdiff) def GetRedoxError(cmax, cmin, ErrorPercent=0.1, T=293.15): cmaxErr = cmax * ErrorPercent cminErr = cmin * ErrorPercent return cst.R * T / cst.physical_constants['Faraday constant'][0] * np.sqrt( (1 / cmax ** 2 * cmaxErr ** 2 + 1 / cmin ** 2 * cminErr ** 2)) ##Result: Negative ion divided by positive ion # Goldman–Hodgkin–Katz voltage equation def GetIonSelectivityWithoutPotential(c_trans, c_cis, Erev, T): n_trans = c_trans # *1e3#*cst.Avogadro n_cis = c_cis # *1e3#*cst.Avogadro A = Erev * cst.physical_constants['Faraday constant'][0] / (cst.R * T) return -(n_trans - n_cis * np.exp(-A)) * (1 - np.exp(A)) / ((n_trans - n_cis * np.exp(A)) * (1 - np.exp(-A))) def GetIonSelectivityWithPotential(c_trans, c_cis, Erev, T, phi): sel1 = GetIonSelectivityWithoutPotential(c_trans, c_cis, Erev, T) return sel1 * np.exp((-2 * phi * cst.physical_constants['Faraday constant'][0]) / (cst.R * T)) def Selectivity(ConcGradient, V): return V * cst.physical_constants['Faraday constant'][0] / ((cst.R * 293.15) * np.log(ConcGradient)) def GetRedox(cmax, cmin, T=295.15): return cst.R * T / cst.physical_constants['Faraday constant'][0] * np.log(cmax / cmin) diff --git a/MakeIndividualIVs.py b/MakeIndividualIVs.py index ff3c0a1..4084cca 100644 --- a/MakeIndividualIVs.py +++ b/MakeIndividualIVs.py @@ -1,198 +1,204 @@ # -*- coding: 
def GetParameters():
    """Print the usage of run() followed by the module's default Parameters."""
    usage = (
        "Usage:",
        "run(filenames,newParameters={},verbose=False)",
        "",
        "Default Parameters:",
    )
    for line in usage:
        print(line)
    pprint(Parameters)
def run(filenames, newParameters=None, verbose=False):
    """Fit an IV curve for every file and save annotated figures + CSV data.

    filenames     : iterable of data file paths.
    newParameters : optional dict merged into the module-level Parameters
                    (the global is updated in place, as before). The default
                    was a mutable dict literal; now None to avoid the
                    shared-mutable-default pitfall (behavior unchanged).
    verbose       : forwarded to LoadData.OpenFile.

    Side effects: chdir into each file's directory, writes figures and a CSV
    into '<dir>/<expname>_SavedImages', shows the figure via plt.show().
    """
    if newParameters:
        Parameters.update(newParameters)
    Type = Parameters['Type']
    CurveFit = Parameters['CurveFit']
    specificConductance = Parameters['specificConductance']
    for filename in filenames:
        os.chdir(os.path.dirname(filename))
        print(filename)
        output = LoadData.OpenFile(filename, verbose=verbose)
        if Parameters['reversePolarity']:
            print('Polarity Reversed!!!!')
            output['i1'] = -output['i1']
            output['v1'] = -output['v1']
        # Make dir to save images
        directory = (str(os.path.split(filename)[0]) + os.sep + expname + '_SavedImages')
        if not os.path.exists(directory):
            os.makedirs(directory)
        AllData = uf.MakeIVData(output, approach='mean', delay=Parameters['delay'])
        print('uf.MakeIVData')
        if AllData == 0:
            print('!!!! No Sweep in: ' + filename)
            continue
        # Transverse channel IV for graphene measurements.
        if output['graphene']:
            figIV2 = plt.figure(3, figsize=(10, 10))
            figIV2.clear()
            ax2IV = figIV2.add_subplot(111)
            ax2IV = uf.PlotIV(output, AllData, current='i2', unit=1e9, axis=ax2IV, WithFit=1)
            figIV2.tight_layout()
            figIV2.savefig(directory + os.sep + str(os.path.split(filename)[1]) + '_IV_i2.png', dpi=300)
            figIV2.savefig(directory + os.sep + str(os.path.split(filename)[1]) + '_IV_i2.eps')
        figIV = plt.figure(2, figsize=(10, 7))
        ax1IV = figIV.add_subplot(111)
        current = 'i1'
        Slope = AllData[current][CurveFit]['Slope']
        Yintercept = AllData[current][CurveFit]['Yintercept']
        # Geometry annotation depends on the device model in Parameters.
        if Type == 'Nanopore':
            poreLength = Parameters['poreLength']
            textstr = 'Nanopore Size\n\nSpecific Conductance: {}\nLength: {}\n\nConductance: {}\nDiameter: {}'\
                .format(SpesCond.format_data(specificConductance), size.format_data(poreLength),
                        Cond.format_data(Slope),
                        size.format_data(uf.CalculatePoreSize(Slope, poreLength, specificConductance)))
        elif Type == 'Nanocapillary':
            taperLength = Parameters['taperLength']
            innerDiameter = Parameters['innerDiameter']
            # NOTE: 'lenghth' typo kept — this is user-visible runtime output.
            textstr = 'Nanocapillary Size\n\nSpecific Conductance: {}\nTaper lenghth {}:\nInner diameter: {}:\n\nConductance: {}\nDiameter: {}'.\
                format(SpesCond.format_data(specificConductance), size.format_data(taperLength),
                       size.format_data(innerDiameter), Cond.format_data(Slope),
                       size.format_data(uf.CalculateCapillarySize(Slope, innerDiameter, taperLength, specificConductance)))
        elif Type == 'NanocapillaryShrunken':
            taperLength = Parameters['taperLength']
            innerDiameter = Parameters['innerDiameter']
            taperLengthShaft = Parameters['taperLengthShaft']
            innerDiameterShaft = Parameters['innerDiameterShaft']
            NCSize = uf.CalculateShrunkenCapillarySize(Slope, innerDiameter, taperLength, specificConductance,
                                                       taperLengthShaft, innerDiameterShaft)
            textstr = 'Shrunken Nanocapillary Size\n\nSpecific Conductance: {}\nTaper length: {}\nInner diameter: {}\nTaper length at shaft: {}' \
                      '\nInner Diameter at shaft: {}:\n\nConductance: {}\nDiameter: {}'.\
                format(SpesCond.format_data(specificConductance), size.format_data(taperLength),
                       size.format_data(innerDiameter), size.format_data(taperLengthShaft),
                       size.format_data(innerDiameterShaft), Cond.format_data(Slope), size.format_data(NCSize))
        ax1IV.text(0.05, 0.95, textstr, transform=ax1IV.transAxes, fontsize=12,
                   verticalalignment='top', bbox=dict(boxstyle='round', facecolor='wheat', alpha=0.5))
        ind = np.argsort(AllData[current]['Voltage'])
        ax1IV.errorbar(AllData[current]['Voltage'][ind], AllData[current]['Mean'][ind],
                       yerr=AllData[current]['SE'][ind], fmt='o', color='b')
        ax1IV.plot(AllData[current]['Voltage'][ind],
                   np.polyval([Slope, Yintercept], AllData[current]['Voltage'][ind]), color='r')
        ax1IV.set_title(str(os.path.split(filename)[1]) + '\nR=' + Res.format_data(1 / Slope) +
                        ', G=' + Cond.format_data(Slope))
        ax1IV.set_ylabel('Current')
        ax1IV.set_xlabel('Voltage')
        ax1IV.xaxis.set_major_formatter(EngFormatter(unit='V'))
        ax1IV.yaxis.set_major_formatter(EngFormatter(unit='A'))
        figIV.savefig(directory + os.sep + str(os.path.split(filename)[1]) + Type + 'IV_i1.pdf', transparent=True)
        # Dump the averaged IV points next to the figure.
        x = AllData[current]['Voltage'][ind]
        y = AllData[current]['Mean'][ind]
        csvfile = directory + os.sep + str(os.path.split(filename)[1]) + Type + 'IV_i1.csv'
        # BUGFIX: the original `with open(...) as output:` shadowed the
        # `output` data dict; renamed the file handle.
        with open(csvfile, 'w') as csv_out:
            writer = csv.writer(csv_out, lineterminator='\n')
            for xi, yi in zip(x, y):
                writer.writerow([xi, yi])
        plt.show()
        figIV.clear()


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--input', help='input file')
    parser.add_argument('-c', '--coeff', help='Coefficients for calculating pore size', nargs='+')
    # (a stray bare `parser` expression statement was removed here)
    args = parser.parse_args()
    inputData = args.input
    if inputData is None:
        inputData = askopenfilenames()
    else:
        inputData = {inputData}
    newParameters = {}
    # Coefficients arrive as flat key/value pairs: -c key1 val1 key2 val2 ...
    # (the original also had an always-true `if i <= len(args.coeff)` guard).
    if args.coeff is not None and len(args.coeff) % 2 == 0:
        for i in range(0, len(args.coeff), 2):
            newParameters[str(args.coeff[i])] = args.coeff[i + 1]
    run(inputData, newParameters)
# Load File, empty string prompts a pop.up window for file selection. Else a file-path can be given root.update() -filenames = ['/Users/migraf/Desktop/Temp/BigBerta_Noise_2.dat'] -#filenames = askopenfilenames() +#filenames = ['/Users/migraf/Desktop/Temp/BigBerta_Noise_2.dat'] +filenames = askopenfilenames() root.destroy() # Make Dir to save images directory = (str(os.path.split(filenames[0])[0]) + os.sep + foldername) if not os.path.exists(directory): os.makedirs(directory) if SaveAsPDF: pp = PdfPages(directory + os.sep + '_ALLPSD.pdf') for filename in filenames: print(filename) #filename = '/Users/migraf/Desktop/TestTrace/07B_10mMKClBoth_1kBDN_BothChannels12.dat' inp = f.OpenFile(filename) folder = str(os.path.split(filename)[0]) + os.sep +'PSD' file = os.sep + str(os.path.split(filename)[1][:-4]) if not os.path.exists(folder): os.makedirs(folder) fig1 = plt.figure() ax = fig1.add_subplot(211) ax2 = fig1.add_subplot(212, sharex = ax) fr, Pxx_den = scipy.signal.periodogram(inp['i1'], inp['samplerate']) #f, Pxx_den = scipy.signal.welch(input, samplerate, nperseg=10*256, scaling='spectrum') ax.set_ylabel(r'PSD of Ionic [$\frac{pA^2}{Hz}$]') ax.set_xlabel('Frequency') ax.set_yscale('log') ax.set_xscale('log') #ax.set_ylim(1e4, 1e17) #ax.set_xlim(100e-3, 100e3) ax.xaxis.set_major_formatter(EngFormatter(unit='Hz')) ax.plot(fr, Pxx_den*1e24, 'b') ax.grid(1) ax.autoscale() if inp['graphene']: print(np.mean(inp['v2'])) fr, Pxx_den = scipy.signal.periodogram(inp['i2'], inp['samplerate']) ax2.plot(fr, Pxx_den * 1e24, 'r') ax2.set_ylabel(r'PSD of Transverse [$\frac{pA^2}{Hz}$]') ax2.set_xlabel('Frequency') ax2.set_yscale('log') ax2.set_xscale('log') ax2.xaxis.set_major_formatter(EngFormatter(unit='Hz')) ax2.plot(fr, Pxx_den * 1e24, 'r') ax2.grid(1) ax2.autoscale() #ax.legend(['Ion', 'Transverse']) textstr = 'STD ionic: {}\nSTD trans: {}'.format(pg.siFormat(np.std(inp['i1'])), pg.siFormat(np.std(inp['i2']))) ax2.set_title('STD trans: 
def Reshape1DTo2D(inputarray, buffersize):
    """De-interleave a flat acquisition buffer into current and voltage.

    The input alternates fixed-size chunks: odd chunks are current samples,
    even chunks are voltage samples. Returns {'v1': voltages, 'i1': currents}
    as float64 arrays.
    """
    # BUGFIX: the piece count was np.uint16(...), silently overflowing for
    # arrays with more than 65535 chunks; plain int floor division is exact.
    npieces = int(len(inputarray) // buffersize)
    voltages = np.array([], dtype=np.float64)
    currents = np.array([], dtype=np.float64)
    for i in range(1, npieces + 1):
        # NOTE(review): the slice stops at i*buffersize-1, dropping the last
        # sample of every chunk — kept as-is, TODO confirm intentional
        # (possibly a separator/marker sample in the acquisition format).
        chunk = inputarray[(i - 1) * buffersize:i * buffersize - 1]
        if i % 2 == 1:
            currents = np.append(currents, chunk, axis=0)
        else:
            voltages = np.append(voltages, chunk, axis=0)
    v1 = np.array(voltages, dtype=np.float64)
    i1 = np.array(currents, dtype=np.float64)
    out = {'v1': v1, 'i1': i1}
    # BUGFIX: the original printed the labels swapped ('Currents:' showed the
    # voltage shape and vice versa).
    print('Currents:' + str(i1.shape))
    print('Voltages:' + str(v1.shape))
    return out


def CalculatePoreSize(G, L, s):
    """Invert the cylindrical pore conductance model to a diameter [m].

    G: measured conductance [S]; L: pore (membrane) length [m];
    s: specific conductance of the electrolyte [S/m].
    Positive root of s*d^2 - G*d - 4*L*G/pi = 0.
    """
    return (G + np.sqrt(G * (G + 16 * L * s / np.pi))) / (2 * s)


def ImportAxopatchData(datafilename):
    """Read an Axopatch binary file (big-endian float32 payload after a
    10-line text header) into a trace dict.

    Returns a dict with 'type', 'graphene', 'samplerate', channel arrays
    ('i1'/'v1' and, for graphene files, 'i2'/'v2' plus 'FemtoLowPass') and
    the source 'filename'.
    """
    x = np.fromfile(datafilename, np.dtype('>f4'))
    graphene = 0
    # ROBUSTNESS: initialize header-derived values so a malformed header
    # yields None instead of an UnboundLocalError (the original could crash).
    samplerate = None
    femtoLP = None
    # BUGFIX: the original leaked this file handle; use a context manager.
    with open(datafilename, 'rb') as f:
        for _ in range(0, 10):
            a = str(f.readline())
            if 'Acquisition' in a or 'Sample Rate' in a:
                samplerate = int(''.join(ch for ch in a if ch.isdigit())) / 1000
            if 'FEMTO preamp Bandwidth' in a:
                femtoLP = int(''.join(ch for ch in a if ch.isdigit()))
            if 'I_Graphene' in a:
                graphene = 1
                print('This File Has a Graphene Channel!')
    end = len(x)
    if graphene:
        # Four interleaved channels, data starting at offset 250.
        i1 = x[250:end - 3:4]   # pore current
        i2 = x[251:end - 2:4]   # graphene current
        v1 = x[252:end - 1:4]   # pore voltage
        v2 = x[253:end:4]       # graphene voltage
        print('The femto was set to : {} Hz, if this value was correctly entered in the LabView!'.format(str(femtoLP)))
        output = {'FemtoLowPass': femtoLP, 'type': 'Axopatch', 'graphene': 1, 'samplerate': samplerate,
                  'i1': i1, 'v1': v1, 'i2': i2, 'v2': v2, 'filename': datafilename}
    else:
        # Two interleaved channels.
        i1 = np.array(x[250:end - 1:2])
        v1 = np.array(x[251:end:2])
        output = {'type': 'Axopatch', 'graphene': 0, 'samplerate': samplerate,
                  'i1': i1, 'v1': v1, 'filename': datafilename}
    return output
def RecursiveLowPassFast(signal, coeff):
    """Detect downward (blockade) events with a recursive low-pass baseline.

    coeff keys: 'a' (filter memory, 0<a<1), 'S' (detection threshold in local
    sigmas), 'E' (event-end threshold in sigmas), 'maxEventLength' (maximum
    event duration — counted here in *samples*; callers appear to pass
    seconds*samplerate, TODO confirm).

    Returns an Nx4 array of (startpoint, endpoint, local baseline ml[start],
    local variance vl[start]); empty array when nothing is found.
    """
    a = coeff['a']
    # Recursive (exponential) estimates of the local mean and variance.
    ml = scipy.signal.lfilter([1 - a, 0], [1, -a], signal)
    vl = scipy.signal.lfilter([1 - a, 0], [1, -a], np.square(signal - ml))
    sl = ml - coeff['S'] * np.sqrt(vl)  # detection threshold
    Ni = len(signal)
    points = np.where(signal <= sl)[0]
    # Keep only the first point of each run of consecutive crossings
    # (vectorized; the original looped in Python and np.delete'd floats).
    if len(points) > 0:
        points = points[np.concatenate(([True], np.diff(points) != 1))]
    RoughEventLocations = []
    NumberOfEvents = 0
    for i in points:
        # Skip crossings that fall inside the previously accepted event.
        # (was `NumberOfEvents is not 0`, a literal-identity anti-pattern)
        if NumberOfEvents != 0:
            prev = RoughEventLocations[NumberOfEvents - 1]
            if prev[0] <= i <= prev[1]:
                continue
        NumberOfEvents += 1
        start = i
        El = ml[i] - coeff['E'] * np.sqrt(vl[i])  # event-end threshold
        Mm = ml[i]  # local baseline at event start
        duration = 0
        # Walk forward while the signal stays below the end threshold.
        while signal[i + 1] < El and i < (Ni - 2) and duration < coeff['maxEventLength']:
            duration += 1
            i += 1
        if duration >= coeff['maxEventLength'] or i > (Ni - 10):
            # Too long, or too close to the end of the trace: discard.
            NumberOfEvents -= 1
        else:
            # Walk the start back to the last sample at/above the baseline.
            k = start
            while signal[k] < Mm and k > 1:
                k -= 1
            start = k - 1
            # Walk the end back to the last sample at/below the baseline.
            k2 = i + 1
            while signal[k2] > Mm:
                k2 -= 1
            endp = k2
            if start < 0:
                start = 0
            RoughEventLocations.append((start, endp, ml[start], vl[start]))
    return np.array(RoughEventLocations)
def RecursiveLowPassFastUp(signal, coeff):
    """Detect upward (enhancement) events — mirror of RecursiveLowPassFast.

    coeff keys as in RecursiveLowPassFast; the detection threshold sits
    *above* the local baseline. Returns an Nx4 array of
    (startpoint, endpoint, ml[start], vl[start]).
    """
    a = coeff['a']
    # Recursive (exponential) estimates of the local mean and variance.
    ml = scipy.signal.lfilter([1 - a, 0], [1, -a], signal)
    vl = scipy.signal.lfilter([1 - a, 0], [1, -a], np.square(signal - ml))
    sl = ml + coeff['S'] * np.sqrt(vl)  # detection threshold (above baseline)
    Ni = len(signal)
    points = np.where(signal >= sl)[0]
    # First point of each run of consecutive crossings (vectorized).
    if len(points) > 0:
        points = points[np.concatenate(([True], np.diff(points) != 1))]
    # The original also dropped a crossing at index 0.
    points = points[points != 0]
    RoughEventLocations = []
    NumberOfEvents = 0
    for i in points:
        # Skip crossings inside the previously accepted event
        # (was `NumberOfEvents is not 0`).
        if NumberOfEvents != 0:
            prev = RoughEventLocations[NumberOfEvents - 1]
            if prev[0] <= i <= prev[1]:
                continue
        NumberOfEvents += 1
        start = i
        El = ml[i] + coeff['E'] * np.sqrt(vl[i])  # event-end threshold
        Mm = ml[i]
        duration = 0
        # Walk forward while the signal stays above the end threshold.
        while signal[i + 1] > El and i < (Ni - 2) and duration < coeff['maxEventLength']:
            duration += 1
            i += 1
        if duration >= coeff['maxEventLength'] or i > (Ni - 10):
            NumberOfEvents -= 1
        else:
            # Walk the start back to the last sample at/below the baseline
            # (k > 2 guard keeps start >= 1, matching the original).
            k = start
            while signal[k] > Mm and k > 2:
                k -= 1
            start = k - 1
            # Walk the end back to the last sample at/below the baseline.
            k2 = i + 1
            while signal[k2] > Mm:
                k2 -= 1
            endp = k2
            RoughEventLocations.append((start, endp, ml[start], vl[start]))
    return np.array(RoughEventLocations)
CUSUM(self.out[self.sig][startpoints[i]-CusumBaseline:endpoints[i]+CusumBaseline], delta, h) # zeroPoint = startpoints[i]-CusumBaseline # krmv = krmv+zeroPoint+1 # AllFits['Event' + str(i)] = {} # AllFits['Event' + str(i)]['mc'] = mc # AllFits['Event' + str(i)]['krmv'] = krmv else: deli[i] = localBaseline[i] - np.min(self.out[self.sig][startpoints[i]:endpoints[i]]) dwell[i] = (endpoints[i] - startpoints[i]) / self.out['samplerate'] frac = deli / localBaseline dt = np.array(0) dt = np.append(dt, np.diff(startpoints) / self.out['samplerate']) numberofevents = len(dt) #self.AnalysisResults[self.sig]['CusumFits'] = AllFits AnalysisResults[self.sig]['FractionalCurrentDrop'] = frac AnalysisResults[self.sig]['DeltaI'] = deli AnalysisResults[self.sig]['DwellTime'] = dwell self.AnalysisResults[self.sig]['Frequency'] = dt def SavingAndPlottingAfterRecursive(self): startpoints=self.AnalysisResults[self.sig]['StartPoints'] endpoints=self.AnalysisResults[self.sig]['EndPoints'] numberofevents=self.AnalysisResults[self.sig]['NumberOfEvents'] deli=self.AnalysisResults[self.sig]['DeltaI'] dwell=self.AnalysisResults[self.sig]['DwellTime'] frac=self.AnalysisResults[self.sig]['FractionalCurrentDrop'] dt=self.AnalysisResults[self.sig]['Frequency'] localBaseline=self.AnalysisResults[self.sig]['LocalBaseline'] if not self.ui.actionDon_t_Plot_if_slow.isChecked(): self.p1.clear() # Event detection plot, Main Window self.p1.plot(self.t, self.out[self.sig], pen='b') self.p1.plot(self.t[startpoints], self.out[self.sig][startpoints], pen=None, symbol='o', symbolBrush='g', symbolSize=10) self.p1.plot(self.t[endpoints], self.out[self.sig][endpoints], pen=None, symbol='o', symbolBrush='r', symbolSize=10) #self.p1.plot(self.t[startpoints-10], localBaseline, pen=None, symbol='x', symbolBrush='y', symbolSize=10) try: self.p2.data = self.p2.data[np.where(np.array(self.sdf.fn) != self.matfilename)] except: IndexError self.sdf = self.sdf[self.sdf.fn != self.matfilename] fn = 
pd.Series([self.matfilename, ] * numberofevents) color = pd.Series([self.cb.color(), ] * numberofevents) self.sdf = self.sdf.append(pd.DataFrame({'fn': fn, 'color': color, 'deli': deli, 'frac': frac, 'dwell': dwell, 'dt': dt, 'startpoints': startpoints, 'endpoints': endpoints, 'baseline': localBaseline}), ignore_index=True) self.p2.addPoints(x=np.log10(dwell), y=frac, symbol='o', brush=(self.cb.color()), pen=None, size=10) self.w1.addItem(self.p2) self.w1.setLogMode(x=True, y=False) self.p1.autoRange() self.w1.autoRange() self.ui.scatterplot.update() self.w1.setRange(yRange=[0, 1]) colors = self.sdf.color for i, x in enumerate(colors): fracy, fracx = np.histogram(self.sdf.frac[self.sdf.color == x], bins=np.linspace(0, 1, int(self.ui.fracbins.text()))) deliy, delix = np.histogram(self.sdf.deli[self.sdf.color == x], bins=np.linspace(float(self.ui.delirange0.text()) * 10 ** -9, float(self.ui.delirange1.text()) * 10 ** -9, int(self.ui.delibins.text()))) bins_dwell=np.linspace(float(self.ui.dwellrange0.text()), float(self.ui.dwellrange1.text()), int(self.ui.dwellbins.text())) dwelly, dwellx = np.histogram(np.log10(self.sdf.dwell[self.sdf.color == x]), bins=bins_dwell,range=(bins_dwell.min(),bins_dwell.max())) dty, dtx = np.histogram(self.sdf.dt[self.sdf.color == x], bins=np.linspace(float(self.ui.dtrange0.text()), float(self.ui.dtrange1.text()), int(self.ui.dtbins.text()))) # hist = pg.PlotCurveItem(fracy, fracx , stepMode = True, fillLevel=0, brush = x, pen = 'k') # self.w2.addItem(hist) hist = pg.BarGraphItem(height=fracy, x0=fracx[:-1], x1=fracx[1:], brush=x) self.w2.addItem(hist) # hist = pg.PlotCurveItem(delix, deliy , stepMode = True, fillLevel=0, brush = x, pen = 'k') # self.w3.addItem(hist) hist = pg.BarGraphItem(height=deliy, x0=delix[:-1], x1=delix[1:], brush=x) self.w3.addItem(hist) # self.w3.autoRange() self.w3.setRange( xRange=[float(self.ui.delirange0.text()) * 10 ** -9, float(self.ui.delirange1.text()) * 10 ** -9]) # hist = pg.PlotCurveItem(dwellx, dwelly 
def save(self):
    """Dump the per-event results (DeltaI, fraction, dwell, inter-event dt)
    as a tab-separated text file next to the source data."""
    np.savetxt(self.matfilename + 'DB.txt',
               np.column_stack((self.deli, self.frac, self.dwell, self.dt)), delimiter='\t')


def PlotEventSingle(self, clicked=[]):
    """Plot a single detected event (channel i1) with its rectangular fit
    into the p3 plot widget.

    clicked: unused; NOTE(review) the mutable [] default is harmless here
    only because the argument is never touched.
    """
    # NOTE(review): this handle is never used and never closed — the HDF5
    # database is opened read-only and leaked.
    f = h5py.File(self.matfilename + '_OriginalDB.hdf5', "r")
    sig='i1'
    startpoints=self.AnalysisResults[sig]['StartPoints']
    endpoints=self.AnalysisResults[sig]['EndPoints']
    localBaseline=self.AnalysisResults[sig]['LocalBaseline']

    # Reset plot
    self.p3.setLabel('bottom', text='Time', units='s')
    self.p3.setLabel('left', text='Current', units='A')
    self.p3.clear()
    # NOTE(review): np.int is deprecated/removed in modern numpy.
    eventnumber = np.int(self.ui.eventnumberentry.text())
    eventbuffer = np.int(self.ui.eventbufferentry.value())

    # plot event trace with `eventbuffer` baseline samples on each side
    self.p3.plot(self.t[startpoints[eventnumber] - eventbuffer:endpoints[eventnumber] + eventbuffer],
                 self.out[sig][startpoints[eventnumber] - eventbuffer:endpoints[eventnumber] + eventbuffer],
                 pen='b')

    # plot event fit: baseline / depressed level / baseline as a step curve
    self.p3.plot(self.t[startpoints[eventnumber] - eventbuffer:endpoints[eventnumber] + eventbuffer],
                 np.concatenate((
                     np.repeat(np.array([localBaseline[eventnumber]]), eventbuffer),
                     np.repeat(np.array([localBaseline[eventnumber] - self.AnalysisResults['i1']['DeltaI'][eventnumber
                     ]]), endpoints[eventnumber] - startpoints[eventnumber]),
                     np.repeat(np.array([localBaseline[eventnumber]]), eventbuffer)), 0),
                 pen=pg.mkPen(color=(173, 27, 183), width=3))

    self.p3.autoRange()
Current" rightlabel = "Transverse Current" if self.ui.actionPlot_i2_detected_only.isChecked(): indexes = f['LowPassSegmentation/i2/OnlyIndex'] i = f['LowPassSegmentation/i2/'] sig = 'i2' sig2 = 'i1' rightlabel = "Ionic Current" leftlabel = "Transverse Current" self.p3.clear() self.transverseAxisEvent.clear() p1 = self.p3.plotItem p1.getAxis('left').setLabel(text=leftlabel, color='#0000FF', units='A') ## create a new ViewBox, link the right axis to its coordinate system p1.showAxis('right') p1.scene().addItem(self.transverseAxisEvent) p1.getAxis('right').linkToView(self.transverseAxisEvent) self.transverseAxisEvent.setXLink(p1) self.transverseAxisEvent.show() p1.getAxis('right').setLabel(text=rightlabel, color='#FF0000', units='A') def updateViews(): ## view has resized; update auxiliary views to match self.transverseAxisEvent.setGeometry(p1.vb.sceneBoundingRect()) self.transverseAxisEvent.linkedViewChanged(p1.vb, self.transverseAxisEvent.XAxis) updateViews() p1.vb.sigResized.connect(updateViews) # Correct for user error if non-extistent number is entered eventbuffer = np.int(self.ui.eventbufferentry.value()) maxEvents=self.NumberOfEvents eventnumber = np.int(self.ui.eventnumberentry.text()) if eventnumber >= maxEvents: eventnumber=0 self.ui.eventnumberentry.setText(str(eventnumber)) elif eventnumber < 0: eventnumber=maxEvents self.ui.eventnumberentry.setText(str(eventnumber)) # plot event trace parttoplot=np.arange(i['StartPoints'][indexes[eventnumber]] - eventbuffer, i['EndPoints'][indexes[eventnumber]] + eventbuffer,1, dtype=np.uint64) p1.plot(self.t[parttoplot], self.out[sig][parttoplot], pen='b') # plot event fit p1.plot(self.t[parttoplot], np.concatenate(( np.repeat(np.array([i['LocalBaseline'][indexes[eventnumber]]]), eventbuffer), np.repeat(np.array([i['LocalBaseline'][indexes[eventnumber]] - i['DeltaI'][indexes[eventnumber] ]]), i['EndPoints'][indexes[eventnumber]] - i['StartPoints'][indexes[eventnumber]]), 
np.repeat(np.array([i['LocalBaseline'][indexes[eventnumber]]]), eventbuffer)), 0), pen=pg.mkPen(color=(173, 27, 183), width=3)) # plot 2nd Channel if self.Derivative == 'i2': self.transverseAxisEvent.addItem(pg.PlotCurveItem(self.t[parttoplot][:-1], np.diff(self.out[sig][parttoplot]), pen='r')) p1.getAxis('right').setLabel(text='Derivative of i2', color='#FF0000', units='A') print('In if...') #plt.plot(t[:-1], np.diff(i1part), 'y') else: self.transverseAxisEvent.addItem(pg.PlotCurveItem(self.t[parttoplot], self.out[sig2][parttoplot], pen='r')) min1 = np.min(self.out[sig][parttoplot]) max1 = np.max(self.out[sig][parttoplot]) self.p3.setYRange(min1-(max1-min1), max1) self.p3.enableAutoRange(axis='x') min2 = np.min(self.out[sig2][parttoplot]) max2 = np.max(self.out[sig2][parttoplot]) self.transverseAxisEvent.setYRange(min2, max2+(max2-min2)) # Mark event start and end points p1.plot([self.t[i['StartPoints'][indexes[eventnumber]]], self.t[i['StartPoints'][indexes[eventnumber]]]], [self.out[sig][i['StartPoints'][indexes[eventnumber]]], self.out[sig][i['StartPoints'][indexes[eventnumber]]]], pen=None, symbol='o', symbolBrush='g', symbolSize=12) p1.plot([self.t[i['EndPoints'][indexes[eventnumber]]], self.t[i['EndPoints'][indexes[eventnumber]]]], [self.out[sig][i['EndPoints'][indexes[eventnumber]]], self.out[sig][i['EndPoints'][indexes[eventnumber]]]], pen=None, symbol='o', symbolBrush='r', symbolSize=12) dtime=pg.siFormat(i['DwellTime'][indexes[eventnumber]], precision=5, suffix='s', space=True, error=None, minVal=1e-25, allowUnicode=True) dI=pg.siFormat(i['DwellTime'][indexes[eventnumber]], precision=5, suffix='A', space=True, error=None, minVal=1e-25, allowUnicode=True) self.ui.eventinfolabel.setText(leftlabel + ': Dwell Time=' + dtime + ', Deli=' + dI) def PlotEventDoubleFit(self, clicked=[]): f = h5py.File(self.matfilename + '_OriginalDB.hdf5', "r") i1_indexes=f['LowPassSegmentation/i1/CommonIndex'] i2_indexes=f['LowPassSegmentation/i2/CommonIndex'] 
i1=f['LowPassSegmentation/i1/'] i2=f['LowPassSegmentation/i2/'] self.p3.clear() self.transverseAxisEvent.clear() leftlabel="Ionic Current" rightlabel="Transverse Current" p1 = self.p3.plotItem p1.getAxis('left').setLabel(text=leftlabel, color='#0000FF', units='A') ## create a new ViewBox, link the right axis to its coordinate system p1.showAxis('right') p1.scene().addItem(self.transverseAxisEvent) p1.getAxis('right').linkToView(self.transverseAxisEvent) self.transverseAxisEvent.setXLink(p1) self.transverseAxisEvent.show() p1.getAxis('right').setLabel(text=rightlabel, color='#FF0000', units='A') def updateViews(): ## view has resized; update auxiliary views to match self.transverseAxisEvent.setGeometry(p1.vb.sceneBoundingRect()) self.transverseAxisEvent.linkedViewChanged(p1.vb, self.transverseAxisEvent.XAxis) updateViews() p1.vb.sigResized.connect(updateViews) # Correct for user error if non-extistent number is entered eventbuffer = np.int(self.ui.eventbufferentry.value()) maxEvents=len(i1_indexes) eventnumber = np.int(self.ui.eventnumberentry.text()) if eventnumber >= maxEvents: eventnumber=0 self.ui.eventnumberentry.setText(str(eventnumber)) elif eventnumber < 0: eventnumber=maxEvents self.ui.eventnumberentry.setText(str(eventnumber)) # plot event trace parttoplot=np.arange(i1['StartPoints'][i1_indexes[eventnumber]] - eventbuffer, i1['EndPoints'][i1_indexes[eventnumber]] + eventbuffer,1, dtype=np.uint64) parttoplot2=np.arange(i2['StartPoints'][i2_indexes[eventnumber]] - eventbuffer, i2['EndPoints'][i2_indexes[eventnumber]] + eventbuffer,1, dtype=np.uint64) p1.plot(self.t[parttoplot], self.out['i1'][parttoplot], pen='b') # plot event fit p1.plot(self.t[parttoplot], np.concatenate(( np.repeat(np.array([i1['LocalBaseline'][i1_indexes[eventnumber]]]), eventbuffer), np.repeat(np.array([i1['LocalBaseline'][i1_indexes[eventnumber]] - i1['DeltaI'][i1_indexes[eventnumber] ]]), i1['EndPoints'][i1_indexes[eventnumber]] - i1['StartPoints'][i1_indexes[eventnumber]]), 
np.repeat(np.array([i1['LocalBaseline'][i1_indexes[eventnumber]]]), eventbuffer)), 0), pen=pg.mkPen(color=(173, 27, 183), width=3)) # plot 2nd Channel self.transverseAxisEvent.addItem(pg.PlotCurveItem(self.t[parttoplot2], self.out['i2'][parttoplot2], pen='r')) self.transverseAxisEvent.addItem(pg.PlotCurveItem(self.t[parttoplot2], np.concatenate((np.repeat( np.array([i2['LocalBaseline'][i2_indexes[eventnumber]]]), eventbuffer), np.repeat( np.array([i2['LocalBaseline'][i2_indexes[eventnumber]] - i2['DeltaI'][i2_indexes[eventnumber]]]), i2['EndPoints'][i2_indexes[eventnumber]] - i2['StartPoints'][i2_indexes[eventnumber]]), np.repeat( np.array([i2['LocalBaseline'][i2_indexes[eventnumber]]]), eventbuffer)), 0), pen=pg.mkPen(color=(173, 27, 183), width=3))) min1 = np.min(self.out['i1'][parttoplot]) max1 = np.max(self.out['i1'][parttoplot]) self.p3.setYRange(min1-(max1-min1), max1) self.p3.enableAutoRange(axis='x') min2 = np.min(self.out['i2'][parttoplot2]) max2 = np.max(self.out['i2'][parttoplot2]) self.transverseAxisEvent.setYRange(min2, max2+(max2-min2)) # Mark event start and end points p1.plot([self.t[i1['StartPoints'][i1_indexes[eventnumber]]], self.t[i1['StartPoints'][i1_indexes[eventnumber]]]], [self.out['i1'][i1['StartPoints'][i1_indexes[eventnumber]]], self.out['i1'][i1['StartPoints'][i1_indexes[eventnumber]]]], pen=None, symbol='o', symbolBrush='g', symbolSize=12) p1.plot([self.t[i1['EndPoints'][i1_indexes[eventnumber]]], self.t[i1['EndPoints'][i1_indexes[eventnumber]]]], [self.out['i1'][i1['EndPoints'][i1_indexes[eventnumber]]], self.out['i1'][i1['EndPoints'][i1_indexes[eventnumber]]]], pen=None, symbol='o', symbolBrush='r', symbolSize=12) self.transverseAxisEvent.addItem(pg.PlotCurveItem([self.t[i2['StartPoints'][i2_indexes[eventnumber]]], self.t[i2['StartPoints'][i2_indexes[eventnumber]]]], [self.out['i2'][i2['StartPoints'][i2_indexes[eventnumber]]], self.out['i2'][i1['StartPoints'][i2_indexes[eventnumber]]]], pen=None, symbol='o', symbolBrush='g', 
symbolSize=12)) self.transverseAxisEvent.addItem(pg.PlotCurveItem([self.t[i2['EndPoints'][i2_indexes[eventnumber]]], self.t[i2['EndPoints'][i2_indexes[eventnumber]]]], [self.out['i2'][i1['EndPoints'][i2_indexes[eventnumber]]], self.out['i2'][i1['EndPoints'][i2_indexes[eventnumber]]]], pen=None, symbol='o', symbolBrush='r', symbolSize=12)) dtime=pg.siFormat(i1['DwellTime'][i1_indexes[eventnumber]], precision=5, suffix='s', space=True, error=None, minVal=1e-25, allowUnicode=True) dI=pg.siFormat(i1['DwellTime'][i1_indexes[eventnumber]], precision=5, suffix='A', space=True, error=None, minVal=1e-25, allowUnicode=True) dtime2=pg.siFormat(i2['DwellTime'][i2_indexes[eventnumber]], precision=5, suffix='s', space=True, error=None, minVal=1e-25, allowUnicode=True) dI2=pg.siFormat(i2['DwellTime'][i2_indexes[eventnumber]], precision=5, suffix='A', space=True, error=None, minVal=1e-25, allowUnicode=True) self.ui.eventinfolabel.setText('Ionic Dwell Time=' + dtime + ', Ionic Deli=' + dI + ', ' 'Trans Dwell Time=' + dtime2 + ', Trans Deli=' + dI2) def SaveEventPlotMatplot(self): eventbuffer = np.int(self.ui.eventbufferentry.value()) eventnumber = np.int(self.ui.eventnumberentry.text()) parttoplot = np.arange(i['StartPoints'][indexes[eventnumber]][eventnumber] - eventbuffer, i['EndPoints'][indexes[eventnumber]][eventnumber] + eventbuffer, 1, dtype=np.uint64) t=np.arange(0,len(parttoplot)) t=t/self.out['samplerate']*1e3 fig1=plt.figure(1, figsize=(20,7)) plt.subplot(2, 1, 1) plt.cla plt.plot(t, self.out['i1'][parttoplot]*1e9, 'b') plt.ylabel('Ionic Current [nA]') ax = plt.gca() ax.set_xticklabels([]) plt.subplot(2, 1, 2) plt.cla plt.plot(t, self.out['i2'][parttoplot]*1e9, 'r') plt.ylabel('Transverse Current [nA]') plt.xlabel('time (ms)') self.pp.savefig() fig1.clear() def CombineTheTwoChannels(file): f = h5py.File(file, 'a') i1 = f['LowPassSegmentation/i1/'] i2 = f['LowPassSegmentation/i2/'] i1StartP = i1['StartPoints'][:] i2StartP = i2['StartPoints'][:] # Common Events # Take 
Longer CommonEventsi1Index = np.array([], dtype=np.uint64) CommonEventsi2Index = np.array([], dtype=np.uint64) DelayLimit = 10e-3*f['General/Samplerate/'].value for k in i1StartP: val = i2StartP[(i2StartP > k - DelayLimit) & (i2StartP < k + DelayLimit)] if len(val)==1: CommonEventsi2Index = np.append(CommonEventsi2Index, np.where(i2StartP == val)[0]) CommonEventsi1Index = np.append(CommonEventsi1Index, np.where(i1StartP == k)[0]) if len(val) > 1: diff=np.absolute(val-k) minIndex=np.where(diff == np.min(diff)) CommonEventsi2Index = np.append(CommonEventsi2Index, np.where(i2StartP == val[minIndex])[0]) CommonEventsi1Index = np.append(CommonEventsi1Index, np.where(i1StartP == k)[0]) # for i in range(len(i1StartP)): # for j in range(len(i2StartP)): # if np.absolute(i1StartP[i] - i2StartP[j]) < DelayLimit: # CommonEventsi1Index = np.append(CommonEventsi1Index, i) # CommonEventsi2Index = np.append(CommonEventsi2Index, j) # Only i1 Onlyi1Indexes = np.delete(range(len(i1StartP)), CommonEventsi1Index) # Only i2 Onlyi2Indexes = np.delete(range(len(i2StartP)), CommonEventsi2Index) e = "CommonIndex" in i1 if e: del i1['CommonIndex'] i1.create_dataset('CommonIndex', data=CommonEventsi1Index) del i2['CommonIndex'] i2.create_dataset('CommonIndex', data=CommonEventsi2Index) del i1['OnlyIndex'] i1.create_dataset('OnlyIndex', data=Onlyi1Indexes) del i2['OnlyIndex'] i2.create_dataset('OnlyIndex', data=Onlyi2Indexes) else: i1.create_dataset('CommonIndex', data=CommonEventsi1Index) i2.create_dataset('CommonIndex', data=CommonEventsi2Index) i1.create_dataset('OnlyIndex', data=Onlyi1Indexes) i2.create_dataset('OnlyIndex', data=Onlyi2Indexes) CommonIndexes={} CommonIndexes['i1']=CommonEventsi1Index CommonIndexes['i2']=CommonEventsi2Index OnlyIndexes={} OnlyIndexes['i1'] = Onlyi1Indexes OnlyIndexes['i2'] = Onlyi2Indexes return (CommonIndexes, OnlyIndexes) def PlotEvent(t1, t2, i1, i2, fit1 = np.array([]), fit2 = np.array([])): fig1 = plt.figure(1, figsize=(20, 7)) ax1 = 
fig1.add_subplot(211) ax2 = fig1.add_subplot(212, sharex=ax1) ax1.plot(t1, i1*1e9, 'b') if len(fit1) is not 0: ax1.plot(t1, fit1*1e9, 'y') ax2.plot(t2, i2*1e9, 'r') if len(fit2) is not 0: ax2.plot(t2, fit2*1e9, 'y') ax1.set_ylabel('Ionic Current [nA]') #ax1.set_xticklabels([]) ax2.set_ylabel('Transverse Current [nA]') ax2.set_xlabel('Time [ms]') ax2.ticklabel_format(useOffset=False) ax2.ticklabel_format(useOffset=False) ax1.ticklabel_format(useOffset=False) ax1.ticklabel_format(useOffset=False) return fig1 def EditInfoText(self): text2='ionic: {} events, trans: {} events\n'.format(str(self.AnalysisResults['i1']['RoughEventLocations'].shape[0]), str(self.AnalysisResults['i2']['RoughEventLocations'].shape[0])) text1='The file contains:\n{} Common Events\n{} Ionic Only Events\n{} Transverse Only Events'.format(len(self.CommonIndexes['i1']), len(self.OnlyIndexes['i1']), len(self.OnlyIndexes['i2'])) self.ui.InfoTexts.setText(text2+text1) def creation_date(path_to_file): if platform.system() == 'Windows': return os.path.getctime(path_to_file) else: stat = os.stat(path_to_file) try: return stat.st_mtime except AttributeError: # We're probably on Linux. No easy way to get creation dates here, # so we'll settle for when its content was last modified. 
return stat.st_mtime def CUSUM(input, delta, h): Nd = 0 kd = len(input) krmv = len(input) k0 = 0 k = 0 l = len(input) m = np.zeros(l) m[k0] = input[k0] v = np.zeros(l) sp = np.zeros(l) Sp = np.zeros(l) gp = np.zeros(l) sn = np.zeros(l) Sn = np.zeros(l) gn = np.zeros(l) while k < l: m[k] = np.mean(input[k0:k]) v[k] = np.var(input[k0:k]) sp[k] = delta / v[k] * (input[k] - m[k] - delta / 2) sn[k] = -delta / v[k] * (input[k] - m[k] + delta / 2) Sp[k] = Sp[k - 1] + sp[k] Sn[k] = Sn[k - 1] + sn[k] gp[k] = np.max(gp[k - 1] + sp[k], 0) gn[k] = np.max(gn[k - 1] + sn[k], 0) if gp[k] > h or gn[k] > h: kd[Nd] = k kmin = int(np.where(Sn == np.min(Sn[k0:k]))[0]) krmv[Nd] = kmin + k0 - 1 if gp(k) > h: kmin = int(np.where(Sn == np.min(Sn[k0:k]))[0]) krmv[Nd] = kmin + k0 - 1 k0 = k m[k0] = input[k0] v[k0] = 0 sp[k0] = 0 Sp[k0] = 0 gp[k0] = 0 sn[k0] = 0 Sn[k0] = 0 gn[k0] = 0 Nd = Nd + 1 k += 1 if Nd == 0: mc = np.mean(input) * np.ones(k) elif Nd == 1: mc = [m[krmv[0]] * np.ones(krmv[0]), m[k] * np.ones(k - krmv[0])] else: mc = m[krmv[0]] * np.ones(krmv[0]) for ii in range(1, Nd): mc = [mc, m[krmv[ii]] * np.ones(krmv[ii] - krmv[ii - 1])] mc = [mc, m(k) * np.ones(k - krmv[Nd])] return (mc, kd, krmv) def CUSUM2(input, delta, h, startp): Nd = 0 kd = [] krmv = [] krmvdown = [] both = [] k0 = 0 k = 0 l = len(input) m = np.zeros(l) m[k0] = input[k0] v = np.zeros(l) sp = np.zeros(l) Sp = np.zeros(l) gp = np.zeros(l) sn = np.zeros(l) Sn = np.zeros(l) gn = np.zeros(l) mc = [] while k < l-1: k += 1 m[k] = np.mean(input[k0:k+1]) v[k] = np.var(input[k0:k+1]) sp[k] = delta / v[k] * (input[k] - m[k] - delta / 2) sn[k] = -delta / v[k] * (input[k] - m[k] + delta / 2) Sp[k] = Sp[k - 1] + sp[k] Sn[k] = Sn[k - 1] + sn[k] gp[k] = np.max([gp[k - 1] + sp[k], 0]) gn[k] = np.max([gn[k - 1] + sn[k], 0]) if gp[k] > h or gn[k] > h: if gp[k] > h: kmin = np.argmin(Sp[k0:k]) krmv.append(kmin + k0 - 1) both.append(kmin + k0 - 1) else: kd.append(k) kmin = np.argmin(Sn[k0:k]) krmvdown.append(kmin + k0 - 1) 
both.append(kmin + k0 - 1) k0 = k m[k0] = input[k0] v[k0] = 0 sp[k0] = 0 Sp[k0] = 0 gp[k0] = 0 sn[k0] = 0 Sn[k0] = 0 gn[k0] = 0 Nd = Nd + 1 ''' if Nd == 0: mc = np.mean(input) * np.ones(k) elif Nd == 1: mc = [m[krmv[0]] * np.ones(krmv[0]), m[k] * np.ones(k - krmv[0])] else: mc.append(m[krmv[0]] * np.ones(krmv[0])) for ii in range(1, Nd-1): mc.append(m[krmv[ii]] * np.ones(krmv[ii] - krmv[ii - 1])) mc.append(m[k] * np.ones(k - krmv[Nd-1])) ''' ## Calculate Inner Levels if len(both) >= 2: levels = [np.zeros(len(both)-1), np.zeros(len(both)-1), np.zeros(len(both)-1), np.zeros(len(both)-1)] # 0: level number, 1: current, 2: length, 3: std fit = np.array([]) for ind, g in enumerate(both[:-1]): levels[0][ind] = ind levels[1][ind] = np.mean(input[g:both[ind + 1]]) levels[2][ind] = both[ind + 1] - g levels[3][ind] = np.std(input[g:both[ind + 1]]) np.append(fit, levels[1][ind] * np.ones(np.uint64(levels[2][ind]))) else: levels = [] fit = [] out = {'up': krmv+startp, 'down': krmvdown+startp, 'both': both+startp, 'levels':levels, 'fit': fit} return out def cusum_detection(data, threshhold, minlength, maxstates): print(threshhold) s = timer() logp = 0 # instantaneous log-likelihood for positive jumps logn = 0 # instantaneous log-likelihood for negative jumps cpos = np.zeros(len(data), dtype='float64') # cumulative log-likelihood function for positive jumps cneg = np.zeros(len(data), dtype='float64') # cumulative log-likelihood function for negative jumps gpos = np.zeros(2, dtype='float64') # decision function for positive jumps gneg = np.zeros(2, dtype='float64') # decision function for negative jumps edges = np.array([0], dtype='int64') # initialize an array with the position of the first subevent - the start of the event real_start = np.array([], dtype='int64') # initialize an array with the position of the first subevent - the start of the event real_end = np.array([], dtype='int64') # initialize an array with the position of the first subevent - the start of the event 
real_Depth = np.array([], dtype='int64') # initialize an array with the position of the first subevent - the start of the event anchor = 0 # the last detected change length = len(data) data_std = np.std(data) h = threshhold / data_std k = 0 nStates = np.uint64(0) varM = data[0] varS = 0 mean = data[0] print('length data =' + str(length)) v = np.zeros(length, dtype='float64') while k < length - 100: k += 1 if nStates == 0: variance = np.var(data[anchor:k]) # initial params for pattern region mean = np.mean(data[anchor:k]) if variance == 0: break logp = threshhold / variance * (data[ k] - mean - threshhold / 2.) # instantaneous log-likelihood for current sample assuming local baseline has jumped in the positive direction logn = -threshhold / variance * (data[ k] - mean + threshhold / 2.) # instantaneous log-likelihood for current sample assuming local baseline has jumped in the negative direction cpos[k] = cpos[k - 1] + logp # accumulate positive log-likelihoods cneg[k] = cneg[k - 1] + logn # accumulate negative log-likelihoods gpos[1] = max(gpos[0] + logp, 0) # accumulate or reset positive decision function gneg[1] = max(gneg[0] + logn, 0) # accumulate or reset negative decision function if (gpos[1] > h or gneg[1] > h): if (gpos[1] > h): # significant positive jump detected jump = np.uint64(anchor + np.argmin(cpos[anchor:k + 1])) # find the location of the start of the jump if jump - edges[np.uint64(nStates)] > minlength and np.abs(data[np.uint64(jump + minlength)] - data[jump]) > threshhold / 4: edges = np.append(edges, jump) nStates += 1 # print('EVENT!!!!! at ='+str(self.t[jump])) anchor = k # no data meaning at bad points! # away from bad point more! 
cpos[0:len(cpos)] = 0 # reset all decision arrays cneg[0:len(cneg)] = 0 gpos[0:len(gpos)] = 0 gneg[0:len(gneg)] = 0 if (gneg[1] > h): # significant negative jump detected jump = anchor + np.argmin(cneg[anchor:k + 1]) if jump - edges[np.uint64(nStates)] > minlength and np.abs(data[np.uint64(jump + minlength)] - data[jump]) > threshhold / 4: edges = np.append(edges, jump) nStates += 1 # print('EVENT!!!!! at ='+str(self.t[jump] )) anchor = k # no data meaning at bad points! # away from bad point more! cpos[0:len(cpos)] = 0 # reset all decision arrays cneg[0:len(cneg)] = 0 gpos[0:len(gpos)] = 0 gneg[0:len(gneg)] = 0 gpos[0] = gpos[1] gneg[0] = gneg[1] if maxstates > 0: if nStates > maxstates: print('too sensitive') nStates = 0 k = 0 threshhold = threshhold * 1.1 h = h * 1.1 logp = 0 # instantaneous log-likelihood for positive jumps logn = 0 # instantaneous log-likelihood for negative jumps cpos = np.zeros(len(data), dtype='float64') # cumulative log-likelihood function for positive jumps cneg = np.zeros(len(data), dtype='float64') # cumulative log-likelihood function for negative jumps gpos = np.zeros(2, dtype='float64') # decision function for positive jumps gneg = np.zeros(2, dtype='float64') # decision function for negative jumps edges = np.array([0], dtype='int64') # initialize an array with the position of the first subevent - the start of the event anchor = 0 # the last detected change length = len(data) mean = data[0] nStates = 0 mean = data[0] edges = np.append(edges, len(data) - 1) # mark the end of the event as an edge nStates += 1 cusum = dict() print('Events = ' + str([edges])) for i in range(len(edges) - 1): real_start = np.append(real_start, edges[i]) real_end = np.append(real_end, edges[i + 1]) real_Depth = np.append(real_Depth, np.mean(data[np.uint64(edges[i]):np.uint64(edges[i + 1])])) cusum['Real_Start'] = real_start cusum['Real_End'] = real_end cusum['Real_Depth'] = real_Depth print('Real Start =' + str(cusum['Real_Start'])) print('Real End =' + 
str(cusum['Real_End'])) cusum['CurrentLevels'] = [np.average(data[np.uint64(edges[i]) + minlength:np.uint64(edges[i + 1])]) for i in range(np.uint64(nStates))] # detect current levels during detected sub-event print('Edges[-1] = ' + str(edges[-1])) cusum['EventDelay'] = edges # locations of sub-events in the data cusum['Threshold'] = threshhold # record the threshold used print('Event = ' + str(cusum['EventDelay'])) cusum['jumps'] = np.diff(cusum['CurrentLevels']) e = timer() print('cusum took = ' + str(e - s) + 's') return cusum def detect_cusum(data, var, threshhold, minlength, maxstates): logp = 0 # instantaneous log-likelihood for positive jumps logn = 0 # instantaneous log-likelihood for negative jumps cpos = np.zeros(len(data), dtype='float64') # cumulative log-likelihood function for positive jumps cneg = np.zeros(len(data), dtype='float64') # cumulative log-likelihood function for negative jumps gpos = np.zeros(len(data), dtype='float64') # decision function for positive jumps gneg = np.zeros(len(data), dtype='float64') # decision function for negative jumps edges = np.array([0], dtype='int64') # initialize an array with the position of the first subevent - the start of the event anchor = 0 # the last detected change length = len(data) mean = data[0] h = threshhold / var k = self.safety_reg - self.control1 nStates = 0 varM = data[0] varS = 0 mean = data[0] print('length data =' + str(length)) v = np.zeros(length, dtype='float64') while k < length - 100: k += 1 if nStates == 0: variance = np.var(data[anchor:k]) mean = np.mean(data[anchor:k]) logp = threshhold / variance * (data[ k] - mean - threshhold / 2.) # instantaneous log-likelihood for current sample assuming local baseline has jumped in the positive direction logn = -threshhold / variance * (data[ k] - mean + threshhold / 2.) 
# instantaneous log-likelihood for current sample assuming local baseline has jumped in the negative direction cpos[k] = cpos[k - 1] + logp # accumulate positive log-likelihoods cneg[k] = cneg[k - 1] + logn # accumulate negative log-likelihoods gpos[k] = max(gpos[k - 1] + logp, 0) # accumulate or reset positive decision function gneg[k] = max(gneg[k - 1] + logn, 0) # accumulate or reset negative decision function if (gpos[k] > h or gneg[k] > h): if (gpos[k] > h): # significant positive jump detected jump = anchor + np.argmin(cpos[anchor:k + 1]) # find the location of the start of the jump if jump - edges[nStates] > minlength and np.abs(data[jump + minlength] - data[jump]) > threshhold / 4: edges = np.append(edges, jump) nStates += 1 print('EVENT!!!!! at =' + str((self.control1 + jump) / self.out['samplerate'])) anchor = k # no data meaning at bad points! # away from bad point more! cpos[0:len(cpos)] = 0 # reset all decision arrays cneg[0:len(cneg)] = 0 gpos[0:len(gpos)] = 0 gneg[0:len(gneg)] = 0 if (gneg[k] > h): # significant negative jump detected jump = anchor + np.argmin(cneg[anchor:k + 1]) if jump - edges[nStates] > minlength and np.abs(data[jump + minlength] - data[jump]) > threshhold / 4: edges = np.append(edges, jump) nStates += 1 print('EVENT!!!!! at =' + str((self.control1 + jump) / self.out['samplerate'])) anchor = k # no data meaning at bad points! # away from bad point more! 
cpos[0:len(cpos)] = 0 # reset all decision arrays cneg[0:len(cneg)] = 0 gpos[0:len(gpos)] = 0 gneg[0:len(gneg)] = 0 if maxstates > 0: if nStates > 10: print('too sensitive') nStates = 0 k = 0 threshhold = threshhold * 1.1 h = h * 1.1 logp = 0 # instantaneous log-likelihood for positive jumps logn = 0 # instantaneous log-likelihood for negative jumps cpos = np.zeros(len(data), dtype='float64') # cumulative log-likelihood function for positive jumps cneg = np.zeros(len(data), dtype='float64') # cumulative log-likelihood function for negative jumps gpos = np.zeros(len(data), dtype='float64') # decision function for positive jumps gneg = np.zeros(len(data), dtype='float64') # decision function for negative jumps edges = np.array([0], dtype='int64') # initialize an array with the position of the first subevent - the start of the event anchor = 0 # the last detected change length = len(data) mean = data[0] nStates = 0 mean = data[0] edges = np.append(edges, len(data)) # mark the end of the event as an edge nStates += 1 cusum = dict() cusum['CurrentLevels'] = [np.average(data[edges[i] + minlength:edges[i + 1]]) for i in range(nStates)] # detect current levels during detected sub-events cusum['EventDelay'] = edges # locations of sub-events in the data cusum['Threshold'] = threshhold # record the threshold used cusum['jumps'] = np.diff(cusum['CurrentLevels']) # self.__recordevent(cusum) return cusum def PlotIV(output, AllData, current = 'i1', unit=1e9, axis = '', WithFit = 1, PoreSize = [0,0], title = '', labeltxt = '', Polynomial=0, color='b', RectificationFactor=0, useEXP=0): if useEXP: current=current+'_Exp' if labeltxt is '': labeltxt = str(os.path.split(output['filename'])[1])[:-4] ind = np.argsort(AllData[current]['Voltage']) #Calculate Rectification Factor if RectificationFactor: parts = {'pos': AllData[current]['Voltage'] > 0, 'neg': AllData[current]['Voltage'] < 0} a={}; b={}; sigma_a={}; sigma_b={}; b_save={};x_values={} for part in parts: (a[part], b[part], 
sigma_a[part], sigma_b[part], b_save[part]) = YorkFit(AllData[current]['Voltage'][parts[part]], AllData[current]['Mean'][parts[part]], 1e-12 * np.ones(len(AllData[current]['Voltage'][parts[part]])), AllData[current]['STD'][parts[part]]) x_values[part] = np.linspace(min(AllData[current]['Voltage'][parts[part]]), max(AllData[current]['Voltage'][parts[part]]), 1000) axis.plot(x_values[part], scipy.polyval([b[part], a[part]], x_values[part])*unit, color=color, ls='--') factor = b['neg'] / b['pos'] labeltxt = labeltxt + ', R:{:4.2f}'.format(factor) #Polynomial is guide to the eye if not Polynomial: axis.errorbar(AllData[current]['Voltage'], AllData[current]['Mean']*unit, yerr=AllData[current]['STD']*unit, fmt='o', label=labeltxt, color=color) else: p = np.polyfit(AllData[current]['Voltage'][ind], AllData[current]['Mean'][ind]*unit, Polynomial) axis.errorbar(AllData[current]['Voltage'][ind], AllData[current]['Mean'][ind]*unit, yerr=AllData[current]['STD'][ind]*unit, fmt='o', label=labeltxt, color=color) axis.plot(AllData[current]['Voltage'][ind], np.polyval(p, AllData[current]['Voltage'][ind]), color=color) axis.set_ylabel('Current ' + current + ' [nA]') axis.set_xlabel('Voltage ' + AllData[current]['SweepedChannel'] +' [V]') if WithFit: axis.set_title(title + '\nG={}S, R={}Ohm'.format(pg.siFormat(AllData[current]['YorkFitValues']['Slope']), pg.siFormat(1/(AllData[current]['YorkFitValues']['Slope'])))) axis.plot(AllData[current]['YorkFitValues']['x_fit'], AllData[current]['YorkFitValues']['y_fit']*unit, 'r--', label='Linear Fit of '+labeltxt) else: axis.set_title(title + 'IV Plot') if PoreSize[0]: textstr = 'Pore Size\nConductance: {}S/m\nLenght: {}m:\ndiameter: {}m'.format(pg.siFormat(PoreSize[0]), pg.siFormat(PoreSize[1]), pg.siFormat(CalculatePoreSize(AllData[current]['YorkFitValues']['Slope'], PoreSize[1], PoreSize[0]))) axis.text(0.05, 0.95, textstr, transform=axis.transAxes, fontsize=12, verticalalignment='top', bbox=dict(boxstyle='round', facecolor='wheat', 
alpha=0.5)) return axis def CombineEventDatabases(filename, DBfiles): f = h5py.File(filename, "w") general = f.create_group("RawDB") general.create_dataset('FileName', data=self.out['filename']) general.create_dataset('Samplerate', data=self.out['samplerate']) general.create_dataset('Machine', data=self.out['type']) general.create_dataset('TransverseRecorded', data=self.out['graphene']) segmentation_LP = f.create_group("LowPassSegmentation") for k, l in self.AnalysisResults.items(): set1 = segmentation_LP.create_group(k) lpset1 = set1.create_group('LowPassSettings') for o, p in self.coefficients[k].items(): lpset1.create_dataset(o, data=p) for m, l in self.AnalysisResults[k].items(): set1.create_dataset(m, data=l) def MakeIV(filenames, directory, conductance=10, title=''): if len(conductance) is 1: conductance=np.ones(len(filenames))*conductance for idx,filename in enumerate(filenames): print(filename) # Make Dir to save images output = OpenFile(filename) if not os.path.exists(directory): os.makedirs(directory) AllData = MakeIVData(output, delay=2) if AllData == 0: print('!!!! 
No Sweep in: ' + filename) continue # Plot Considered Part # figExtracteParts = plt.figure(1) # ax1 = figExtracteParts.add_subplot(211) # ax2 = figExtracteParts.add_subplot(212, sharex=ax1) # (ax1, ax2) = uf.PlotExtractedPart(output, AllData, current = 'i1', unit=1e9, axis = ax1, axis2=ax2) # plt.show() # figExtracteParts.savefig(directory + os.sep + 'PlotExtracted_' + str(os.path.split(filename)[1])[:-4] + '.eps') # figExtracteParts.savefig(directory + os.sep + 'PlotExtracted_' + str(os.path.split(filename)[1])[:-4] + '.png', dpi=150) # Plot IV if output['graphene']: figIV2 = plt.figure(3) ax2IV = figIV2.add_subplot(111) ax2IV = PlotIV(output, AllData, current='i2', unit=1e9, axis=ax2IV, WithFit=1, title=title[idx]) figIV2.tight_layout() figIV2.savefig(directory + os.sep + str(os.path.split(filename)[1]) + '_IV_i2.png', dpi=300) figIV2.savefig(directory + os.sep + str(os.path.split(filename)[1]) + '_IV_i2.eps') figIV2.clear() figIV = plt.figure(2) ax1IV = figIV.add_subplot(111) ax1IV = PlotIV(output, AllData, current='i1', unit=1e9, axis=ax1IV, WithFit=1, PoreSize=[conductance[idx], 20e-9], title=title[idx]) figIV.tight_layout() # Save Figures figIV.savefig(directory + os.sep + str(os.path.split(filename)[1]) + 'IV_i1.png', dpi=300) figIV.savefig(directory + os.sep + str(os.path.split(filename)[1]) + 'IV_i1.eps') figIV.clear() def TwoChannelAnalysis(filenames, outdir, UpwardsOn=0): for filename in filenames: print(filename) inp = OpenFile(filename) file = os.sep + str(os.path.split(filename)[1][:-4]) if not os.path.exists(outdir): os.makedirs(outdir) # Low Pass Event Detection AnalysisResults = {} if inp['graphene']: chan = ['i1', 'i2'] # For looping over the channels else: chan = ['i1'] for sig in chan: AnalysisResults[sig] = {} AnalysisResults[sig]['RoughEventLocations'] = RecursiveLowPassFast(inp[sig], pm.coefficients[sig], inp['samplerate']) if UpwardsOn: # Upwards detection can be turned on or off AnalysisResults[sig + '_Up'] = {} AnalysisResults[sig + 
'_Up']['RoughEventLocations'] = RecursiveLowPassFastUp(inp[sig], pm.coefficients[sig], inp['samplerate']) # f.PlotRecursiveLPResults(AnalysisResults['i2'], inp, directory, 1e-3, channel='i2') # f.PlotRecursiveLPResults(AnalysisResults['i2_Up'], inp, directory+'UP_', 1e-3, channel='i2') # Refine the Rough Event Detection done by the LP filter and Add event infos AnalysisResults = RefinedEventDetection(inp, AnalysisResults, signals=chan, limit=pm.MinimalFittingLimit * inp['samplerate']) # Correlate the two channels if inp['graphene']: (CommonIndexes, OnlyIndexes) = f.CorrelateTheTwoChannels(AnalysisResults, 10e-3 * inp['samplerate']) print('\n\nAnalysis Done...\nThere are {} common events\n{} Events on i1 only\n{} Events on i2 only'.format( len(CommonIndexes['i1']), len(OnlyIndexes['i1']), len(OnlyIndexes['i2']))) # Plot The Events f.SaveAllPlots(CommonIndexes, OnlyIndexes, AnalysisResults, directory, inp, pm.PlotBuffer) else: # Plot The Events f.SaveAllAxopatchEvents(AnalysisResults, directory, inp, pm.PlotBuffer) \ No newline at end of file diff --git a/Plotting/PlotAllChimeraFiles.py b/Plotting/PlotAllChimeraFiles.py index 5695ce9..ab18568 100644 --- a/Plotting/PlotAllChimeraFiles.py +++ b/Plotting/PlotAllChimeraFiles.py @@ -1,32 +1,32 @@ from ObsoleteFunctions import UsefulFunctions as uf import matplotlib.pyplot as plt import numpy as np import os from matplotlib.font_manager import FontProperties from tkinter import Tk from tkinter.filedialog import askopenfilenames fontP = FontProperties() fontP.set_size('small') props = dict(boxstyle='round', facecolor='wheat', alpha=0.5) Tk().withdraw() os.system('''/usr/bin/osascript -e 'tell app "Finder" to set frontmost of process "python" to true' ''') #filenames = ['/Volumes/backup/2017/Michael/Axopatch/20170512/10mMKClInFlowCellORingPore1mm.dat'] #filenames = ['/Volumes/backup/2017/Michael/Axopatch/20170824/30B_10mMKClCis100mMTrans_PosElecToCis_IV.dat'] expname = 'All' filenames = askopenfilenames(filetypes = 
(("chimera files", "*.log"), ("all files", "*.*"))) for filename in filenames: print(filename) out=uf.ImportChimeraRaw(filename, 200*1e3) - coefficients = {'a': 0.999, 'E': 0, 'S': 5, 'eventlengthLimit': 10e-3 * out['samplerate']} + coefficients = {'a': 0.999, 'E': 0, 'S': 5, 'maxEventLength': 10e-3 * out['samplerate']} fig1, ax = plt.subplots(1) ax.plot(np.arange(0, len(out['i1']))/out['sampleratelp'], out['i1'] * 1e9, 'r') ax.set_xlabel('Time [s]') ax.set_ylabel('Current [nA]') fig1.savefig(filename[:-4] + '.png', dpi=150) fig1.clear() plt.close() \ No newline at end of file