diff --git a/PySONIC.sublime-project b/PySONIC.sublime-project index 24a1259..4c82066 100644 --- a/PySONIC.sublime-project +++ b/PySONIC.sublime-project @@ -1,43 +1,53 @@ { "build_systems": [ { "file_regex": "^[ ]*File \"(...*?)\", line ([0-9]*)", "name": "Anaconda Python Builder", "selector": "source.python", "shell_cmd": "\"python\" -u \"$file\"" } ], "folders": [ { "file_exclude_patterns": [ "*.sublime-workspace", "MANIFEST.in", "LICENSE", "conf.py", "index.rst", "*.gitignore", "__init__.py", "*.c", "*.sh", "*.bat", "Makefile", "*.pkl" ], "folder_exclude_patterns": [ "docs", "*.egg-info", ".ipynb_checkpoints", "_build", "_static", "_templates", "__pycache__" ], "path": "." } ], + "settings": + { + "anaconda_linting": true, + "anaconda_linting_behaviour": "always", + "pep257": false, + "python_interpreter": "C:\\Users\\lemaire\\Anaconda3\\python.exe", + "test_command": "python -m unittest discover", + "use_pylint": false, + "validate_imports": true + }, "translate_tabs_to_spaces": true } diff --git a/PySONIC/core/__init__.py b/PySONIC/core/__init__.py index 321b96c..ea4592a 100644 --- a/PySONIC/core/__init__.py +++ b/PySONIC/core/__init__.py @@ -1,16 +1,41 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- # @Author: Theo Lemaire # @Date: 2017-06-06 13:36:00 # @Email: theo.lemaire@epfl.ch # @Last Modified by: Theo Lemaire -# @Last Modified time: 2019-06-02 13:32:37 +# @Last Modified time: 2019-06-06 16:00:03 +import inspect +import sys from .simulators import PWSimulator, PeriodicSimulator from .batches import Batch, createQueue from .model import Model from .pneuron import PointNeuron from .bls import BilayerSonophore, PmCompMethod, LennardJones from .nbls import NeuronalBilayerSonophore -from .nmodl_generator import NmodlGenerator \ No newline at end of file +from .nmodl_generator import NmodlGenerator + +from ..neurons import getPointNeuron + + +def getModelsDict(): + ''' Construct a dictionary of all model classes, indexed by simulation key. ''' + current_module = sys.modules[__name__] + models_dict = {} + for _, obj in inspect.getmembers(current_module): + if inspect.isclass(obj) and hasattr(obj, 'simkey') and isinstance(obj.simkey, str): + models_dict[obj.simkey] = obj + return models_dict + + +def getModel(key, meta): + ''' Return appropriate model object based on a sim key and a dictionary of meta-information. 
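Minimal usage sketch (illustrative only; parameter values are hypothetical and assume an 'RS' point-neuron is registered in the package):

    meta = {'neuron': 'RS', 'a': 32e-9, 'Fdrive': 500e3}
    model = getModel('ASTIM', meta)  # -> NeuronalBilayerSonophore wrapping the RS neuron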
''' + if key == 'MECH': + model = BilayerSonophore(meta['a'], meta['Cm0'], meta['Qm0']) + else: + model = getPointNeuron(meta['neuron']) + if key == 'ASTIM': + model = NeuronalBilayerSonophore(meta['a'], model, meta['Fdrive']) + return model diff --git a/PySONIC/core/bls.py b/PySONIC/core/bls.py index 3d2cc9d..83e97a5 100644 --- a/PySONIC/core/bls.py +++ b/PySONIC/core/bls.py @@ -1,734 +1,735 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- # @Author: Theo Lemaire # @Date: 2016-09-29 16:16:19 # @Email: theo.lemaire@epfl.ch # @Last Modified by: Theo Lemaire -# @Last Modified time: 2019-06-03 14:48:47 +# @Last Modified time: 2019-06-06 15:56:51 from enum import Enum import os import json import numpy as np import pandas as pd import scipy.integrate as integrate from scipy.optimize import brentq, curve_fit from .model import Model from .simulators import PeriodicSimulator from ..utils import logger, si_format from ..constants import * class PmCompMethod(Enum): ''' Enum: types of computation method for the intermolecular pressure ''' direct = 1 predict = 2 def LennardJones(x, beta, alpha, C, m, n): ''' Generic expression of a Lennard-Jones function, adapted for the context of symmetric deflection (distance = 2x). :param x: deflection (i.e. half-distance) :param beta: x-shifting factor :param alpha: x-scaling factor :param C: y-scaling factor :param m: exponent of the repulsion term :param n: exponent of the attraction term :return: Lennard-Jones potential at given distance (2x) ''' return C * (np.power((alpha / (2 * x + beta)), m) - np.power((alpha / (2 * x + beta)), n)) class BilayerSonophore(Model): ''' This class contains the geometric and mechanical parameters of the Bilayer Sonophore Model, as well as all the core functions needed to compute the dynamics (kinetics and kinematics) of the bilayer membrane cavitation, and run dynamic BLS simulations. ''' # BIOMECHANICAL PARAMETERS T = 309.15 # Temperature (K) delta0 = 2.0e-9 # Thickness of the leaflet (m) Delta_ = 1.4e-9 # Initial gap between the two leaflets on a non-charged membrane at equil. (m) pDelta = 1.0e5 # Attraction/repulsion pressure coefficient (Pa) m = 5.0 # Exponent in the repulsion term (dimensionless) n = 3.3 # Exponent in the attraction term (dimensionless) rhoL = 1075.0 # Density of the surrounding fluid (kg/m^3) muL = 7.0e-4 # Dynamic viscosity of the surrounding fluid (Pa.s) muS = 0.035 # Dynamic viscosity of the leaflet (Pa.s) kA = 0.24 # Area compression modulus of the leaflet (N/m) alpha = 7.56 # Tissue shear loss modulus frequency coefficient (Pa.s) C0 = 0.62 # Initial gas molar concentration in the surrounding fluid (mol/m^3) kH = 1.613e5 # Henry's constant (Pa.m^3/mol) P0 = 1.0e5 # Static pressure in the surrounding fluid (Pa) Dgl = 3.68e-9 # Diffusion coefficient of gas in the fluid (m^2/s) xi = 0.5e-9 # Boundary layer thickness for gas transport across leaflet (m) c = 1515.0 # Speed of sound in medium (m/s) # BIOPHYSICAL PARAMETERS epsilon0 = 8.854e-12 # Vacuum permittivity (F/m) epsilonR = 1.0 # Relative permittivity of intramembrane cavity (dimensionless) tscale = 'us' # relevant temporal scale of the model + simkey = 'MECH' # keyword used to characterize simulations made with this model def __init__(self, a, Cm0, Qm0, Fdrive=None, embedding_depth=0.0): ''' Constructor of the class. 
:param a: in-plane radius of the sonophore structure within the membrane (m) :param Cm0: membrane resting capacitance (F/m2) :param Qm0: membrane resting charge density (C/m2) :param Fdrive: frequency of acoustic perturbation (Hz) :param embedding_depth: depth of the embedding tissue around the membrane (m) ''' # Extract resting constants and geometry self.Cm0 = Cm0 self.Qm0 = Qm0 self.a = a self.d = embedding_depth self.S0 = np.pi * self.a**2 # Derive frequency-dependent tissue elastic modulus if Fdrive is not None: G_tissue = self.alpha * Fdrive # G'' (Pa) self.kA_tissue = 2 * G_tissue * self.d # kA of the tissue layer (N/m) else: self.kA_tissue = 0. # Check existence of lookups for derived parameters lookups = self.getLookups() akey = '{:.1f}'.format(a * 1e9) Qkey = '{:.2f}'.format(Qm0 * 1e5) # If no lookup, compute parameters and store them in lookup if akey not in lookups or Qkey not in lookups[akey]: # Find Delta that cancels out Pm + Pec at Z = 0 (m) if self.Qm0 == 0.0: D_eq = self.Delta_ else: (D_eq, Pnet_eq) = self.findDeltaEq(self.Qm0) assert Pnet_eq < PNET_EQ_MAX, 'High Pnet at Z = 0 with ∆ = %.2f nm' % (D_eq * 1e9) self.Delta = D_eq # Find optimal Lennard-Jones parameters to approximate PMavg (LJ_approx, std_err, _) = self.LJfitPMavg() assert std_err < PMAVG_STD_ERR_MAX, 'High error in PmAvg nonlinear fit:'\ ' std_err = %.2f Pa' % std_err self.LJ_approx = LJ_approx if akey not in lookups: lookups[akey] = {Qkey: {'LJ_approx': LJ_approx, 'Delta_eq': D_eq}} else: lookups[akey][Qkey] = {'LJ_approx': LJ_approx, 'Delta_eq': D_eq} logger.debug('Saving BLS derived parameters to lookup file') self.saveLookups(lookups) # If lookup exists, load parameters from it else: logger.debug('Loading BLS derived parameters from lookup file') self.LJ_approx = lookups[akey][Qkey]['LJ_approx'] self.Delta = lookups[akey][Qkey]['Delta_eq'] # Compute initial volume and gas content self.V0 = np.pi * self.Delta * self.a**2 self.ng0 = self.gasPa2mol(self.P0, self.V0) def __repr__(self): s = '{}({:.1f} nm'.format(self.__class__.__name__, self.a * 1e9) if self.d > 0.: s += ', d={}m'.format(si_format(self.d, precision=1, space=' ')) return s + ')' def filecode(self, Fdrive, Adrive, Qm): - return 'MECH_{:.0f}nm_{:.0f}kHz_{:.1f}kPa_{:.1f}nCcm2'.format( - self.a * 1e9, Fdrive * 1e-3, Adrive * 1e-3, Qm * 1e5) + return '{}_{:.0f}nm_{:.0f}kHz_{:.1f}kPa_{:.1f}nCcm2'.format( + self.simkey, self.a * 1e9, Fdrive * 1e-3, Adrive * 1e-3, Qm * 1e5) def getLookupsPath(self): return os.path.join(os.path.split(__file__)[0], 'bls_lookups.json') def getLookups(self): try: with open(self.getLookupsPath()) as fh: sample = json.load(fh) return sample except FileNotFoundError: return {} def saveLookups(self, lookups): with open(self.getLookupsPath(), 'w') as fh: json.dump(lookups, fh, indent=2) def getPltScheme(self): return { 'P_{AC}': ['Pac'], 'Z': ['Z'], 'n_g': ['ng'] } def getPltVars(self, wrapleft='df["', wrapright='"]'): ''' Return a dictionary with information about all plot variables related to the model. 
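Each entry maps a variable key to a dictionary of plotting attributes ('desc', 'label', 'unit', 'factor', and optionally 'bounds' and/or a 'func' expression string, in which the wrapleft/wrapright arguments wrap dataframe column names).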
''' return { 'Pac': { 'desc': 'acoustic pressure', 'label': 'P_{AC}', 'unit': 'kPa', 'factor': 1e-3, 'func': 'Pacoustic({0}t{1}, meta["Adrive"] * {0}stimstate{1}, meta["Fdrive"])'.format( wrapleft, wrapright) }, 'Z': { 'desc': 'leaflets deflection', 'label': 'Z', 'unit': 'nm', 'factor': 1e9, 'bounds': (-1.0, 10.0) }, 'ng': { 'desc': 'gas content', 'label': 'n_g', 'unit': '10^{-22}\ mol', 'factor': 1e22, 'bounds': (1.0, 15.0) }, 'Pmavg': { 'desc': 'average intermolecular pressure', 'label': 'P_M', 'unit': 'kPa', 'factor': 1e-3, 'func': 'PMavgpred({0}Z{1})'.format(wrapleft, wrapright) }, 'Telastic': { 'desc': 'leaflet elastic tension', 'label': 'T_E', 'unit': 'mN/m', 'factor': 1e3, 'func': 'TEleaflet({0}Z{1})'.format(wrapleft, wrapright) }, 'Cm': { 'desc': 'membrane capacitance', 'label': 'C_m', 'unit': 'uF/cm^2', 'factor': 1e2, 'bounds': (0.0, 1.5), 'func': 'v_Capct({0}Z{1})'.format(wrapleft, wrapright) } } def curvrad(self, Z): ''' Leaflet curvature radius (signed variable) :param Z: leaflet apex deflection (m) :return: leaflet curvature radius (m) ''' if Z == 0.0: return np.inf else: return (self.a**2 + Z**2) / (2 * Z) def v_curvrad(self, Z): ''' Vectorized curvrad function ''' return np.array(list(map(self.curvrad, Z))) def surface(self, Z): ''' Surface area of the stretched leaflet (spherical cap formula) :param Z: leaflet apex deflection (m) :return: stretched leaflet surface (m^2) ''' return np.pi * (self.a**2 + Z**2) def volume(self, Z): ''' Volume of the inter-leaflet space (cylinder +/- 2 spherical caps) :param Z: leaflet apex deflection (m) :return: bilayer sonophore inner volume (m^3) ''' return np.pi * self.a**2 * self.Delta\ * (1 + (Z / (3 * self.Delta) * (3 + Z**2 / self.a**2))) def arealstrain(self, Z): ''' Areal strain of the stretched leaflet epsilon = (S - S0)/S0 = (Z/a)^2 :param Z: leaflet apex deflection (m) :return: areal strain (dimensionless) ''' return (Z / self.a)**2 def Capct(self, Z): ''' Membrane capacitance (parallel-plate capacitor evaluated at average inter-layer distance) :param Z: leaflet apex deflection (m) :return: capacitance per unit area (F/m2) ''' if Z == 0.0: return self.Cm0 else: return ((self.Cm0 * self.Delta / self.a**2) * (Z + (self.a**2 - Z**2 - Z * self.Delta) / (2 * Z) * np.log((2 * Z + self.Delta) / self.Delta))) def v_Capct(self, Z): ''' Vectorized Capct function ''' return np.array(list(map(self.Capct, Z))) def derCapct(self, Z, U): ''' Evolution of membrane capacitance :param Z: leaflet apex deflection (m) :param U: leaflet apex deflection velocity (m/s) :return: time derivative of capacitance per unit area (F/m2.s) ''' dCmdZ = ((self.Cm0 * self.Delta / self.a**2) * ((Z**2 + self.a**2) / (Z * (2 * Z + self.Delta)) - ((Z**2 + self.a**2) * np.log((2 * Z + self.Delta) / self.Delta)) / (2 * Z**2))) return dCmdZ * U def localdef(self, r, Z, R): ''' Local leaflet deflection at specific radial distance (signed) :param r: in-plane distance from center of the sonophore (m) :param Z: leaflet apex deflection (m) :param R: leaflet curvature radius (m) :return: local transverse leaflet deviation (m) ''' if np.abs(Z) == 0.0: return 0.0 else: return np.sign(Z) * (np.sqrt(R**2 - r**2) - np.abs(R) + np.abs(Z)) def Pacoustic(self, t, Adrive, Fdrive, phi=np.pi): ''' Time-varying acoustic pressure :param t: time (s) :param Adrive: acoustic drive amplitude (Pa) :param Fdrive: acoustic drive frequency (Hz) :param phi: acoustic drive phase (rad) ''' return Adrive * np.sin(2 * np.pi * Fdrive * t - phi) def PMlocal(self, r, Z, R): ''' Local intermolecular pressure 
:param r: in-plane distance from center of the sonophore (m) :param Z: leaflet apex deflection (m) :param R: leaflet curvature radius (m) :return: local intermolecular pressure (Pa) ''' z = self.localdef(r, Z, R) relgap = (2 * z + self.Delta) / self.Delta_ return self.pDelta * ((1 / relgap)**self.m - (1 / relgap)**self.n) def PMavg(self, Z, R, S): ''' Average intermolecular pressure across the leaflet (computed by quadratic integration) :param Z: leaflet apex outward deflection value (m) :param R: leaflet curvature radius (m) :param S: surface of the stretched leaflet (m^2) :return: averaged intermolecular resultant pressure (Pa) .. warning:: quadratic integration is computationally expensive. ''' # Integrate intermolecular force over an infinitely thin ring of radius r from 0 to a fTotal, _ = integrate.quad(lambda r, Z, R: 2 * np.pi * r * self.PMlocal(r, Z, R), 0, self.a, args=(Z, R)) return fTotal / S def v_PMavg(self, Z, R, S): ''' Vectorized PMavg function ''' return np.array(list(map(self.PMavg, Z, R, S))) def LJfitPMavg(self): ''' Determine optimal parameters of a Lennard-Jones expression approximating the average intermolecular pressure. These parameters are obtained by a nonlinear fit of the Lennard-Jones function for a range of deflection values between predetermined Zmin and Zmax. :return: 3-tuple with optimized LJ parameters for PmAvg prediction (Map) and the standard and max errors of the prediction in the fitting range (in Pascals) ''' # Determine lower bound of deflection range: when Pm = Pmmax PMmax = LJFIT_PM_MAX # Pa Zminlb = -0.49 * self.Delta Zminub = 0.0 Zmin = brentq(lambda Z, Pmmax: self.PMavg(Z, self.curvrad(Z), self.surface(Z)) - PMmax, Zminlb, Zminub, args=(PMmax), xtol=1e-16) # Create vectors for geometric variables Zmax = 2 * self.a Z = np.arange(Zmin, Zmax, 1e-11) Pmavg = self.v_PMavg(Z, self.v_curvrad(Z), self.surface(Z)) # Compute optimal nonlinear fit of custom LJ function with initial guess x0_guess = self.delta0 C_guess = 0.1 * self.pDelta nrep_guess = self.m nattr_guess = self.n pguess = (x0_guess, C_guess, nrep_guess, nattr_guess) popt, _ = curve_fit(lambda x, x0, C, nrep, nattr: LennardJones(x, self.Delta, x0, C, nrep, nattr), Z, Pmavg, p0=pguess, maxfev=10000) (x0_opt, C_opt, nrep_opt, nattr_opt) = popt Pmavg_fit = LennardJones(Z, self.Delta, x0_opt, C_opt, nrep_opt, nattr_opt) # Compute prediction error residuals = Pmavg - Pmavg_fit ss_res = np.sum(residuals**2) N = residuals.size std_err = np.sqrt(ss_res / N) max_err = max(np.abs(residuals)) logger.debug('LJ approx: x0 = %.2f nm, C = %.2f kPa, m = %.2f, n = %.2f', x0_opt * 1e9, C_opt * 1e-3, nrep_opt, nattr_opt) LJ_approx = {"x0": x0_opt, "C": C_opt, "nrep": nrep_opt, "nattr": nattr_opt} return (LJ_approx, std_err, max_err) def PMavgpred(self, Z): ''' Approximated average intermolecular pressure (using nonlinearly fitted Lennard-Jones function) :param Z: leaflet apex deflection (m) :return: predicted average intermolecular pressure (Pa) ''' return LennardJones(Z, self.Delta, self.LJ_approx['x0'], self.LJ_approx['C'], self.LJ_approx['nrep'], self.LJ_approx['nattr']) def Pelec(self, Z, Qm): ''' Electrical pressure term :param Z: leaflet apex deflection (m) :param Qm: membrane charge density (C/m2) :return: electrical pressure (Pa) ''' relS = self.S0 / self.surface(Z) abs_perm = self.epsilon0 * self.epsilonR # F/m return - relS * Qm**2 / (2 * abs_perm) # Pa def findDeltaEq(self, Qm): ''' Compute the Delta that cancels out the (Pm + Pec) equation at Z = 0 for a given membrane charge density, using the 
Brent method to refine the pressure root iteratively. :param Qm: membrane charge density (C/m2) :return: equilibrium value (m) and associated pressure (Pa) ''' def dualPressure(Delta): x = (self.Delta_ / Delta) return (self.pDelta * (x**self.m - x**self.n) + self.Pelec(0.0, Qm)) Delta_eq = brentq(dualPressure, 0.1 * self.Delta_, 2.0 * self.Delta_, xtol=1e-16) logger.debug('∆eq = %.2f nm', Delta_eq * 1e9) return (Delta_eq, dualPressure(Delta_eq)) def gasFlux(self, Z, P): ''' Gas molar flux through the sonophore boundary layers :param Z: leaflet apex deflection (m) :param P: internal gas pressure (Pa) :return: gas molar flux (mol/s) ''' dC = self.C0 - P / self.kH return 2 * self.surface(Z) * self.Dgl * dC / self.xi def gasmol2Pa(self, ng, V): ''' Internal gas pressure for a given molar content :param ng: internal molar content (mol) :param V: sonophore inner volume (m^3) :return: internal gas pressure (Pa) ''' return ng * Rg * self.T / V def gasPa2mol(self, P, V): ''' Internal gas molar content for a given pressure :param P: internal gas pressure (Pa) :param V: sonophore inner volume (m^3) :return: internal gas molar content (mol) ''' return P * V / (Rg * self.T) def PtotQS(self, Z, ng, Qm, Pac, Pm_comp_method): ''' Net quasi-steady pressure for a given acoustic pressure (Ptot = Pm + Pg + Pec - P0 - Pac) :param Z: leaflet apex deflection (m) :param ng: internal molar content (mol) :param Qm: membrane charge density (C/m2) :param Pac: acoustic pressure (Pa) :param Pm_comp_method: computation method for average intermolecular pressure :return: total balance pressure (Pa) ''' if Pm_comp_method is PmCompMethod.direct: Pm = self.PMavg(Z, self.curvrad(Z), self.surface(Z)) elif Pm_comp_method is PmCompMethod.predict: Pm = self.PMavgpred(Z) return Pm + self.gasmol2Pa(ng, self.volume(Z)) - self.P0 - Pac + self.Pelec(Z, Qm) def balancedefQS(self, ng, Qm, Pac=0.0, Pm_comp_method=PmCompMethod.predict): ''' Quasi-steady equilibrium deflection for a given acoustic pressure (computed by approximating the root of quasi-steady pressure) :param ng: internal molar content (mol) :param Qm: membrane charge density (C/m2) :param Pac: external acoustic perturbation (Pa) :param Pm_comp_method: computation method for average intermolecular pressure :return: leaflet deflection canceling quasi-steady pressure (m) ''' lb = -0.49 * self.Delta ub = self.a Plb = self.PtotQS(lb, ng, Qm, Pac, Pm_comp_method) Pub = self.PtotQS(ub, ng, Qm, Pac, Pm_comp_method) assert (Plb > 0 > Pub), '[%d, %d] is not a sign changing interval for PtotQS' % (lb, ub) return brentq(self.PtotQS, lb, ub, args=(ng, Qm, Pac, Pm_comp_method), xtol=1e-16) def TEleaflet(self, Z): ''' Elastic tension in leaflet :param Z: leaflet apex deflection (m) :return: circumferential elastic tension (N/m) ''' return self.kA * self.arealstrain(Z) def TEtissue(self, Z): ''' Elastic tension in surrounding viscoelastic layer :param Z: leaflet apex deflection (m) :return: circumferential elastic tension (N/m) ''' return self.kA_tissue * self.arealstrain(Z) def TEtot(self, Z): ''' Total elastic tension (leaflet + surrounding viscoelastic layer) :param Z: leaflet apex deflection (m) :return: circumferential elastic tension (N/m) ''' return self.TEleaflet(Z) + self.TEtissue(Z) def PEtot(self, Z, R): ''' Total elastic tension pressure (leaflet + surrounding viscoelastic layer) :param Z: leaflet apex deflection (m) :param R: leaflet curvature radius (m) :return: elastic tension pressure (Pa) ''' return - self.TEtot(Z) / R def PVleaflet(self, U, R): ''' Viscous stress 
pressure in leaflet :param U: leaflet apex deflection velocity (m/s) :param R: leaflet curvature radius (m) :return: leaflet viscous stress pressure (Pa) ''' return - 12 * U * self.delta0 * self.muS / R**2 def PVfluid(self, U, R): ''' Viscous stress pressure in surrounding medium :param U: leaflet apex deflection velocity (m/s) :param R: leaflet curvature radius (m) :return: fluid viscous stress pressure (Pa) ''' return - 4 * U * self.muL / np.abs(R) def accP(self, Ptot, R): ''' Leaflet transverse acceleration resulting from pressure imbalance :param Ptot: net pressure (Pa) :param R: leaflet curvature radius (m) :return: pressure-driven acceleration (m/s^2) ''' return Ptot / (self.rhoL * np.abs(R)) def accNL(self, U, R): ''' Leaflet transverse nonlinear acceleration :param U: leaflet apex deflection velocity (m/s) :param R: leaflet curvature radius (m) :return: nonlinear acceleration term (m/s^2) .. note:: A simplified version of nonlinear acceleration (neglecting dR/dH) is used here. ''' # return - (3/2 - 2*R/H) * U**2 / R return -(3 * U**2) / (2 * R) def derivatives(self, y, t, Adrive, Fdrive, Qm, phi, Pm_comp_method=PmCompMethod.predict): ''' Evolution of the mechanical system :param y: vector of HH system variables at time t :param t: time instant (s) :param Adrive: acoustic drive amplitude (Pa) :param Fdrive: acoustic drive frequency (Hz) :param Qm: membrane charge density (F/m2) :param phi: acoustic drive phase (rad) :param Pm_comp_method: computation method for average intermolecular pressure :return: vector of mechanical system derivatives at time t ''' # Split input vector explicitly U, Z, ng = y # Correct deflection value is below critical compression if Z < -0.5 * self.Delta: logger.warning('Deflection out of range: Z = %.2f nm', Z * 1e9) Z = -0.49 * self.Delta # Compute curvature radius R = self.curvrad(Z) # Compute total pressure Pg = self.gasmol2Pa(ng, self.volume(Z)) if Pm_comp_method is PmCompMethod.direct: Pm = self.PMavg(Z, self.curvrad(Z), self.surface(Z)) elif Pm_comp_method is PmCompMethod.predict: Pm = self.PMavgpred(Z) Ptot = (Pm + Pg - self.P0 - self.Pacoustic(t, Adrive, Fdrive, phi) + self.PEtot(Z, R) + self.PVleaflet(U, R) + self.PVfluid(U, R) + self.Pelec(Z, Qm)) # Compute derivatives dUdt = self.accP(Ptot, R) + self.accNL(U, R) dZdt = U dngdt = self.gasFlux(Z, Pg) # Return derivatives vector return [dUdt, dZdt, dngdt] def checkInputs(self, Fdrive, Adrive, Qm, phi): ''' Check validity of stimulation parameters :param Fdrive: acoustic drive frequency (Hz) :param Adrive: acoustic drive amplitude (Pa) :param phi: acoustic drive phase (rad) :param Qm: imposed membrane charge density (C/m2) ''' if not all(isinstance(param, float) for param in [Fdrive, Adrive, Qm, phi]): raise TypeError('Invalid stimulation parameters (must be float typed)') if Fdrive <= 0: raise ValueError('Invalid US driving frequency: {} kHz (must be strictly positive)' .format(Fdrive * 1e-3)) if Adrive < 0: raise ValueError('Invalid US pressure amplitude: {} kPa (must be positive or null)' .format(Adrive * 1e-3)) if Qm < CHARGE_RANGE[0] or Qm > CHARGE_RANGE[1]: raise ValueError('Invalid applied charge: {} nC/cm2 (must be within [{}, {}] interval' .format(Qm * 1e5, CHARGE_RANGE[0] * 1e5, CHARGE_RANGE[1] * 1e5)) if phi < 0 or phi >= 2 * np.pi: raise ValueError('Invalid US pressure phase: {:.2f} rad (must be within [0, 2 PI[ rad' .format(phi)) def meta(self, Fdrive, Adrive, Qm): ''' Return information about object and simulation parameters. 
:param Fdrive: US frequency (Hz) :param Adrive: acoustic pressure amplitude (Pa) :param Qm: applied membrane charge density (C/m2) :return: meta-data dictionary ''' return { 'a': self.a, 'd': self.d, 'Cm0': self.Cm0, 'Qm0': self.Qm0, 'Fdrive': Fdrive, 'Adrive': Adrive, 'Qm': Qm } def simulate(self, Fdrive, Adrive, Qm, phi=np.pi, Pm_comp_method=PmCompMethod.predict): ''' Simulate system until periodic stabilization for a specific set of ultrasound parameters, and return output data in a dataframe. :param Fdrive: acoustic drive frequency (Hz) :param Adrive: acoustic drive amplitude (Pa) :param phi: acoustic drive phase (rad) :param Qm: imposed membrane charge density (C/m2) :param Pm_comp_method: type of method used to compute average intermolecular pressure :return: 2-tuple with the output dataframe and computation time. ''' logger.info('%s: simulation @ f = %sHz, A = %sPa, Q = %sC/cm2', self, *si_format([Fdrive, Adrive, Qm * 1e-4], 2, space=' ')) # Check validity of stimulation parameters self.checkInputs(Fdrive, Adrive, Qm, phi) # Determine time step dt = 1 / (NPC_FULL * Fdrive) # Compute non-zero deflection value for a small perturbation (solving quasi-steady equation) Pac = self.Pacoustic(dt, Adrive, Fdrive, phi) Z0 = self.balancedefQS(self.ng0, Qm, Pac, Pm_comp_method) # Set initial conditions y0 = np.array([0., Z0, self.ng0]) # Initialize simulator and compute solution simulator = PeriodicSimulator( lambda y, t: self.derivatives(y, t, Adrive, Fdrive, Qm, phi, Pm_comp_method), ivars_to_check=[1, 2]) (t, y, stim), tcomp = simulator(y0, dt, 1. / Fdrive, monitor_time=True) logger.debug('completed in %ss', si_format(tcomp, 1)) # Set last stimulation state to zero stim[-1] = 0 # Store output in dataframe data = pd.DataFrame({ 't': t, 'stimstate': stim, 'Z': y[:, 1], 'ng': y[:, 2] }) # Return dataframe and computation time return data, tcomp def getCycleProfiles(self, Fdrive, Adrive, Qm): ''' Simulate mechanical system and compute pressures over the last acoustic cycle :param Fdrive: acoustic drive frequency (Hz) :param Adrive: acoustic drive amplitude (Pa) :param Qm: imposed membrane charge density (C/m2) :return: dataframe with the time, kinematic and pressure profiles over the last cycle. 
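Illustrative call (hypothetical stimulation values; `bls` denotes a BilayerSonophore instance):

    profiles = bls.getCycleProfiles(500e3, 100e3, 0.0)
    # columns: 't', 'Z', 'Cm', 'P_M', 'P_Q', 'P_{VE}', 'P_V', 'P_G', 'P_0'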
''' # Run default simulation and compute relevant profiles logger.info('Running mechanical simulation (a = %sm, f = %sHz, A = %sPa)', si_format(self.a, 1), si_format(Fdrive, 1), si_format(Adrive, 1)) data, _ = self.simulate(Fdrive, Adrive, Qm, Pm_comp_method=PmCompMethod.direct) t, Z, ng = [data.loc[-NPC_FULL:, key].values for key in ['t', 'Z', 'ng']] dt = (t[-1] - t[0]) / (NPC_FULL - 1) t -= t[0] # Compute pressure cyclic profiles logger.info('Computing pressure cyclic profiles') R = self.v_curvrad(Z) U = np.diff(Z) / dt U = np.hstack((U, U[-1])) data = { 't': t, 'Z': Z, 'Cm': self.v_Capct(Z), 'P_M': self.v_PMavg(Z, R, self.surface(Z)), 'P_Q': self.Pelec(Z, Qm), 'P_{VE}': self.PEtot(Z, R) + self.PVleaflet(U, R), 'P_V': self.PVfluid(U, R), 'P_G': self.gasmol2Pa(ng, self.volume(Z)), 'P_0': - np.ones(Z.size) * self.P0 } return pd.DataFrame(data, columns=data.keys()) diff --git a/PySONIC/core/model.py b/PySONIC/core/model.py index bc5675b..5fc3083 100644 --- a/PySONIC/core/model.py +++ b/PySONIC/core/model.py @@ -1,116 +1,122 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- # @Author: Theo Lemaire # @Date: 2017-08-03 11:53:04 # @Email: theo.lemaire@epfl.ch # @Last Modified by: Theo Lemaire -# @Last Modified time: 2019-06-02 15:34:25 +# @Last Modified time: 2019-06-06 15:57:58 import os import pickle import abc import inspect import numpy as np from .batches import createQueue from ..utils import logger, loadData class Model(metaclass=abc.ABCMeta): ''' Generic model interface. ''' @property @abc.abstractmethod def tscale(self): ''' Relevant temporal scale of the model. ''' raise NotImplementedError + @property + @abc.abstractmethod + def simkey(self): + ''' Keyword used to characterize simulations made with the model. ''' + raise NotImplementedError + @property @abc.abstractmethod def __repr__(self): raise NotImplementedError def params(self): ''' Gather all model parameters in a dictionary ''' def toAvoid(p): return (p.startswith('__') and p.endswith('__')) or p.startswith('_abc_') class_attrs = inspect.getmembers(self.__class__, lambda a: not(inspect.isroutine(a))) inst_attrs = inspect.getmembers(self, lambda a: not(inspect.isroutine(a))) class_attrs = [a for a in class_attrs if not toAvoid(a[0])] inst_attrs = [a for a in inst_attrs if not toAvoid(a[0]) and a not in class_attrs] params_dict = {a[0]: a[1] for a in class_attrs + inst_attrs} return params_dict @property @abc.abstractmethod def filecode(self, *args): raise NotImplementedError def getDesc(self): return inspect.getdoc(self).splitlines()[0] @property @abc.abstractmethod def getPltScheme(self): raise NotImplementedError @property @abc.abstractmethod def getPltVars(self, *args, **kwargs): raise NotImplementedError @property @abc.abstractmethod def checkInputs(self, *args): raise NotImplementedError @property @abc.abstractmethod def meta(self, *args): raise NotImplementedError @property @abc.abstractmethod def simulate(self, *args, **kwargs): raise NotImplementedError def simQueue(self, *args): ''' Create a simulation queue from a combination of simulation parameters. ''' return createQueue(*args) def runAndSave(self, outdir, *args): ''' Simulate system and save results in a PKL file. 
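:param outdir: output directory in which the PKL file is saved
:param args: simulation parameters, forwarded to the titrate, simulate, meta and filecode methods (a None entry triggers a titration to find the threshold amplitude)
:return: full path to the output file, or None if titration fails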
''' # If no amplitude provided, perform titration to find it if None in args: iA = args.index(None) new_args = [x for x in args if x is not None] Athr = self.titrate(*new_args) if np.isnan(Athr): logger.error('Could not find threshold excitation amplitude') return None new_args.insert(iA, Athr) args = new_args # Simulate model, save file and return file path data, tcomp = self.simulate(*args) meta = self.meta(*args) meta['tcomp'] = tcomp fpath = '{}/{}.pkl'.format(outdir, self.filecode(*args)) with open(fpath, 'wb') as fh: pickle.dump({'meta': meta, 'data': data}, fh) logger.debug('simulation data exported to "%s"', fpath) return fpath def load(self, outdir, *args): ''' Load output data for a specific parameters combination. ''' # Get file path from simulation parameters fpath = '{}/{}.pkl'.format(outdir, self.filecode(*args)) # If output file does not exist, run simulation to generate it if not os.path.isfile(fpath): self.runAndSave(outdir, *args) # Return data and meta-data return loadData(fpath) diff --git a/PySONIC/core/nbls.py b/PySONIC/core/nbls.py index cc345a9..f3e8816 100644 --- a/PySONIC/core/nbls.py +++ b/PySONIC/core/nbls.py @@ -1,647 +1,648 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- # @Author: Theo Lemaire # @Date: 2016-09-29 16:16:19 # @Email: theo.lemaire@epfl.ch # @Last Modified by: Theo Lemaire -# @Last Modified time: 2019-06-02 15:27:53 +# @Last Modified time: 2019-06-06 15:50:29 from copy import deepcopy import logging import numpy as np import pandas as pd from scipy.interpolate import interp1d from scipy.integrate import solve_ivp from .simulators import PWSimulator, HybridSimulator from .bls import BilayerSonophore from .pneuron import PointNeuron from .batches import createQueue from ..utils import * from ..constants import * from ..postpro import getFixedPoints class NeuronalBilayerSonophore(BilayerSonophore): ''' This class inherits from the BilayerSonophore class and receives an PointNeuron instance at initialization, to define the electro-mechanical NICE model and its SONIC variant. ''' tscale = 'ms' # relevant temporal scale of the model + simkey = 'ASTIM' # keyword used to characterize simulations made with this model def __init__(self, a, neuron, Fdrive=None, embedding_depth=0.0): ''' Constructor of the class. 
:param a: in-plane radius of the sonophore structure within the membrane (m) :param neuron: neuron object :param Fdrive: frequency of acoustic perturbation (Hz) :param embedding_depth: depth of the embedding tissue around the membrane (m) ''' # Check validity of input parameters if not isinstance(neuron, PointNeuron): raise ValueError('Invalid neuron type: "{}" (must inherit from PointNeuron class)' .format(neuron.name)) self.neuron = neuron # Initialize BilayerSonophore parent object BilayerSonophore.__init__(self, a, neuron.Cm0, neuron.Cm0 * neuron.Vm0 * 1e-3, embedding_depth) def __repr__(self): s = '{}({:.1f} nm, {}'.format(self.__class__.__name__, self.a * 1e9, self.neuron) if self.d > 0.: s += ', d={}m'.format(si_format(self.d, precision=1, space=' ')) return s + ')' def params(self): params = super().params() params.update(self.neuron.params()) return params def getPltVars(self, wrapleft='df["', wrapright='"]'): pltvars = super().getPltVars(wrapleft, wrapright) pltvars.update(self.neuron.getPltVars(wrapleft, wrapright)) return pltvars def getPltScheme(self): return self.neuron.getPltScheme() def filecode(self, Fdrive, Adrive, tstim, toffset, PRF, DC, method='sonic'): - return 'ASTIM_{}_{}_{:.0f}nm_{:.0f}kHz_{:.2f}kPa_{:.0f}ms_{}{}'.format( - self.neuron.name, 'CW' if DC == 1 else 'PW', self.a * 1e9, + return '{}_{}_{}_{:.0f}nm_{:.0f}kHz_{:.2f}kPa_{:.0f}ms_{}{}'.format( + self.simkey, self.neuron.name, 'CW' if DC == 1 else 'PW', self.a * 1e9, Fdrive * 1e-3, Adrive * 1e-3, tstim * 1e3, 'PRF{:.2f}Hz_DC{:.2f}%_'.format(PRF, DC * 1e2) if DC < 1. else '', method) def fullDerivatives(self, y, t, Adrive, Fdrive, phi): ''' Compute the derivatives of the (n+3) ODE full NBLS system variables. :param y: vector of state variables :param t: specific instant in time (s) :param Adrive: acoustic drive amplitude (Pa) :param Fdrive: acoustic drive frequency (Hz) :param phi: acoustic drive phase (rad) :return: vector of derivatives ''' dydt_mech = BilayerSonophore.derivatives(self, y[:3], t, Adrive, Fdrive, y[3], phi) dydt_elec = self.neuron.Qderivatives(y[3:], t, self.Capct(y[1])) return dydt_mech + dydt_elec def effDerivatives(self, y, t, lkp): ''' Compute the derivatives of the n-ODE effective HH system variables, based on 1-dimensional linear interpolation of "effective" coefficients that summarize the system's behaviour over an acoustic cycle. :param y: vector of HH system variables at time t :param t: specific instant in time (s) :param lkp: dictionary of 1D data points of "effective" coefficients over the charge domain, for specific frequency and amplitude values. :return: vector of effective system derivatives at time t ''' # Split input vector explicitly Qm, *states = y # Compute charge and channel states variation Vmeff = self.neuron.interpVmeff(Qm, lkp) dQmdt = - self.neuron.iNet(Vmeff, states) * 1e-3 dstates = self.neuron.derEffStates(Qm, states, lkp) # Return derivatives vector return [dQmdt, *[dstates[k] for k in self.neuron.states]] def interpEffVariable(self, key, Qm, stim, lkps1D): ''' Interpolate Q-dependent effective variable along solution. 
:param key: lookup variable key :param Qm: charge density solution vector :param stim: stimulation state solution vector :param lkps1D: dictionary of lookups for ON and OFF states :return: interpolated effective variable vector ''' x = np.zeros(stim.size) x[stim == 0] = np.interp( Qm[stim == 0], lkps1D['ON']['Q'], lkps1D['ON'][key], left=np.nan, right=np.nan) x[stim == 1] = np.interp( Qm[stim == 1], lkps1D['ON']['Q'], lkps1D['OFF'][key], left=np.nan, right=np.nan) return x def runFull(self, Fdrive, Adrive, tstim, toffset, PRF, DC, phi=np.pi): ''' Compute solutions of the full electro-mechanical system for a specific set of US stimulation parameters, using a classic integration scheme. The first iteration uses the quasi-steady simplification to compute the initiation of motion from a flat leaflet configuration. Afterwards, the ODE system is solved iteratively until completion. :param Fdrive: acoustic drive frequency (Hz) :param Adrive: acoustic drive amplitude (Pa) :param tstim: duration of US stimulation (s) :param toffset: duration of the offset (s) :param PRF: pulse repetition frequency (Hz) :param DC: pulse duty cycle (-) :param phi: acoustic drive phase (rad) :return: 2-tuple with the output dataframe and computation time. ''' # Determine time step dt = 1 / (NPC_FULL * Fdrive) # Compute non-zero deflection value for a small perturbation (solving quasi-steady equation) Pac = self.Pacoustic(dt, Adrive, Fdrive, phi) Z0 = self.balancedefQS(self.ng0, self.Qm0, Pac) # Set initial conditions steady_states = self.neuron.steadyStates(self.neuron.Vm0) y0 = np.concatenate(( [0., Z0, self.ng0, self.Qm0], [steady_states[k] for k in self.neuron.states])) # Initialize simulator and compute solution logger.debug('Computing detailed solution') simulator = PWSimulator( lambda y, t: self.fullDerivatives(y, t, Adrive, Fdrive, phi), lambda y, t: self.fullDerivatives(y, t, 0., 0., 0.)) (t, y, stim), tcomp = simulator( y0, dt, tstim, toffset, PRF, DC, print_progress=logger.getEffectiveLevel() <= logging.INFO, target_dt=CLASSIC_TARGET_DT, monitor_time=True) logger.debug('completed in %ss', si_format(tcomp, 1)) # Store output in dataframe data = pd.DataFrame({ 't': t, 'stimstate': stim, 'Z': y[:, 1], 'ng': y[:, 2], 'Qm': y[:, 3] }) data['Vm'] = data['Qm'].values / self.v_Capct(data['Z'].values) * 1e3 # mV for i in range(len(self.neuron.states)): data[self.neuron.states[i]] = y[:, i + 4] # Return dataframe and computation time return data, tcomp def runHybrid(self, Fdrive, Adrive, tstim, toffset, PRF, DC, phi=np.pi): ''' Compute solutions of the system for a specific set of US stimulation parameters, using a hybrid integration scheme. :param Fdrive: acoustic drive frequency (Hz) :param Adrive: acoustic drive amplitude (Pa) :param tstim: duration of US stimulation (s) :param toffset: duration of the offset (s) :param phi: acoustic drive phase (rad) :return: 3-tuple with the time profile, the solution matrix and a state vector ''' # Determine time steps dt_dense, dt_sparse = [1. 
/ (n * Fdrive) for n in [NPC_FULL, NPC_HH]] # Compute non-zero deflection value for a small perturbation (solving quasi-steady equation) Pac = self.Pacoustic(dt_dense, Adrive, Fdrive, phi) Z0 = self.balancedefQS(self.ng0, self.Qm0, Pac) # Set initial conditions steady_states = self.neuron.steadyStates(self.neuron.Vm0) y0 = np.concatenate(( [0., Z0, self.ng0, self.Qm0], [steady_states[k] for k in self.neuron.states], )) is_dense_var = np.array([True] * 3 + [False] * (len(self.neuron.states) + 1)) # Initialize simulator and compute solution logger.debug('Computing hybrid solution') simulator = HybridSimulator( lambda y, t: self.fullDerivatives(y, t, Adrive, Fdrive, phi), lambda y, t: self.fullDerivatives(y, t, 0., 0., 0.), lambda t, y, Cm: self.neuron.Qderivatives(y, t, Cm), lambda yref: self.Capct(yref[1]), is_dense_var, ivars_to_check=[1, 2]) (t, y, stim), tcomp = simulator( y0, dt_dense, dt_sparse, 1. / Fdrive, tstim, toffset, PRF, DC, monitor_time=True) logger.debug('completed in %ss', si_format(tcomp, 1)) # Store output in dataframe data = pd.DataFrame({ 't': t, 'stimstate': stim, 'Z': y[:, 1], 'ng': y[:, 2], 'Qm': y[:, 3] }) data['Vm'] = data['Qm'].values / self.v_Capct(data['Z'].values) * 1e3 # mV for i in range(len(self.neuron.states)): data[self.neuron.states[i]] = y[:, i + 4] # Return dataframe and computation time return data, tcomp def computeEffVars(self, Fdrive, Adrive, Qm, fs): ''' Compute "effective" coefficients of the HH system for a specific combination of stimulus frequency, stimulus amplitude and charge density. A short mechanical simulation is run while imposing the specific charge density, until periodic stabilization. The HH coefficients are then averaged over the last acoustic cycle to yield "effective" coefficients. :param Fdrive: acoustic drive frequency (Hz) :param Adrive: acoustic drive amplitude (Pa) :param Qm: imposed charge density (C/m2) :param fs: list of sonophore membrane coverage fractions :return: list with computation time and a list of dictionaries of effective variables ''' # Run simulation and retrieve deflection and gas content vectors from last cycle data, tcomp = BilayerSonophore.simulate(self, Fdrive, Adrive, Qm) Z_last = data.loc[-NPC_FULL:, 'Z'].values # m Cm_last = self.v_Capct(Z_last) # F/m2 # For each coverage fraction effvars = [] for x in fs: # Compute membrane capacitance and membrane potential vectors Cm = x * Cm_last + (1 - x) * self.Cm0 # F/m2 Vm = Qm / Cm * 1e3 # mV # Compute average cycle value for membrane potential and rate constants effvars.append({'V': np.mean(Vm)}) effvars[-1].update(self.neuron.computeEffRates(Vm)) # Log process log = '{}: lookups @ {}Hz, {}Pa, {:.2f} nC/cm2'.format( self, *si_format([Fdrive, Adrive], precision=1, space=' '), Qm * 1e5) if len(fs) > 1: log += ', fs = {:.0f} - {:.0f}%'.format(fs.min() * 1e2, fs.max() * 1e2) log += ', tcomp = {:.3f} s'.format(tcomp) logger.info(log) # Return effective coefficients return [tcomp, effvars] def runSONIC(self, Fdrive, Adrive, tstim, toffset, PRF, DC): ''' Compute solutions of the system for a specific set of US stimulation parameters, using charge-predicted "effective" coefficients to solve the HH equations at each step. 
:param Fdrive: acoustic drive frequency (Hz) :param Adrive: acoustic drive amplitude (Pa) :param tstim: duration of US stimulation (s) :param toffset: duration of the offset (s) :param PRF: pulse repetition frequency (Hz) :param DC: pulse duty cycle (-) :return: 3-tuple with the time profile, the effective solution matrix and a state vector ''' # Load appropriate 2D lookups Aref, Qref, lkps2D, _ = getLookups2D(self.neuron.name, a=self.a, Fdrive=Fdrive) # Check that acoustic amplitude is within lookup range Adrive = isWithin('amplitude', Adrive, (Aref.min(), Aref.max())) # Interpolate 2D lookups at zero and US amplitude logger.debug('Interpolating lookups at A = %.2f kPa and A = 0', Adrive * 1e-3) lkps1D = {state: {key: interp1d(Aref, y2D, axis=0)(val) for key, y2D in lkps2D.items()} for state, val in {'ON': Adrive, 'OFF': 0.}.items()} # Add reference charge vector to 1D lookup dictionaries for state in lkps1D.keys(): lkps1D[state]['Q'] = Qref # Set initial conditions steady_states = self.neuron.steadyStates(self.neuron.Vm0) y0 = np.insert(np.array([steady_states[k] for k in self.neuron.states]), 0, self.Qm0) # Initialize simulator and compute solution logger.debug('Computing effective solution') simulator = PWSimulator( lambda y, t: self.effDerivatives(y, t, lkps1D['ON']), lambda y, t: self.effDerivatives(y, t, lkps1D['OFF'])) (t, y, stim), tcomp = simulator(y0, DT_EFF, tstim, toffset, PRF, DC, monitor_time=True) logger.debug('completed in %ss', si_format(tcomp, 1)) # Store output in dataframe data = pd.DataFrame({ 't': t, 'stimstate': stim, 'Qm': y[:, 0] }) for key in ['ng', 'V']: data[key] = self.interpEffVariable(key, data['Qm'].values, stim, lkps1D) data['Z'] = np.array([self.balancedefQS(ng, Qm) for ng, Qm in zip( data['ng'].values, data['Qm'].values)]) # m data['Vm'] = data['Qm'].values / self.v_Capct(data['Z'].values) * 1e3 # mV for i in range(len(self.neuron.states)): data[self.neuron.states[i]] = y[:, i + 1] # Return dataframe and computation time return data, tcomp def meta(self, Fdrive, Adrive, tstim, toffset, PRF, DC, method): ''' Return information about object and simulation parameters. :param Fdrive: US frequency (Hz) :param Adrive: acoustic drive amplitude (Pa) :param tstim: stimulus duration (s) :param toffset: stimulus offset (s) :param PRF: pulse repetition frequency (Hz) :param DC: stimulus duty cycle (-) :param method: integration method :return: meta-data dictionary ''' return { 'neuron': self.neuron.name, 'a': self.a, 'd': self.d, 'Fdrive': Fdrive, 'Adrive': Adrive, 'tstim': tstim, 'toffset': toffset, 'PRF': PRF, 'DC': DC, 'method': method } def simulate(self, Fdrive, Adrive, tstim, toffset, PRF=100., DC=1.0, method='sonic'): ''' Simulate the electro-mechanical model for a specific set of US stimulation parameters, and return output data in a dataframe. :param Fdrive: acoustic drive frequency (Hz) :param Adrive: acoustic drive amplitude (Pa) :param tstim: duration of US stimulation (s) :param toffset: duration of the offset (s) :param PRF: pulse repetition frequency (Hz) :param DC: pulse duty cycle (-) :param method: selected integration method :return: 2-tuple with the output dataframe and computation time. 
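Minimal usage sketch (hypothetical parameters; `nbls` denotes a NeuronalBilayerSonophore instance):

    data, tcomp = nbls.simulate(500e3, 100e3, 150e-3, 100e-3, PRF=100., DC=0.5, method='sonic')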
''' logger.info( '%s: simulation @ f = %sHz, A = %sPa, t = %ss (%ss offset)%s', self, si_format(Fdrive, 0, space=' '), si_format(Adrive, 2, space=' '), *si_format([tstim, toffset], 1, space=' '), (', PRF = {}Hz, DC = {:.2f}%'.format( si_format(PRF, 2, space=' '), DC * 1e2) if DC < 1.0 else '')) # Check validity of stimulation parameters BilayerSonophore.checkInputs(self, Fdrive, Adrive, 0.0, 0.0) self.neuron.checkInputs(Adrive, tstim, toffset, PRF, DC) # Call appropriate simulation function try: simfunc = { 'full': self.runFull, 'hybrid': self.runHybrid, 'sonic': self.runSONIC }[method] except KeyError: raise ValueError('Invalid integration method: "{}"'.format(method)) data, tcomp = simfunc(Fdrive, Adrive, tstim, toffset, PRF, DC) # Log number of detected spikes nspikes = self.neuron.getNSpikes(data) logger.debug('{} spike{} detected'.format(nspikes, plural(nspikes))) # Return dataframe and computation time return data, tcomp @cache(os.path.join(os.path.split(__file__)[0], 'astim_titrations.log')) def titrate(self, Fdrive, tstim, toffset, PRF=100., DC=1., method='sonic', xfunc=None, Arange=None): ''' Use a binary search to determine the threshold amplitude needed to obtain neural excitation for a given frequency, duration, PRF and duty cycle. :param Fdrive: US frequency (Hz) :param tstim: duration of US stimulation (s) :param toffset: duration of the offset (s) :param PRF: pulse repetition frequency (Hz) :param DC: pulse duty cycle (-) :param method: integration method :param xfunc: function determining whether condition is reached from simulation output :param Arange: search interval for Adrive, iteratively refined :return: determined threshold amplitude (Pa) ''' # Default output function if xfunc is None: xfunc = self.neuron.titrationFunc # Default amplitude interval if Arange is None: Arange = (0, getLookups2D(self.neuron.name, a=self.a, Fdrive=Fdrive)[0].max()) return binarySearch( lambda x: xfunc(self.simulate(*x)[0]), [Fdrive, tstim, toffset, PRF, DC, method], 1, Arange, TITRATION_ASTIM_DA_MAX ) def simQueue(self, freqs, amps, durations, offsets, PRFs, DCs, method): ''' Create a serialized 2D array of all parameter combinations for a series of individual parameter sweeps, while avoiding repetition of CW protocols for a given PRF sweep. :param freqs: list (or 1D-array) of US frequencies :param amps: list (or 1D-array) of acoustic amplitudes :param durations: list (or 1D-array) of stimulus durations :param offsets: list (or 1D-array) of stimulus offsets (paired with durations array) :param PRFs: list (or 1D-array) of pulse-repetition frequencies :param DCs: list (or 1D-array) of duty cycle values :params method: integration method :return: list of parameters (list) for each simulation ''' if amps is None: amps = [np.nan] DCs = np.array(DCs) queue = [] if 1.0 in DCs: queue += createQueue(freqs, amps, durations, offsets, min(PRFs), 1.0) if np.any(DCs != 1.0): queue += createQueue(freqs, amps, durations, offsets, PRFs, DCs[DCs != 1.0]) for item in queue: if np.isnan(item[1]): item[1] = None item.append(method) return queue def quasiSteadyStates(self, Fdrive, amps=None, charges=None, DCs=1.0, squeeze_output=False): ''' Compute the quasi-steady state values of the neuron's gating variables for a combination of US amplitudes, charge densities and duty cycles, at a specific US frequency. 
:param Fdrive: US frequency (Hz) :param amps: US amplitudes (Pa) :param charges: membrane charge densities (C/m2) :param DCs: duty cycle value(s) :return: 4-tuple with reference values of US amplitude and charge density, as well as interpolated Vmeff and QSS gating variables ''' # Get DC-averaged lookups interpolated at the appropriate amplitudes and charges amps, charges, lookups = getLookupsDCavg( self.neuron.name, self.a, Fdrive, amps, charges, DCs) # Compute QSS states using these lookups nA, nQ, nDC = lookups['V'].shape QSS = {k: np.empty((nA, nQ, nDC)) for k in self.neuron.states} for iA in range(nA): for iDC in range(nDC): QSS_1D = self.neuron.quasiSteadyStates( {k: v[iA, :, iDC] for k, v in lookups.items()}) for k in QSS.keys(): QSS[k][iA, :, iDC] = QSS_1D[k] # Compress outputs if needed if squeeze_output: QSS = {k: v.squeeze() for k, v in QSS.items()} lookups = {k: v.squeeze() for k, v in lookups.items()} # Return reference inputs and outputs return amps, charges, lookups, QSS def iNetQSS(self, Qm, Fdrive, Adrive, DC): ''' Compute quasi-steady state net membrane current for a given combination of US parameters and a given membrane charge density. :param Qm: membrane charge density (C/m2) :param Fdrive: US frequency (Hz) :param Adrive: US amplitude (Pa) :param DC: duty cycle (-) :return: net membrane current (mA/m2) ''' _, _, lookups, QSS = self.quasiSteadyStates( Fdrive, amps=Adrive, charges=Qm, DCs=DC, squeeze_output=True) return self.neuron.iNet(lookups['V'], np.array(list(QSS.values()))) # mA/m2 def evaluateStability(self, Qm0, states0, lkp): ''' Integrate the effective differential system from a given starting point, until clear convergence or clear divergence is found. :param Qm0: initial membrane charge density (C/m2) :param states0: dictionary of initial states values :param lkp: dictionary of 1D data points of "effective" coefficients over the charge domain, for specific frequency and amplitude values. :return: boolean indicating convergence state ''' # Initialize y0 vector t0 = 0. 
y0 = np.array([Qm0] + list(states0.values())) # # Initialize simulator and compute solution # simulator = PeriodicSimulator( # lambda y, t: self.effDerivatives(y, t, lkp), # ivars_to_check=[0]) # simulator.stopfunc = simulator.isAsymptoticallyStable # nmax = int(QSS_HISTORY_INTERVAL // QSS_INTEGRATION_INTERVAL) # t, y, stim = simulator.compute(y0, DT_EFF, QSS_INTEGRATION_INTERVAL, nmax=nmax) # logger.debug('completed in %ss', si_format(tcomp, 1)) # conv = t[-1] < QSS_HISTORY_INTERVAL # Initializing empty list to record evolution of charge deviation n = int(QSS_HISTORY_INTERVAL // QSS_INTEGRATION_INTERVAL) # size of history dQ = [] # As long as there is no clear charge convergence or divergence conv, div = False, False tf, yf = t0, y0 while not conv and not div: # Integrate system for small interval and retrieve final charge deviation t0, y0 = tf, yf sol = solve_ivp( lambda t, y: self.effDerivatives(y, t, lkp), [t0, t0 + QSS_INTEGRATION_INTERVAL], y0, method='LSODA' ) tf, yf = sol.t[-1], sol.y[:, -1] dQ.append(yf[0] - Qm0) # logger.debug('{:.0f} ms: dQ = {:.5f} nC/cm2, avg dQ = {:.5f} nC/cm2'.format( # tf * 1e3, dQ[-1] * 1e5, np.mean(dQ[-n:]) * 1e5)) # If last charge deviation is too large -> divergence if np.abs(dQ[-1]) > QSS_Q_DIV_THR: div = True # If last charge deviation or average deviation in recent history # is small enough -> convergence for x in [dQ[-1], np.mean(dQ[-n:])]: if np.abs(x) < QSS_Q_CONV_THR: conv = True # If max integration duration is been reached -> error if tf > QSS_MAX_INTEGRATION_DURATION: raise ValueError('too many iterations') logger.debug('{}vergence after {:.0f} ms: dQ = {:.5f} nC/cm2'.format( {True: 'con', False: 'di'}[conv], tf * 1e3, dQ[-1] * 1e5)) return conv def fixedPointsQSS(self, Fdrive, Adrive, DC, lkp, dQdt): ''' Compute QSS fixed points along the charge dimension for a given combination of US parameters, and determine their stability. :param Fdrive: US frequency (Hz) :param Adrive: US amplitude (Pa) :param DC: duty cycle (-) :param lkp: lookup dictionary for effective variables along charge dimension :param dQdt: charge derivative profile along charge dimension :return: 2-tuple with values of stable and unstable fixed points ''' logger.debug('A = {:.2f} kPa, DC = {:.0f}%'.format(Adrive * 1e-3, DC * 1e2)) # Extract stable and unstable fixed points from QSS charge variation profile dfunc = lambda Qm: - self.iNetQSS(Qm, Fdrive, Adrive, DC) SFP_candidates = getFixedPoints(lkp['Q'], dQdt, filter='stable', der_func=dfunc).tolist() UFPs = getFixedPoints(lkp['Q'], dQdt, filter='unstable', der_func=dfunc).tolist() SFPs = [] pltvars = self.getPltVars() # For each candidate SFP for i, Qm in enumerate(SFP_candidates): logger.debug('Q-SFP = {:.2f} nC/cm2'.format(Qm * 1e5)) # Re-compute QSS *_, QSS_FP = self.quasiSteadyStates(Fdrive, amps=Adrive, charges=Qm, DCs=DC, squeeze_output=True) # Simulate from unperturbed QSS and evaluate stability if not self.evaluateStability(Qm, QSS_FP, lkp): logger.warning('diverging system at ({:.2f} kPa, {:.2f} nC/cm2)'.format( Adrive * 1e-3, Qm * 1e5)) UFPs.append(Qm) else: # For each state unstable_states = [] for x in self.neuron.states: pltvar = pltvars[x] unit_str = pltvar.get('unit', '') factor = pltvar.get('factor', 1) is_stable_direction = [] for sign in [-1, +1]: # Perturb state with small offset QSS_perturbed = deepcopy(QSS_FP) QSS_perturbed[x] *= (1 + sign * QSS_REL_OFFSET) # If gating state, bound within [0., 1.] if self.neuron.isVoltageGated(x): QSS_perturbed[x] = np.clip(QSS_perturbed[x], 0., 1.) 
logger.debug('{}: {:.5f} -> {:.5f} {}'.format( x, QSS_FP[x] * factor, QSS_perturbed[x] * factor, unit_str)) # Simulate from perturbed QSS and evaluate stability is_stable_direction.append( self.evaluateStability(Qm, QSS_perturbed, lkp)) # Check if system shows stability upon x-state perturbation # in both directions if not np.all(is_stable_direction): unstable_states.append(x) # Classify fixed point as stable only if all states show stability is_stable_FP = len(unstable_states) == 0 {True: SFPs, False: UFPs}[is_stable_FP].append(Qm) logger.info('{}stable fixed-point at ({:.2f} kPa, {:.2f} nC/cm2){}'.format( '' if is_stable_FP else 'un', Adrive * 1e-3, Qm * 1e5, '' if is_stable_FP else ', caused by {} states'.format(unstable_states))) return SFPs, UFPs diff --git a/PySONIC/core/pneuron.py b/PySONIC/core/pneuron.py index c168875..0ca2573 100644 --- a/PySONIC/core/pneuron.py +++ b/PySONIC/core/pneuron.py @@ -1,590 +1,591 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- # @Author: Theo Lemaire # @Date: 2017-08-03 11:53:04 # @Email: theo.lemaire@epfl.ch # @Last Modified by: Theo Lemaire -# @Last Modified time: 2019-06-03 16:55:30 +# @Last Modified time: 2019-06-06 15:50:56 import abc import inspect import re import numpy as np import pandas as pd from .batches import createQueue from .model import Model from .simulators import PWSimulator from ..postpro import findPeaks from ..constants import * from ..utils import si_format, logger, plural, binarySearch class PointNeuron(Model): ''' Generic point-neuron model interface. ''' tscale = 'ms' # relevant temporal scale of the model + simkey = 'ESTIM' # keyword used to characterize simulations made with this model def __init__(self): self.Qm0 = self.Cm0 * self.Vm0 * 1e-3 # C/cm2 def __repr__(self): return self.__class__.__name__ def filecode(self, Astim, tstim, toffset, PRF, DC): ''' File naming convention. ''' - return 'ESTIM_{}_{}_{:.1f}mA_per_m2_{:.0f}ms{}'.format( - self.name, 'CW' if DC == 1 else 'PW', Astim, tstim * 1e3, + return '{}_{}_{}_{:.1f}mA_per_m2_{:.0f}ms{}'.format( + self.simkey, self.name, 'CW' if DC == 1 else 'PW', Astim, tstim * 1e3, '_PRF{:.2f}Hz_DC{:.2f}%'.format(PRF, DC * 1e2) if DC < 1. else '') @property @abc.abstractmethod def name(self): raise NotImplementedError @property @abc.abstractmethod def Cm0(self): raise NotImplementedError @property @abc.abstractmethod def Vm0(self): raise NotImplementedError @abc.abstractmethod def currents(self, Vm, states): ''' Compute all ionic currents per unit area. :param Vm: membrane potential (mV) :states: state probabilities of the ion channels :return: dictionary of ionic currents per unit area (mA/m2) ''' def iNet(self, Vm, states): ''' net membrane current :param Vm: membrane potential (mV) :states: states of ion channels gating and related variables :return: current per unit area (mA/m2) ''' return sum(self.currents(Vm, states).values()) def dQdt(self, Vm, states): ''' membrane charge density variation rate :param Vm: membrane potential (mV) :states: states of ion channels gating and related variables :return: variation rate (mA/m2) ''' return -self.iNet(Vm, states) def titrationFunc(self, *args, **kwargs): ''' Default titration function. ''' return self.isExcited(*args, **kwargs) def currentToConcentrationRate(self, z_ion, depth): ''' Compute the conversion factor from a specific ionic current (in mA/m2) into a variation rate of submembrane ion concentration (in M/s). 
:param: z_ion: ion valence :param depth: submembrane depth (m) :return: conversion factor (Mmol.m-1.C-1) ''' return 1e-6 / (z_ion * depth * FARADAY) def nernst(self, z_ion, Cion_in, Cion_out, T): ''' Nernst potential of a specific ion given its intra and extracellular concentrations. :param z_ion: ion valence :param Cion_in: intracellular ion concentration :param Cion_out: extracellular ion concentration :param T: temperature (K) :return: ion Nernst potential (mV) ''' return (Rg * T) / (z_ion * FARADAY) * np.log(Cion_out / Cion_in) * 1e3 def vtrap(self, x, y): ''' Generic function used to compute rate constants. ''' return x / (np.exp(x / y) - 1) def efun(self, x): ''' Generic function used to compute rate constants. ''' return x / (np.exp(x) - 1) def ghkDrive(self, Vm, Z_ion, Cion_in, Cion_out, T): ''' Use the Goldman-Hodgkin-Katz equation to compute the electrochemical driving force of a specific ion species for a given membrane potential. :param Vm: membrane potential (mV) :param Cin: intracellular ion concentration (M) :param Cout: extracellular ion concentration (M) :param T: temperature (K) :return: electrochemical driving force of a single ion particle (mC.m-3) ''' x = Z_ion * FARADAY * Vm / (Rg * T) * 1e-3 # [-] eCin = Cion_in * self.efun(-x) # M eCout = Cion_out * self.efun(x) # M return FARADAY * (eCin - eCout) * 1e6 # mC/m3 def getCurrentsNames(self): return list(self.currents(np.nan, [np.nan] * len(self.states)).keys()) def getPltScheme(self): pltscheme = { 'Q_m': ['Qm'], 'V_m': ['Vm'] } pltscheme['I'] = self.getCurrentsNames() + ['iNet'] for cname in self.getCurrentsNames(): if 'Leak' not in cname: key = 'i_{{{}}}\ kin.'.format(cname[1:]) cargs = inspect.getargspec(getattr(self, cname))[0][1:] pltscheme[key] = [var for var in cargs if var not in ['Vm', 'Cai']] return pltscheme def getPltVars(self, wrapleft='df["', wrapright='"]'): ''' Return a dictionary with information about all plot variables related to the neuron. 
''' pltvars = { 'Qm': { 'desc': 'membrane charge density', 'label': 'Q_m', 'unit': 'nC/cm^2', 'factor': 1e5, 'bounds': (-100, 50) }, 'Vm': { 'desc': 'membrane potential', 'label': 'V_m', 'unit': 'mV', 'y0': self.Vm0, 'bounds': (-150, 70) }, 'ELeak': { 'constant': 'obj.ELeak', 'desc': 'non-specific leakage current resting potential', 'label': 'V_{leak}', 'unit': 'mV', 'ls': '--', 'color': 'k' } } for cname in self.getCurrentsNames(): cfunc = getattr(self, cname) cargs = inspect.getargspec(cfunc)[0][1:] pltvars[cname] = { 'desc': inspect.getdoc(cfunc).splitlines()[0], 'label': 'I_{{{}}}'.format(cname[1:]), 'unit': 'A/m^2', 'factor': 1e-3, 'func': '{}({})'.format(cname, ', '.join(['{}{}{}'.format(wrapleft, a, wrapright) for a in cargs])) } for var in cargs: if var not in ['Vm', 'Cai']: vfunc = getattr(self, 'der{}{}'.format(var[0].upper(), var[1:])) desc = cname + re.sub( '^Evolution of', '', inspect.getdoc(vfunc).splitlines()[0]) pltvars[var] = { 'desc': desc, 'label': var, 'bounds': (-0.1, 1.1) } pltvars['iNet'] = { 'desc': inspect.getdoc(getattr(self, 'iNet')).splitlines()[0], 'label': 'I_{net}', 'unit': 'A/m^2', 'factor': 1e-3, 'func': 'iNet({0}Vm{1}, {2}{3}{4}.values.T)'.format( wrapleft, wrapright, wrapleft[:-1], self.states, wrapright[1:]), 'ls': '--', 'color': 'black' } pltvars['dQdt'] = { 'desc': inspect.getdoc(getattr(self, 'dQdt')).splitlines()[0], 'label': 'dQ_m/dt', 'unit': 'A/m^2', 'factor': 1e-3, 'func': 'dQdt({0}Vm{1}, {2}{3}{4}.values.T)'.format( wrapleft, wrapright, wrapleft[:-1], self.states, wrapright[1:]), 'ls': '--', 'color': 'black' } for x in self.getGates(): for rate in ['alpha', 'beta']: pltvars['{}{}'.format(rate, x)] = { 'label': '\\{}_{{{}}}'.format(rate, x), 'unit': 'ms^{-1}', 'factor': 1e-3 } return pltvars def getRatesNames(self, states): ''' Return a list of names of the alpha and beta rates of the neuron. ''' return list(sum( [['alpha{}'.format(x.lower()), 'beta{}'.format(x.lower())] for x in states], [])) @abc.abstractmethod def steadyStates(self, Vm): ''' Compute the steady-state values for a specific membrane potential value. :param Vm: membrane potential (mV) :return: dictionary of steady-states ''' @abc.abstractmethod def derStates(self, Vm, states): ''' Compute the derivatives of channel states. :param Vm: membrane potential (mV) :states: state probabilities of the ion channels :return: current per unit area (mA/m2) ''' @abc.abstractmethod def computeEffRates(self, Vm): ''' Get the effective rate constants of ion channels, averaged along an acoustic cycle, for future use in effective simulations. :param Vm: array of membrane potential values for an acoustic cycle (mV) :return: a dictionary of rate average constants (s-1) ''' def interpEffRates(self, Qm, lkp, keys=None): ''' Interpolate effective rate constants for a given charge density using reference lookup vectors. :param Qm: membrane charge density (C/m2) :states: state probabilities of the ion channels :param lkp: dictionary of 1D vectors of "effective" coefficients over the charge domain, for specific frequency and amplitude values. :return: dictionary of interpolated rate constants ''' if keys is None: keys = self.rates return {k: np.interp(Qm, lkp['Q'], lkp[k], left=np.nan, right=np.nan) for k in keys} def interpVmeff(self, Qm, lkp): ''' Interpolate the effective membrane potential for a given charge density using reference lookup vectors. 
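# Illustrative sketch of the interpEffRates / interpVmeff pattern above: a plain 1D
# np.interp over the lookup charge vector. The table below is fabricated, not actual
# SONIC lookup data.
import numpy as np
lkp = {
    'Q': np.linspace(-80e-5, 50e-5, 5),                   # C/m2
    'V': np.array([-75., -60., -20., 10., 40.]),          # mV (made-up values)
    'alpham': np.array([0.1, 0.3, 1.2, 2.5, 3.1]) * 1e3,  # s-1 (made-up values)
}
Qm = -40e-5  # C/m2, query charge density
Vmeff = np.interp(Qm, lkp['Q'], lkp['V'], left=np.nan, right=np.nan)
alpham = np.interp(Qm, lkp['Q'], lkp['alpham'], left=np.nan, right=np.nan)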
:param Qm: membrane charge density (C/m2) :param lkp: dictionary of 1D vectors of "effective" coefficients over the charge domain, for specific frequency and amplitude values. :return: dictionary of interpolated rate constants ''' return np.interp(Qm, lkp['Q'], lkp['V'], left=np.nan, right=np.nan) @abc.abstractmethod def derEffStates(self, Qm, states, lkp): ''' Compute the effective derivatives of channel states, based on 1-dimensional linear interpolation of "effective" coefficients that summarize the system's behaviour over an acoustic cycle. :param Qm: membrane charge density (C/m2) :states: state probabilities of the ion channels :param lkp: dictionary of 1D vectors of "effective" coefficients over the charge domain, for specific frequency and amplitude values. ''' def Qbounds(self): ''' Determine bounds of membrane charge physiological range for a given neuron. ''' return np.array([np.round(self.Vm0 - 25.0), 50.0]) * self.Cm0 * 1e-3 # C/m2 def isVoltageGated(self, state): ''' Determine whether a given state is purely voltage-gated or not.''' return 'alpha{}'.format(state.lower()) in self.rates def getGates(self): ''' Retrieve the names of the neuron's states that match an ion channel gating. ''' gates = [] for x in self.states: if self.isVoltageGated(x): gates.append(x) return gates def qsStates(self, lkp, states): ''' Compute a collection of quasi steady states using the standard xinf = ax / (ax + Bx) equation. :param lkp: dictionary of 1D vectors of "effective" coefficients over the charge domain, for specific frequency and amplitude values. :return: dictionary of quasi-steady states ''' return { x: lkp['alpha{}'.format(x)] / (lkp['alpha{}'.format(x)] + lkp['beta{}'.format(x)]) for x in states } @abc.abstractmethod def quasiSteadyStates(self, lkp): ''' Compute the quasi-steady states of a neuron for a range of membrane charge densities, based on 1-dimensional lookups interpolated at a given sonophore diameter, US frequency, US amplitude and duty cycle. :param lkp: dictionary of 1D vectors of "effective" coefficients over the charge domain, for specific frequency and amplitude values. :return: dictionary of quasi-steady states ''' def getRates(self, Vm): ''' Compute the ion channels rate constants for a given membrane potential. :param Vm: membrane potential (mV) :return: a dictionary of rate constants and their values at the given potential. ''' rates = {} for x in self.getGates(): x = x.lower() alpha_str, beta_str = ['{}{}'.format(s, x.lower()) for s in ['alpha', 'beta']] inf_str, tau_str = ['{}inf'.format(x.lower()), 'tau{}'.format(x.lower())] if hasattr(self, 'alpha{}'.format(x)): alphax = getattr(self, alpha_str)(Vm) betax = getattr(self, beta_str)(Vm) elif hasattr(self, '{}inf'.format(x)): xinf = getattr(self, inf_str)(Vm) taux = getattr(self, tau_str)(Vm) alphax = xinf / taux betax = 1 / taux - alphax rates[alpha_str] = alphax rates[beta_str] = betax return rates def Vderivatives(self, y, t, Iinj): ''' Compute the derivatives of a V-cast HH system for a specific value of injected current. 
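# Illustrative sketch (made-up values) of the two equivalent gate parameterizations
# reconciled in getRates(): given a steady state and time constant, recover the
# alpha/beta pair, and note the quasi-steady-state relation used in qsStates().
minf, taum = 0.05, 0.5e-3           # steady state (-) and time constant (s), illustrative
alpham = minf / taum                # s-1
betam = 1 / taum - alpham           # s-1
m_qss = alpham / (alpham + betam)   # equals minf, the relation used in qsStates()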
:param y: vector of HH system variables at time t :param t: time value (s, unused) :param Iinj: injected current (mA/m2) :return: vector of HH system derivatives at time t ''' Vm, *states = y Iionic = self.iNet(Vm, states) # mA/m2 dVmdt = (- Iionic + Iinj) / self.Cm0 # mV/s dstates = self.derStates(Vm, states) return [dVmdt, *[dstates[k] for k in self.states]] def Qderivatives(self, y, t, Cm=None): ''' Compute the derivatives of the n-ODE HH system variables, based on a value of membrane capacitance. :param y: vector of HH system variables at time t :param t: specific instant in time (s) :param Cm: membrane capacitance (F/m2) :return: vector of HH system derivatives at time t ''' if Cm is None: Cm = self.Cm0 Qm, *states = y Vm = Qm / Cm * 1e3 # mV dQmdt = - self.iNet(Vm, states) * 1e-3 # A/m2 dstates = self.derStates(Vm, states) return [dQmdt, *[dstates[k] for k in self.states]] def checkInputs(self, Astim, tstim, toffset, PRF, DC): ''' Check validity of electrical stimulation parameters. :param Astim: pulse amplitude (mA/m2) :param tstim: pulse duration (s) :param toffset: offset duration (s) :param PRF: pulse repetition frequency (Hz) :param DC: pulse duty cycle (-) ''' # Check validity of stimulation parameters if not all(isinstance(param, float) for param in [Astim, tstim, toffset, DC]): raise TypeError('Invalid stimulation parameters (must be float typed)') if tstim <= 0: raise ValueError('Invalid stimulus duration: {} ms (must be strictly positive)' .format(tstim * 1e3)) if toffset < 0: raise ValueError('Invalid stimulus offset: {} ms (must be positive or null)' .format(toffset * 1e3)) if DC <= 0.0 or DC > 1.0: raise ValueError('Invalid duty cycle: {} (must be within ]0; 1])'.format(DC)) if DC < 1.0: if not isinstance(PRF, float): raise TypeError('Invalid PRF value (must be float typed)') if PRF is None: raise AttributeError('Missing PRF value (must be provided when DC < 1)') if PRF < 1 / tstim: raise ValueError('Invalid PRF: {} Hz (PR interval exceeds stimulus duration)' .format(PRF)) def meta(self, Astim, tstim, toffset, PRF, DC): ''' Return information about object and simulation parameters. :param Astim: stimulus amplitude (mA/m2) :param tstim: stimulus duration (s) :param toffset: stimulus offset (s) :param PRF: pulse repetition frequency (Hz) :param DC: stimulus duty cycle (-) :return: meta-data dictionary ''' return { 'neuron': self.name, 'Astim': Astim, 'tstim': tstim, 'toffset': toffset, 'PRF': PRF, 'DC': DC } def simulate(self, Astim, tstim, toffset, PRF=100., DC=1.0): ''' Simulate a specific neuron model for a specific set of electrical parameters, and return output data in a dataframe. :param Astim: pulse amplitude (mA/m2) :param tstim: pulse duration (s) :param toffset: offset duration (s) :param PRF: pulse repetition frequency (Hz) :param DC: pulse duty cycle (-) :return: 2-tuple with the output dataframe and computation time. 
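# Illustrative sanity check (assumed typical values) of the charge-cast conversion used
# in Qderivatives(): Vm = Qm / Cm * 1e3, the inverse of the Qm0 definition in __init__.
Cm0 = 1e-2                      # F/m2 (i.e. 1 uF/cm2)
Vm0 = -71.9                     # mV
Qm0 = Cm0 * Vm0 * 1e-3          # C/m2, as in PointNeuron.__init__
assert abs(Qm0 / Cm0 * 1e3 - Vm0) < 1e-9   # recovers Vm0 in mV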
''' logger.info( '%s: simulation @ A = %sA/m2, t = %ss (%ss offset)%s', self, si_format(Astim, 2, space=' '), *si_format([tstim, toffset], 1, space=' '), (', PRF = {}Hz, DC = {:.2f}%'.format( si_format(PRF, 2, space=' '), DC * 1e2) if DC < 1.0 else '')) # Check validity of stimulation parameters self.checkInputs(Astim, tstim, toffset, PRF, DC) # Set initial conditions steady_states = self.steadyStates(self.Vm0) y0 = np.array([self.Vm0, *[steady_states[k] for k in self.states]]) # Initialize simulator and compute solution logger.debug('Computing solution') simulator = PWSimulator( lambda y, t: self.Vderivatives(y, t, Astim), lambda y, t: self.Vderivatives(y, t, 0.)) (t, y, stim), tcomp = simulator(y0, DT_ESTIM, tstim, toffset, PRF, DC, monitor_time=True) logger.debug('completed in %ss', si_format(tcomp, 1)) # Store output in dataframe data = pd.DataFrame({ 't': t, 'stimstate': stim, 'Vm': y[:, 0], 'Qm': y[:, 0] * self.Cm0 * 1e-3 }) data['Qm'] = data['Vm'].values * self.Cm0 * 1e-3 for i in range(len(self.states)): data[self.states[i]] = y[:, i + 1] # Log number of detected spikes nspikes = self.getNSpikes(data) logger.debug('{} spike{} detected'.format(nspikes, plural(nspikes))) # Return dataframe and computation time return data, tcomp def simQueue(self, amps, durations, offsets, PRFs, DCs): ''' Create a serialized 2D array of all parameter combinations for a series of individual parameter sweeps, while avoiding repetition of CW protocols for a given PRF sweep. :param amps: list (or 1D-array) of acoustic amplitudes :param durations: list (or 1D-array) of stimulus durations :param offsets: list (or 1D-array) of stimulus offsets (paired with durations array) :param PRFs: list (or 1D-array) of pulse-repetition frequencies :param DCs: list (or 1D-array) of duty cycle values :return: list of parameters (list) for each simulation ''' if amps is None: amps = [np.nan] DCs = np.array(DCs) queue = [] if 1.0 in DCs: queue += createQueue(amps, durations, offsets, min(PRFs), 1.0) if np.any(DCs != 1.0): queue += createQueue(amps, durations, offsets, PRFs, DCs[DCs != 1.0]) for item in queue: if np.isnan(item[0]): item[0] = None return queue def getNSpikes(self, data): ''' Compute number of spikes in charge profile of simulation output. :param data: dataframe containing output time series :return: number of detected spikes ''' dt = np.diff(data.ix[:1, 't'].values)[0] ipeaks, *_ = findPeaks( data['Qm'].values, SPIKE_MIN_QAMP, int(np.ceil(SPIKE_MIN_DT / dt)), SPIKE_MIN_QPROM ) return ipeaks.size def getStabilizationValue(self, data): ''' Determine stabilization value from the charge profile of a simulation output. :param data: dataframe containing output time series :return: charge stabilization value (or np.nan if no stabilization detected) ''' # Extract charge signal posterior to observation window t, Qm = [data[key].values for key in ['t', 'Qm']] if t.max() <= TMIN_STABILIZATION: raise ValueError('solution length is too short to assess stabilization') Qm = Qm[t > TMIN_STABILIZATION] # Compute variation range Qm_range = np.ptp(Qm) logger.debug('%.2f nC/cm2 variation range over the last %.0f ms, Qmf = %.2f nC/cm2', Qm_range * 1e5, TMIN_STABILIZATION * 1e3, Qm[-1] * 1e5) # Return final value only if stabilization is detected if np.ptp(Qm) < QSS_Q_DIV_THR: return Qm[-1] else: return np.nan def isExcited(self, data): ''' Determine if neuron is excited from simulation output. 
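# Illustrative sketch of the stabilization test used in getStabilizationValue() above:
# keep only the charge trace past an observation window and accept the final value when
# its peak-to-peak variation is below a divergence threshold. The two constants below
# are placeholders, not the package's TMIN_STABILIZATION / QSS_Q_DIV_THR values.
import numpy as np
TMIN_STABILIZATION, QSS_Q_DIV_THR = 0.5, 1e-6        # s, C/m2 (assumed placeholders)
t = np.linspace(0., 1., 1001)                        # s
Qm = -54e-5 + 5e-5 * np.exp(-t / 0.1)                # synthetic converging charge trace (C/m2)
Qtail = Qm[t > TMIN_STABILIZATION]
Qstab = Qtail[-1] if np.ptp(Qtail) < QSS_Q_DIV_THR else np.nan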
:param data: dataframe containing output time series :return: boolean stating whether neuron is excited or not ''' return self.getNSpikes(data) > 0 def isSilenced(self, data): ''' Determine if neuron is silenced from simulation output. :param data: dataframe containing output time series :return: boolean stating whether neuron is silenced or not ''' return not np.isnan(self.getStabilizationValue(data)) def titrate(self, tstim, toffset, PRF, DC, xfunc=None, Arange=(0., 2 * TITRATION_ESTIM_A_MAX)): ''' Use a binary search to determine the threshold amplitude needed to obtain neural excitation for a given duration, PRF and duty cycle. :param tstim: duration of the electrical stimulation (s) :param toffset: duration of the offset (s) :param PRF: pulse repetition frequency (Hz) :param DC: pulse duty cycle (-) :param xfunc: function determining whether condition is reached from simulation output :param Arange: search interval for Astim, iteratively refined :return: excitation threshold amplitude (mA/m2) ''' # Default output function if xfunc is None: xfunc = self.titrationFunc return binarySearch( lambda x: xfunc(self.simulate(*x)[0]), [tstim, toffset, PRF, DC], 0, Arange, TITRATION_ESTIM_DA_MAX ) diff --git a/PySONIC/neurons/__init__.py b/PySONIC/neurons/__init__.py index 9270ea8..10c3f90 100644 --- a/PySONIC/neurons/__init__.py +++ b/PySONIC/neurons/__init__.py @@ -1,25 +1,36 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- # @Author: Theo Lemaire # @Date: 2017-06-06 13:36:00 # @Email: theo.lemaire@epfl.ch # @Last Modified by: Theo Lemaire -# @Last Modified time: 2019-01-08 15:26:48 +# @Last Modified time: 2019-06-06 15:02:07 import inspect import sys from .cortical import CorticalRS, CorticalFS, CorticalLTS, CorticalIB from .thalamic import ThalamicRE, ThalamoCortical from .leech import LeechTouch, LeechPressure, LeechRetzius from .stn import OtsukaSTN from .fh import FrankenhaeuserHuxley def getNeuronsDict(): + ''' Construct a dictionary of all the implemented point neuron classes, indexed by name. ''' current_module = sys.modules[__name__] neurons_dict = {} for _, obj in inspect.getmembers(current_module): if inspect.isclass(obj) and isinstance(obj.name, str): neurons_dict[obj.name] = obj return neurons_dict + + + def getPointNeuron(name): + ''' Return a point-neuron instance corresponding to a given name. ''' + neuron_classes = getNeuronsDict() + try: + return neuron_classes[name]() + except KeyError: + raise ValueError('"{}" neuron not found. Implemented neurons are: {}'.format( + name, ', '.join(list(neuron_classes.keys())))) diff --git a/PySONIC/parsers.py b/PySONIC/parsers.py index 547cb53..1ab5536 100644 --- a/PySONIC/parsers.py +++ b/PySONIC/parsers.py @@ -1,360 +1,358 @@ import logging import numpy as np from argparse import ArgumentParser from .utils import Intensity2Pressure, selectDirDialog -from .neurons import getNeuronsDict, CorticalRS +from .neurons import getPointNeuron, CorticalRS class SimParser(ArgumentParser): ''' Generic simulation parser interface.
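# Usage sketch (illustrative parameter values; this actually runs simulations):
# retrieve a neuron by name through the new getPointNeuron() helper and call the
# titration routine defined above to find an excitation threshold.
from PySONIC.neurons import getPointNeuron

pneuron = getPointNeuron('RS')                       # cortical regular-spiking cell
Athr = pneuron.titrate(100e-3, 50e-3, 100., 0.5)     # tstim (s), toffset (s), PRF (Hz), DC (-)
print('threshold amplitude: {:.1f} mA/m2'.format(Athr))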
''' dist_str = '[scale min max n]' def __init__(self): super().__init__() self.defaults = {} self.allowed = {} self.factors = {} self.addPlot() self.addMPI() self.addVerbose() def getDistribution(self, xmin, xmax, nx, scale='lin'): if scale == 'log': xmin, xmax = np.log10(xmin), np.log10(xmax) return {'lin': np.linspace, 'log': np.logspace}[scale](xmin, xmax, nx) def getDistFromList(self, xlist): if not isinstance(xlist, list): raise TypeError('Input must be a list') if len(xlist) != 4: raise ValueError('List must contain exactly 4 arguments ([type, min, max, n])') scale = xlist[0] if scale not in ('log', 'lin'): raise ValueError('Unknown distribution type (must be "lin" or "log")') xmin, xmax = [float(x) for x in xlist[1:-1]] if xmin >= xmax: raise ValueError('Specified minimum higher or equal than specified maximum') nx = int(xlist[-1]) if nx < 2: raise ValueError('Specified number must be at least 2') return self.getDistribution(xmin, xmax, nx, scale=scale) def addVerbose(self): self.add_argument( '-v', '--verbose', default=False, action='store_true', help='Increase verbosity') def addPlot(self): self.add_argument( '-p', '--plot', type=str, nargs='+', help='Variables to plot') def addMPI(self): self.add_argument( '--mpi', default=False, action='store_true', help='Use multiprocessing') def addSave(self): self.add_argument( '-s', '--save', default=False, action='store_true', help='Save output figures') def addCmap(self, default='viridis'): self.add_argument( '-c', '--cmap', type=str, default=default, help='Colormap name') def addInputDir(self): self.add_argument( '-i', '--inputdir', type=str, help='Input directory') def addOutputDir(self): self.add_argument( '-o', '--outputdir', type=str, help='Output directory') def parseDir(self, key, args): directory = args[key] if args[key] is not None else selectDirDialog() if directory == '': raise ValueError('No {} selected'.format(key)) return directory def parseInputDir(self, args): return self.parseDir('inputdir', args) def parseOutputDir(self, args): return self.parseDir('outputdir', args) def parseLogLevel(self, args): return logging.DEBUG if args.pop('verbose') else logging.INFO def parsePltScheme(self, args): if args['plot'] == ['all']: return None else: return {x: [x] for x in args['plot']} def restrict(self, args, keys): if sum([args[x] is not None for x in keys]) > 1: raise ValueError( 'You must provide only one of the following arguments: {}'.format(', '.join(keys))) def parse2array(self, args, key, factor=1): return np.array(args[key]) * factor def parse(self): args = vars(super().parse_args()) args['loglevel'] = self.parseLogLevel(args) for k, v in self.defaults.items(): if args[k] is None: args[k] = [v] return args class MechSimParser(SimParser): ''' Parser to run mechanical simulations from the command line. ''' def __init__(self): super().__init__() self.defaults.update({ 'radius': 32.0, # nm 'embedding': 0., # um 'Cm0': CorticalRS().Cm0 * 1e2, # uF/m2 'Qm0': CorticalRS().Qm0 * 1e5, # nC/m2 'freq': 500.0, # kHz 'amp': 100.0, # kPa 'charge': 0. 
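# Illustrative sketch: the "[scale min max n]" range syntax handled by getDistFromList()
# boils down to a linspace/logspace dispatch, reproduced standalone below.
import numpy as np

def dist_from_list(xlist):
    # same logic as SimParser.getDistFromList, without the validation checks
    scale, xmin, xmax, nx = xlist[0], float(xlist[1]), float(xlist[2]), int(xlist[3])
    if scale == 'log':
        xmin, xmax = np.log10(xmin), np.log10(xmax)
    return {'lin': np.linspace, 'log': np.logspace}[scale](xmin, xmax, nx)

print(dist_from_list(['log', 10, 600, 5]))   # log-spaced values, e.g. an amplitude sweep in kPa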
# nC/cm2 }) self.factors.update({ 'radius': 1e-9, 'embedding': 1e-6, 'Cm0': 1e-2, 'Qm0': 1e-5, 'freq': 1e3, 'amp': 1e3, 'charge': 1e-5 }) self.addRadius() self.addEmbedding() self.addCm0() self.addQm0() self.addFdrive() self.addAdrive() self.addCharge() def addRadius(self): self.add_argument( '-a', '--radius', nargs='+', type=float, help='Sonophore radius (nm)') def addEmbedding(self): self.add_argument( '--embedding', nargs='+', type=float, help='Embedding depth (um)') def addCm0(self): self.add_argument( '--Cm0', type=float, help='Resting membrane capacitance (uF/cm2)') def addQm0(self): self.add_argument( '--Qm0', type=float, help='Resting membrane charge density (nC/cm2)') def addFdrive(self): self.add_argument( '-f', '--freq', nargs='+', type=float, help='US frequency (kHz)') def addAdrive(self): self.add_argument( '-A', '--amp', nargs='+', type=float, help='Acoustic pressure amplitude (kPa)') self.add_argument( '--Arange', type=str, nargs='+', help='Amplitude range {} (kPa)'.format(self.dist_str)) self.add_argument( '-I', '--intensity', nargs='+', type=float, help='Acoustic intensity (W/cm2)') self.add_argument( '--Irange', type=str, nargs='+', help='Intensity range {} (W/cm2)'.format(self.dist_str)) def addCharge(self): self.add_argument( '-Q', '--charge', nargs='+', type=float, help='Membrane charge density (nC/cm2)') def parseAmp(self, args): params = ['Irange', 'Arange', 'intensity', 'amp'] self.restrict(args, params[:-1]) Irange, Arange, Int, Adrive = [args.pop(k) for k in params] Ascale = None if Irange is not None: amps = Intensity2Pressure(self.getDistFromList(Irange) * 1e4) # Pa Ascale = Irange[0] elif Int is not None: amps = Intensity2Pressure(np.array(Int) * 1e4) # Pa elif Arange is not None: amps = self.getDistFromList(Arange) * self.factors['amp'] # Pa Ascale = Arange[0] else: amps = np.array(Adrive) * self.factors['amp'] # Pa Ascale = args.get('Arange', ['lin'])[0] return amps, Ascale def parse(self): args = super().parse() args['amp'], args['Ascale'] = self.parseAmp(args) for key in ['radius', 'embedding', 'Cm0', 'Qm0', 'freq', 'charge']: args[key] = self.parse2array(args, key, factor=self.factors[key]) return args class PWSimParser(SimParser): ''' Generic parser interface to run PW patterned simulations from the command line. 
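# Usage sketch (illustrative values): MechSimParser is an ArgumentParser subclass, so it
# can be driven programmatically by overriding sys.argv; the flag names come from the
# add* methods shown above.
import sys
from PySONIC.parsers import MechSimParser

sys.argv = ['prog', '-a', '32', '-f', '500', '-A', '100']   # radius (nm), freq (kHz), amp (kPa)
args = MechSimParser().parse()
print(args['radius'], args['freq'], args['amp'])            # arrays rescaled to SI units (m, Hz, Pa)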
''' def __init__(self): super().__init__() self.defaults.update({ 'neuron': 'RS', 'tstim': 100.0, # ms 'toffset': 50., # ms 'PRF': 100.0, # Hz 'DC': 100.0 # % }) self.factors.update({ 'tstim': 1e-3, 'toffset': 1e-3, 'PRF': 1., 'DC': 1e-2 }) self.allowed.update({ - 'neuron': list(getNeuronsDict().keys()), 'DC': range(101) }) self.addNeuron() self.addTstim() self.addToffset() self.addPRF() self.addDC() self.addSpanDC() self.addTitrate() def addNeuron(self): self.add_argument( - '-n', '--neuron', type=str, nargs='+', - choices=self.allowed['neuron'], help='Neuron name (string)') + '-n', '--neuron', type=str, nargs='+', help='Neuron name (string)') def addTstim(self): self.add_argument( '-t', '--tstim', nargs='+', type=float, help='Stimulus duration (ms)') def addToffset(self): self.add_argument( '--toffset', nargs='+', type=float, help='Offset duration (ms)') def addPRF(self): self.add_argument( '--PRF', nargs='+', type=float, help='PRF (Hz)') def addDC(self): self.add_argument( '--DC', nargs='+', type=float, help='Duty cycle (%%)') def addSpanDC(self): self.add_argument( '--spanDC', default=False, action='store_true', help='Span DC from 1 to 100%%') def addTitrate(self): self.add_argument( '--titrate', default=False, action='store_true', help='Perform titration') def parseNeuron(self, args): - for item in args['neuron']: - if item not in self.allowed['neuron']: - raise ValueError('Unknown neuron type: "{}"'.format(item)) - return [getNeuronsDict()[n]() for n in args['neuron']] + # for item in args['neuron']: + # if item not in self.allowed['neuron']: + # raise ValueError('Unknown neuron type: "{}"'.format(item)) + return [getPointNeuron(n) for n in args['neuron']] def parseAmp(self, args): return NotImplementedError def parseDC(self, args): if args.pop('spanDC'): return np.arange(1, 101) * self.factors['DC'] # (-) else: return np.array(args['DC']) * self.factors['DC'] # (-) def parse(self, args=None): if args is None: args = super().parse() args['neuron'] = self.parseNeuron(args) args['DC'] = self.parseDC(args) for key in ['tstim', 'toffset', 'PRF']: args[key] = self.parse2array(args, key, factor=self.factors[key]) return args class EStimParser(PWSimParser): ''' Parser to run E-STIM simulations from the command line. ''' def __init__(self): super().__init__() self.defaults.update({'amp': 10.0}) # mA/m2 self.factors.update({'amp': 1.}) self.addAstim() def addAstim(self): self.add_argument( '-A', '--amp', nargs='+', type=float, help='Amplitude of injected current density (mA/m2)') self.add_argument( '--Arange', type=str, nargs='+', help='Amplitude range {} (mA/m2)'.format(self.dist_str)) def parseAmp(self, args): if args.pop('titrate'): return None Arange, Astim = [args.pop(k) for k in ['Arange', 'amp']] Ascale = 'lin' if Arange is not None: amps = self.getDistFromList(Arange) * self.factors['amp'] # mA/m2 Ascale = Arange[0] else: amps = np.array(Astim) * self.factors['amp'] # mA/m2 return amps, Ascale def parse(self): args = super().parse() args['amp'], args['Ascale'] = self.parseAmp(args) return args class AStimParser(PWSimParser, MechSimParser): ''' Parser to run A-STIM simulations from the command line. 
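# Illustrative sketch: the CLI no longer restricts --neuron to a fixed choices list;
# validation is deferred to getPointNeuron(), which raises a descriptive error for
# unknown names.
from PySONIC.neurons import getPointNeuron

try:
    getPointNeuron('not_a_neuron')
except ValueError as err:
    print(err)   # lists the implemented neuron names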
''' def __init__(self): MechSimParser.__init__(self) PWSimParser.__init__(self) self.defaults.update({'method': 'sonic'}) self.allowed.update({'method': ['classic', 'hybrid', 'sonic']}) self.addMethod() def addMethod(self): self.add_argument( '-m', '--method', nargs='+', type=str, help='Numerical integration method ({})'.format(', '.join(self.allowed['method']))) def parseMethod(self, args): for item in args['method']: if item not in self.allowed['method']: raise ValueError('Unknown neuron type: "{}"'.format(item)) def parseAmp(self, args): if args.pop('titrate'): return None return MechSimParser.parseAmp(self, args) def parse(self): args = PWSimParser.parse(self, args=MechSimParser.parse(self)) for k in ['Cm0', 'Qm0', 'embedding', 'charge']: del args[k] self.parseMethod(args) return args diff --git a/PySONIC/plt/QSS.py b/PySONIC/plt/QSS.py index 1d36265..c6e0676 100644 --- a/PySONIC/plt/QSS.py +++ b/PySONIC/plt/QSS.py @@ -1,434 +1,434 @@ import inspect import logging import pandas as pd import numpy as np import matplotlib.pyplot as plt from matplotlib import cm, colors from ..postpro import getFixedPoints from ..core import NeuronalBilayerSonophore, Batch from .pltutils import * from ..utils import logger, cachePKL -root = 'C:/Users/Théo/Desktop/QSS' +root = '../../../QSS analysis' def plotVarQSSDynamics(neuron, a, Fdrive, Adrive, charges, varname, varrange, fs=12): ''' Plot the QSS-approximated derivative of a specific variable as function of the variable itself, as well as equilibrium values, for various membrane charge densities at a given acoustic amplitude. :param neuron: neuron object :param a: sonophore radius (m) :param Fdrive: US frequency (Hz) :param Adrive: US amplitude (Pa) :param charges: charge density vector (C/m2) :param varname: name of variable to plot :param varrange: range over which to compute the derivative :return: figure handle ''' # Extract information about variable to plot pltvar = neuron.getPltVars()[varname] # Get methods to compute derivative and steady-state of variable of interest derX_func = getattr(neuron, 'der{}{}'.format(varname[0].upper(), varname[1:])) Xinf_func = getattr(neuron, '{}inf'.format(varname)) derX_args = inspect.getargspec(derX_func)[0][1:] Xinf_args = inspect.getargspec(Xinf_func)[0][1:] # Get dictionary of charge and amplitude dependent QSS variables nbls = NeuronalBilayerSonophore(a, neuron, Fdrive) _, Qref, lookups, QSS = nbls.quasiSteadyStates( Fdrive, amps=Adrive, charges=charges, squeeze_output=True) df = QSS df['Vm'] = lookups['V'] # Create figure fig, ax = plt.subplots(figsize=(6, 4)) ax.set_title('{} neuron - QSS {} dynamics @ {:.2f} kPa'.format( neuron.name, pltvar['desc'], Adrive * 1e-3), fontsize=fs) ax.set_xscale('log') for key in ['top', 'right']: ax.spines[key].set_visible(False) ax.set_xlabel('$\\rm {}\ ({})$'.format(pltvar['label'], pltvar.get('unit', '')), fontsize=fs) ax.set_ylabel('$\\rm QSS\ d{}/dt\ ({}/s)$'.format(pltvar['label'], pltvar.get('unit', '1')), fontsize=fs) ax.set_ylim(-40, 40) ax.axhline(0, c='k', linewidth=0.5) y0_str = '{}0'.format(varname) if hasattr(neuron, y0_str): ax.axvline(getattr(neuron, y0_str) * pltvar.get('factor', 1), label=y0_str, c='k', linewidth=0.5) # For each charge value icolor = 0 for j, Qm in enumerate(charges): lbl = 'Q = {:.0f} nC/cm2'.format(Qm * 1e5) # Compute variable derivative as a function of its value, as well as equilibrium value, # keeping other variables at quasi steady-state derX_inputs = [varrange if arg == varname else df[arg][j] for arg in derX_args] Xinf_inputs = 
[df[arg][j] for arg in Xinf_args] dX_QSS = neuron.derCai(*derX_inputs) Xeq_QSS = neuron.Caiinf(*Xinf_inputs) # Plot variable derivative and its root as a function of the variable itself c = 'C{}'.format(icolor) ax.plot(varrange * pltvar.get('factor', 1), dX_QSS * pltvar.get('factor', 1), c=c, label=lbl) ax.axvline(Xeq_QSS * pltvar.get('factor', 1), linestyle='--', c=c) icolor += 1 ax.legend(frameon=False, fontsize=fs - 3) for item in ax.get_xticklabels() + ax.get_yticklabels(): item.set_fontsize(fs) fig.tight_layout() fig.canvas.set_window_title('{}_QSS_{}_dynamics_{:.2f}kPa'.format( neuron.name, varname, Adrive * 1e-3)) return fig def plotQSSvars(neuron, a, Fdrive, Adrive, fs=12): ''' Plot effective membrane potential, quasi-steady states and resulting membrane currents as a function of membrane charge density, for a given acoustic amplitude. :param neuron: neuron object :param a: sonophore radius (m) :param Fdrive: US frequency (Hz) :param Adrive: US amplitude (Pa) :return: figure handle ''' # Get neuron-specific pltvars pltvars = neuron.getPltVars() # Compute neuron-specific charge and amplitude dependent QS states at this amplitude nbls = NeuronalBilayerSonophore(a, neuron, Fdrive) _, Qref, lookups, QSS = nbls.quasiSteadyStates(Fdrive, amps=Adrive, squeeze_output=True) Vmeff = lookups['V'] # Compute QSS currents currents = neuron.currents(Vmeff, np.array([QSS[k] for k in neuron.states])) iNet = sum(currents.values()) # Compute fixed points in dQdt profile dQdt = -iNet Q_SFPs = getFixedPoints(Qref, dQdt, filter='stable') Q_UFPs = getFixedPoints(Qref, dQdt, filter='unstable') # Extract dimensionless states norm_QSS = {} for x in neuron.states: if 'unit' not in pltvars[x]: norm_QSS[x] = QSS[x] # Create figure fig, axes = plt.subplots(3, 1, figsize=(7, 9)) axes[-1].set_xlabel('$\\rm Q_m\ (nC/cm^2)$', fontsize=fs) for ax in axes: for skey in ['top', 'right']: ax.spines[skey].set_visible(False) for item in ax.get_xticklabels() + ax.get_yticklabels(): item.set_fontsize(fs) for item in ax.get_xticklabels(minor=True): item.set_visible(False) figname = '{} neuron - QSS dynamics @ {:.2f} kPa'.format(neuron.name, Adrive * 1e-3) fig.suptitle(figname, fontsize=fs) # Subplot: Vmeff ax = axes[0] ax.set_ylabel('$V_m^*$ (mV)', fontsize=fs) ax.plot(Qref * 1e5, Vmeff, color='k') ax.axhline(neuron.Vm0, linewidth=0.5, color='k') # Subplot: dimensionless quasi-steady states cset = plt.get_cmap('Dark2').colors + plt.get_cmap('tab10').colors ax = axes[1] ax.set_ylabel('QSS gating variables (-)', fontsize=fs) ax.set_yticks([0, 0.5, 1]) ax.set_ylim([-0.05, 1.05]) for i, (label, QS_state) in enumerate(norm_QSS.items()): ax.plot(Qref * 1e5, QS_state, label=label, c=cset[i]) # Subplot: currents ax = axes[2] cset = plt.get_cmap('tab10').colors ax.set_ylabel('QSS currents ($\\rm A/m^2$)', fontsize=fs) for i, (k, I) in enumerate(currents.items()): ax.plot(Qref * 1e5, -I * 1e-3, '--', c=cset[i], label='$\\rm -{}$'.format(neuron.getPltVars()[k]['label'])) ax.plot(Qref * 1e5, -iNet * 1e-3, color='k', label='$\\rm -I_{Net}$') ax.axhline(0, color='k', linewidth=0.5) if Q_SFPs.size > 0: ax.plot(Q_SFPs * 1e5, np.zeros(Q_SFPs.size), 'o', c='k', markersize=5, zorder=2) if Q_SFPs.size > 0: ax.plot(Q_UFPs * 1e5, np.zeros(Q_UFPs.size), 'o', c='k', markersize=5, mfc='none', zorder=2) fig.tight_layout() fig.subplots_adjust(right=0.8) for ax in axes[1:]: ax.legend(loc='center right', fontsize=fs, frameon=False, bbox_to_anchor=(1.3, 0.5)) for ax in axes[:-1]: ax.set_xticklabels([]) fig.canvas.set_window_title( 
'{}_QSS_states_vs_Qm_{:.2f}kPa'.format(neuron.name, Adrive * 1e-3)) return fig def plotQSSVarVsAmp(neuron, a, Fdrive, varname, amps=None, DC=1., fs=12, cmap='viridis', yscale='lin', zscale='lin'): ''' Plot a specific QSS variable (state or current) as a function of membrane charge density, for various acoustic amplitudes. :param neuron: neuron object :param a: sonophore radius (m) :param Fdrive: US frequency (Hz) :param amps: US amplitudes (Pa) :param DC: duty cycle (-) :param varname: extraction key for variable to plot :return: figure handle ''' # Determine stimulation modality if a is None and Fdrive is None: stim_type = 'elec' a = 32e-9 Fdrive = 500e3 else: stim_type = 'US' # Extract information about variable to plot pltvar = neuron.getPltVars()[varname] Qvar = neuron.getPltVars()['Qm'] Afactor = {'US': 1e-3, 'elec': 1.}[stim_type] log = 'plotting {} neuron QSS {} vs. amp for {} stimulation @ {:.0f}% DC'.format( neuron.name, varname, stim_type, DC * 1e2) logger.info(log) nbls = NeuronalBilayerSonophore(a, neuron, Fdrive) # Get reference dictionaries for zero amplitude _, Qref, lookups0, QSS0 = nbls.quasiSteadyStates(Fdrive, amps=0., squeeze_output=True) Vmeff0 = lookups0['V'] if stim_type == 'elec': # if E-STIM case, compute steady states with constant capacitance Vmeff0 = Qref / neuron.Cm0 * 1e3 QSS0 = neuron.steadyStates(Vmeff0) df0 = QSS0 df0['Vm'] = Vmeff0 # Create figure fig, ax = plt.subplots(figsize=(6, 4)) title = '{} neuron - {}steady-state {}'.format( neuron.name, 'quasi-' if amps is not None else '', pltvar['desc']) if amps is not None: title += '\nvs. {} amplitude @ {:.0f}% DC'.format(stim_type, DC * 1e2) ax.set_title(title, fontsize=fs) ax.set_xlabel('$\\rm {}\ ({})$'.format(Qvar['label'], Qvar['unit']), fontsize=fs) ax.set_ylabel('$\\rm QSS\ {}\ ({})$'.format(pltvar['label'], pltvar.get('unit', '')), fontsize=fs) if yscale == 'log': ax.set_yscale('log') for key in ['top', 'right']: ax.spines[key].set_visible(False) # Plot y-variable reference line, if any y0 = None y0_str = '{}0'.format(varname) if hasattr(neuron, y0_str): y0 = getattr(neuron, y0_str) * pltvar.get('factor', 1) elif varname in neuron.getCurrentsNames() + ['iNet', 'dQdt']: y0 = 0. 
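# Illustrative sketch (amplitudes assumed) of the colour-coding pattern used in the
# QSS-vs-amplitude plot below: a Normalize/LogNorm paired with a ScalarMappable so that
# curves and colourbar share one mapping.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm, colors

amps = np.logspace(np.log10(10.), np.log10(600.), 10)    # kPa (illustrative)
norm = colors.LogNorm(amps.min(), amps.max())
sm = cm.ScalarMappable(norm=norm, cmap=plt.get_cmap('viridis'))
sm._A = []                                   # required before passing sm to colorbar()
fig, ax = plt.subplots()
for A in amps:
    ax.plot([0., 1.], [A, A], c=sm.to_rgba(A))
fig.colorbar(sm, ax=ax, label='Amplitude (kPa)')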
y0_str = '' if y0 is not None: ax.axhline(y0, label=y0_str, c='k', linewidth=0.5) # Plot reference QSS profile of variable as a function of charge density var0 = extractPltVar( neuron, pltvar, pd.DataFrame({k: df0[k] for k in df0.keys()}), name=varname) ax.plot(Qref * Qvar['factor'], var0, '--', c='k', zorder=1, label='$\\rm A_{{{}}}=0$'.format(stim_type)) # Define color code mymap = plt.get_cmap(cmap) zref = amps * Afactor if zscale == 'lin': norm = colors.Normalize(zref.min(), zref.max()) elif zscale == 'log': norm = colors.LogNorm(zref.min(), zref.max()) sm = cm.ScalarMappable(norm=norm, cmap=mymap) sm._A = [] # Get amplitude-dependent QSS dictionary if stim_type == 'US': # Get dictionary of charge and amplitude dependent QSS variables _, Qref, lookups, QSS = nbls.quasiSteadyStates( Fdrive, amps=amps, DCs=DC, squeeze_output=True) df = QSS df['Vm'] = lookups['V'] else: # Repeat zero-amplitude QSS dictionary for all amplitudes df = {k: np.tile(df0[k], (amps.size, 1)) for k in df0} # Plot QSS profiles for various amplitudes for i, A in enumerate(amps): var = extractPltVar( neuron, pltvar, pd.DataFrame({k: df[k][i] for k in df.keys()}), name=varname) if varname == 'dQdt' and stim_type == 'elec': var += A * DC * pltvar['factor'] ax.plot(Qref * Qvar['factor'], var, c=sm.to_rgba(A * Afactor), zorder=0) # Add legend and adjust layout ax.legend(frameon=False, fontsize=fs) for item in ax.get_xticklabels() + ax.get_yticklabels(): item.set_fontsize(fs) fig.tight_layout() fig.subplots_adjust(bottom=0.15, top=0.9, right=0.80, hspace=0.5) # Plot amplitude colorbar if amps is not None: cbarax = fig.add_axes([0.85, 0.15, 0.03, 0.75]) fig.colorbar(sm, cax=cbarax) cbarax.set_ylabel( 'Amplitude ({})'.format({'US': 'kPa', 'elec': 'mA/m2'}[stim_type]), fontsize=fs) for item in cbarax.get_yticklabels(): item.set_fontsize(fs) title = '{}_{}SS_{}'.format(neuron.name, 'Q' if amps is not None else '', varname) if amps is not None: title += '_vs_{}A_{}_{:.0f}%DC'.format(zscale, stim_type, DC * 1e2) fig.canvas.set_window_title(title) return fig @cachePKL( root, lambda nbls, Fdrive, _, DC: 'FPs_vs_Adrive_{}_{:.0f}kHz_{:.0f}%DC'.format( nbls.neuron.name, Fdrive * 1e-3, DC * 1e2) ) def getQSSFixedPointsvsAdrive(nbls, Fdrive, amps, DC, mpi=False, loglevel=logging.INFO): # Compute 2D QSS charge variation array _, Qref, lookups, QSS = nbls.quasiSteadyStates( Fdrive, amps=amps, DCs=DC, squeeze_output=True) dQdt = -nbls.neuron.iNet(lookups['V'], np.array([QSS[k] for k in nbls.neuron.states])) # mA/m2 # Generate batch queue QSS_queue = [] for iA, Adrive in enumerate(amps): lookups1D = {k: v[iA, :] for k, v in lookups.items()} lookups1D['Q'] = Qref QSS_queue.append([Fdrive, Adrive, DC, lookups1D, dQdt[iA, :]]) # Run batch to find stable and unstable fixed points at each amplitude QSS_batch = Batch(nbls.fixedPointsQSS, QSS_queue) QSS_output = QSS_batch(mpi=mpi, loglevel=loglevel) # Sort points by amplitude SFPs, UFPs = [], [] for i, Adrive in enumerate(amps): SFPs += [(Adrive, Qm) for Qm in QSS_output[i][0]] UFPs += [(Adrive, Qm) for Qm in QSS_output[i][1]] return SFPs, UFPs @cachePKL( root, lambda nbls, Fdrive, _, tstim, toffset, PRF, DC: 'stab_vs_Adrive_{}_{:.0f}kHz_{:.0f}ms_{:.0f}ms_offset_{:.0f}Hz_PRF_{:.0f}%DC'.format( nbls.neuron.name, Fdrive * 1e-3, tstim * 1e3, toffset * 1e3, PRF, DC * 1e2) ) def getSimFixedPointsvsAdrive(nbls, Fdrive, amps, tstim, toffset, PRF, DC, outputdir=None, mpi=False, loglevel=logging.INFO): # Get stabilization point from simulation, if any stab_points = [] for Adrive in amps: data, _ = 
nbls.load(outputdir, Fdrive, Adrive, tstim, toffset, PRF, DC, 'sonic') stab_points.append((Adrive, nbls.neuron.getStabilizationValue(data))) return stab_points def plotEqChargeVsAmp(neuron, a, Fdrive, amps=None, tstim=250e-3, toffset=50e-3, PRF=100.0, DC=1., fs=12, xscale='lin', compdir=None, mpi=False, loglevel=logging.INFO): ''' Plot the equilibrium membrane charge density as a function of acoustic amplitude, given an initial value of membrane charge density. :param neuron: neuron object :param a: sonophore radius (m) :param Fdrive: US frequency (Hz) :param amps: US amplitudes (Pa) :return: figure handle ''' # Determine stimulation modality stim_type = 'US' logger.info('plotting equilibrium charges for %s stimulation', stim_type) # Create figure fig, ax = plt.subplots(figsize=(6, 4)) figname = '{} neuron - charge stability vs. amplitude @ {:.0f}%DC'.format(neuron.name, DC * 1e2) ax.set_title(figname) ax.set_xlabel('Amplitude ({})'.format({'US': 'kPa', 'elec': 'mA/m2'}[stim_type]), fontsize=fs) ax.set_ylabel('$\\rm Q_m\ (nC/cm^2)$', fontsize=fs) if xscale == 'log': ax.set_xscale('log') for skey in ['top', 'right']: ax.spines[skey].set_visible(False) for item in ax.get_xticklabels() + ax.get_yticklabels(): item.set_fontsize(fs) nbls = NeuronalBilayerSonophore(a, neuron, Fdrive) Afactor = 1e-3 # Plot charge SFPs and UFPs for each acoustic amplitude SFPs, UFPs = getQSSFixedPointsvsAdrive( nbls, Fdrive, amps, DC, mpi=mpi, loglevel=loglevel) if len(SFPs) > 0: A_SFPs, Q_SFPs = np.array(SFPs).T ax.scatter(np.array(A_SFPs) * Afactor, np.array(Q_SFPs) * 1e5, marker='.', s=20, facecolors='g', edgecolors='none', label='QSS stable fixed points') if len(UFPs) > 0: A_UFPs, Q_UFPs = np.array(UFPs).T ax.scatter(np.array(A_UFPs) * Afactor, np.array(Q_UFPs) * 1e5, marker='.', s=20, facecolors='r', edgecolors='none', label='QSS unstable fixed points') # Plot charge asymptotic stabilization points from simulations for each acoustic amplitude if compdir is not None: stab_points = getSimFixedPointsvsAdrive( nbls, Fdrive, amps, tstim, toffset, PRF, DC, outputdir=compdir, mpi=mpi, loglevel=loglevel) if len(stab_points) > 0: A_stab, Q_stab = np.array(stab_points).T ax.scatter(np.array(A_stab) * Afactor, np.array(Q_stab) * 1e5, marker='o', s=20, facecolors='none', edgecolors='k', label='stabilization points from simulations') # Post-process figure ax.set_ylim(np.array([neuron.Qm0 - 10e-5, 0]) * 1e5) ax.legend(frameon=False, fontsize=fs) fig.tight_layout() fig.canvas.set_window_title('{}_QSS_Qstab_vs_{}A_{}_{:.0f}%DC{}'.format( neuron.name, xscale, stim_type, DC * 1e2, '_with_comp' if compdir is not None else '' )) return fig diff --git a/PySONIC/plt/__init__.py b/PySONIC/plt/__init__.py index d27ed02..ac868a4 100644 --- a/PySONIC/plt/__init__.py +++ b/PySONIC/plt/__init__.py @@ -1,15 +1,14 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- # @Author: Theo Lemaire # @Date: 2017-06-06 13:36:00 # @Email: theo.lemaire@epfl.ch # @Last Modified by: Theo Lemaire -# @Last Modified time: 2019-03-26 15:23:16 +# @Last Modified time: 2019-06-06 18:08:23 from .pltutils import * -from .batch import plotBatch -from .comp import plotComp +from .timeseries import ComparativePlot, SchemePlot from .actmap import * from .QSS import * from .spikeutils import plotSpikingMetrics, plotPhasePlane, plotFRProfile from .effvars import plotEffectiveVariables \ No newline at end of file diff --git a/PySONIC/plt/actmap.py b/PySONIC/plt/actmap.py index 9dc242a..2712857 100644 --- a/PySONIC/plt/actmap.py +++ b/PySONIC/plt/actmap.py @@ -1,632 +1,632 
@@ # -*- coding: utf-8 -*- # @Author: Theo Lemaire # @Date: 2018-09-26 16:47:18 # @Last Modified by: Theo Lemaire -# @Last Modified time: 2019-05-31 15:26:49 +# @Last Modified time: 2019-06-06 15:19:44 import os import ntpath import pickle import numpy as np import pandas as pd import matplotlib.pyplot as plt import matplotlib from matplotlib.ticker import FormatStrFormatter from ..core import NeuronalBilayerSonophore from ..utils import logger, si_format from ..postpro import findPeaks from ..constants import * -from ..neurons import getNeuronsDict +from ..neurons import getPointNeuron from .pltutils import cm2inch, computeMeshEdges class ActivationMap: def __init__(self, root, neuron, a, Fdrive, tstim, PRF): self.root = root - self.neuron = getNeuronsDict()[neuron]() + self.neuron = getPointNeuron(neuron) self.a = a self.nbls = NeuronalBilayerSonophore(self.a, self.neuron) self.Fdrive = Fdrive self.tstim = tstim self.PRF = PRF self.out_fname = 'actmap {} {}Hz PRF{}Hz {}s.csv'.format( self.neuron.name, *si_format([self.Fdrive, self.PRF, self.tstim], space='')) self.out_fpath = os.path.join(self.root, self.out_fname) def cacheMap(self): # Load activation map from file if it exists if os.path.isfile(self.out_fpath): logger.info('Loading activation map for %s neuron', self.neuron.name) actmap = np.loadtxt(actmap_filepath, delimiter=',') else: # Save activation map to file self.compute(amps, DCs) np.savetxt(self.out_fpath, actmap, delimiter=',') def compute(self, amps, DCs): logger.info('Generating activation map for %s neuron', self.neuron.name) actmap = np.empty((amps.size, DCs.size)) nfiles = DCs.size * amps.size for i, A in enumerate(amps): for j, DC in enumerate(DCs): fname = '{}.pkl'.format(nbls.filecode(Fdrive, A, tstim, 0., PRF, DC, 'sonic')) fpath = os.path.join(root, fname) if not os.path.isfile(fpath): logger.error('"{}" file not found'.format(fname)) actmap[i, j] = np.nan else: # Load data logger.debug('Loading file {}/{}: "{}"'.format( i * amps.size + j + 1, nfiles, fname)) with open(fpath, 'rb') as fh: frame = pickle.load(fh) df = frame['data'] meta = frame['meta'] tstim = meta['tstim'] t = df['t'].values Qm = df['Qm'].values dt = t[1] - t[0] # Detect spikes on charge profile during stimulus mpd = int(np.ceil(SPIKE_MIN_DT / dt)) ispikes, *_ = findPeaks( Qm[t <= tstim], mph=SPIKE_MIN_QAMP, mpd=mpd, mpp=SPIKE_MIN_QPROM ) # Compute firing metrics if ispikes.size == 0: # if no spike, assign -1 actmap[i, j] = -1 elif ispikes.size == 1: # if only 1 spike, assign 0 actmap[i, j] = 0 else: # if more than 1 spike, assign firing rate FRs = 1 / np.diff(t[ispikes]) actmap[i, j] = np.mean(FRs) return actmap def onClick(self, event, amps, DCs, meshedges, tmax, Vbounds): ''' Retrieve the specific input parameters of the x and y dimensions when the user clicks on a cell in the 2D map, and define filename from it. ''' # Get DC and A from x and y coordinates x, y = event.xdata, event.ydata DC = DCs[np.searchsorted(meshedges[0], x * 1e-2) - 1] Adrive = amps[np.searchsorted(meshedges[1], y * 1e3) - 1] # Define filepath fname = '{}.pkl'.format(self.nbls.filecode( self.Fdrive, Adrive, self.tstim, 0., self.PRF, DC, 'sonic')) fpath = os.path.join(self.root, fname) # Plot Q-trace try: plotQVeff(fpath, tmax=tmax, ybounds=Vbounds) plotFRspectrum(fpath) plt.show() except FileNotFoundError as err: logger.error(err) def plotQVeff(self, filepath, tonset=10, tmax=None, ybounds=None, fs=8, lw=1): ''' Plot superimposed profiles of membrane charge density and effective membrane potential. 
:param filepath: full path to the data file :param tonset: pre-stimulus onset to add to profiles (ms) :param tmax: max time value showed on graph (ms) :param ybounds: y-axis bounds (mV / nC/cm2) :return: handle to the generated figure ''' # Check file existence fname = ntpath.basename(filepath) if not os.path.isfile(filepath): raise FileNotFoundError('Error: "{}" file does not exist'.format(fname)) # Load data logger.debug('Loading data from "%s"', fname) with open(filepath, 'rb') as fh: frame = pickle.load(fh) df = frame['data'] t = df['t'].values * 1e3 # ms Qm = df['Qm'].values * 1e5 # nC/cm2 Vm = df['Vm'].values # mV # Add onset to profiles t = np.hstack((np.array([-tonset, t[0]]), t)) Vm = np.hstack((np.array([self.neuron.Vm0] * 2), Vm)) Qm = np.hstack((np.array([Qm[0]] * 2), Qm)) # Determine axes bounds if tmax is None: tmax = t.max() if ybounds is None: ybounds = (min(Vm.min(), Qm.min()), max(Vm.max(), Qm.max())) # Create figure fig, ax = plt.subplots(figsize=cm2inch(7, 3)) fig.canvas.set_window_title(fname) plt.subplots_adjust(left=0.2, bottom=0.2, right=0.95, top=0.95) for key in ['top', 'right']: ax.spines[key].set_visible(False) for key in ['bottom', 'left']: ax.spines[key].set_position(('axes', -0.03)) ax.spines[key].set_linewidth(2) ax.yaxis.set_tick_params(width=2) ax.yaxis.set_major_formatter(FormatStrFormatter('%.0f')) ax.set_xlim((-tonset, tmax)) ax.set_xticks([]) ax.set_xlabel('{}s'.format(si_format((tonset + tmax) * 1e-3, space=' ')), fontsize=fs) ax.set_ylabel('mV - $\\rm nC/cm^2$', fontsize=fs, labelpad=-15) ax.set_ylim(ybounds) ax.set_yticks(ybounds) for item in ax.get_yticklabels(): item.set_fontsize(fs) # Plot Qm and Vmeff profiles ax.plot(t, Vm, color='darkgrey', linewidth=lw) ax.plot(t, Qm, color='k', linewidth=lw) # fig.tight_layout() return fig def plotFRspectrum(self, filepath, FRbounds=None, fs=8, lw=1): ''' Plot firing rate specturm. 
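# Illustrative sketch of the pickled output structure unpacked throughout this module
# (the file name below is hypothetical).
import pickle

with open('some_simulation_output.pkl', 'rb') as fh:   # hypothetical file
    frame = pickle.load(fh)
df, meta = frame['data'], frame['meta']                 # time-series DataFrame + metadata dict
t, Qm, Vm = df['t'].values, df['Qm'].values, df['Vm'].values
print('{:.0f} ms stimulus, {} samples'.format(meta['tstim'] * 1e3, t.size))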
:param filepath: full path to the data file :param FRbounds: firing rate bounds (Hz) :return: handle to the generated figure ''' # Determine FR bounds if FRbounds is None: FRbounds = (1e0, 1e3) # Check file existence fname = ntpath.basename(filepath) if not os.path.isfile(filepath): raise FileNotFoundError('Error: "{}" file does not exist'.format(fname)) # Load data logger.debug('Loading data from "%s"', fname) with open(filepath, 'rb') as fh: frame = pickle.load(fh) df = frame['data'] meta = frame['meta'] tstim = meta['tstim'] t = df['t'].values Qm = df['Qm'].values dt = t[1] - t[0] # Detect spikes on charge profile during stimulus mpd = int(np.ceil(SPIKE_MIN_DT / dt)) ispikes, *_ = findPeaks( Qm[t <= tstim], mph=SPIKE_MIN_QAMP, mpd=mpd, mpp=SPIKE_MIN_QPROM ) # Compute FR spectrum if ispikes.size <= MIN_NSPIKES_SPECTRUM: raise ValueError('Number of spikes is to small to form spectrum') FRs = 1 / np.diff(t[ispikes]) logbins = np.logspace(np.log10(FRbounds[0]), np.log10(FRbounds[1]), 30) # Create figure fig, ax = plt.subplots(figsize=cm2inch(7, 3)) fig.canvas.set_window_title(fname) for key in ['top', 'right']: ax.spines[key].set_visible(False) ax.set_xlim(FRbounds) ax.set_xlabel('Firing rate (Hz)', fontsize=fs) ax.set_ylabel('Density', fontsize=fs) for item in ax.get_yticklabels(): item.set_fontsize(fs) ax.hist(FRs, bins=logbins, density=True, color='k') ax.set_xscale('log') fig.tight_layout() return fig def getActivationMap(root, nbls, Fdrive, tstim, PRF, amps, DCs): ''' Compute the activation map of a neuron with specific sonophore radius at a given frequency and PRF, by computing the spiking metrics of simulation results over a 2D space (amplitude x duty cycle). :param root: directory containing the input data files :param neuron: neuron name :param a: sonophore radius :param Fdrive: US frequency (Hz) :param tstim: duration of US stimulation (s) :param PRF: pulse repetition frequency (Hz) :param amps: vector of acoustic amplitudes (Pa) :param DCs: vector of duty cycles (-) :return the activation matrix ''' # Load activation map from file if it exists actmap_filename = 'actmap {} {}Hz PRF{}Hz {}s.csv'.format( nbls.neuron.name, *si_format([Fdrive, PRF, tstim], space='')) actmap_filepath = os.path.join(root, actmap_filename) if os.path.isfile(actmap_filepath): logger.info('Loading activation map for %s neuron', nbls.neuron.name) return np.loadtxt(actmap_filepath, delimiter=',') # Otherwise generate it logger.info('Generating activation map for %s neuron', nbls.neuron.name) actmap = np.empty((amps.size, DCs.size)) nfiles = DCs.size * amps.size for i, A in enumerate(amps): for j, DC in enumerate(DCs): fname = '{}.pkl'.format(nbls.filecode(Fdrive, A, tstim, 0., PRF, DC, 'sonic')) fpath = os.path.join(root, fname) if not os.path.isfile(fpath): logger.error('"{}" file not found'.format(fname)) actmap[i, j] = np.nan else: # Load data logger.debug('Loading file {}/{}: "{}"'.format(i * amps.size + j + 1, nfiles, fname)) with open(fpath, 'rb') as fh: frame = pickle.load(fh) df = frame['data'] meta = frame['meta'] tstim = meta['tstim'] t = df['t'].values Qm = df['Qm'].values dt = t[1] - t[0] # Detect spikes on charge profile during stimulus mpd = int(np.ceil(SPIKE_MIN_DT / dt)) ispikes, *_ = findPeaks( Qm[t <= tstim], mph=SPIKE_MIN_QAMP, mpd=mpd, mpp=SPIKE_MIN_QPROM ) # Compute firing metrics if ispikes.size == 0: # if no spike, assign -1 actmap[i, j] = -1 elif ispikes.size == 1: # if only 1 spike, assign 0 actmap[i, j] = 0 else: # if more than 1 spike, assign firing rate FRs = 1 / 
np.diff(t[ispikes]) actmap[i, j] = np.mean(FRs) # Save activation map to file np.savetxt(actmap_filepath, actmap, delimiter=',') return actmap def onClick(event, root, nbls, Fdrive, tstim, PRF, amps, DCs, meshedges, tmax, Vbounds): ''' Retrieve the specific input parameters of the x and y dimensions when the user clicks on a cell in the 2D map, and define filename from it. ''' # Get DC and A from x and y coordinates x, y = event.xdata, event.ydata DC = DCs[np.searchsorted(meshedges[0], x * 1e-2) - 1] Adrive = amps[np.searchsorted(meshedges[1], y * 1e3) - 1] # Define filepath fname = '{}.pkl'.format(nbls.filecode(Fdrive, Adrive, tstim, 0., PRF, DC, 'sonic')) filepath = os.path.join(root, fname) # Plot Q-trace try: plotQVeff(filepath, tmax=tmax, ybounds=Vbounds) plotFRspectrum(filepath) plt.show() except FileNotFoundError as err: logger.error(err) def plotQVeff(filepath, tonset=10, tmax=None, ybounds=None, fs=8, lw=1): ''' Plot superimposed profiles of membrane charge density and effective membrane potential. :param filepath: full path to the data file :param tonset: pre-stimulus onset to add to profiles (ms) :param tmax: max time value showed on graph (ms) :param ybounds: y-axis bounds (mV / nC/cm2) :return: handle to the generated figure ''' # Check file existence fname = ntpath.basename(filepath) if not os.path.isfile(filepath): raise FileNotFoundError('Error: "{}" file does not exist'.format(fname)) # Load data logger.debug('Loading data from "%s"', fname) with open(filepath, 'rb') as fh: frame = pickle.load(fh) df = frame['data'] meta = frame['meta'] t = df['t'].values * 1e3 # ms Qm = df['Qm'].values * 1e5 # nC/cm2 Vm = df['Vm'].values # mV # Add onset to profiles t = np.hstack((np.array([-tonset, t[0]]), t)) - Vm = np.hstack((np.array([getNeuronsDict()[meta['neuron']]().Vm0] * 2), Vm)) + Vm = np.hstack((np.array([getPointNeuron(meta['neuron']).Vm0] * 2), Vm)) Qm = np.hstack((np.array([Qm[0]] * 2), Qm)) # Determine axes bounds if tmax is None: tmax = t.max() if ybounds is None: ybounds = (min(Vm.min(), Qm.min()), max(Vm.max(), Qm.max())) # Create figure fig, ax = plt.subplots(figsize=cm2inch(7, 3)) fig.canvas.set_window_title(fname) plt.subplots_adjust(left=0.2, bottom=0.2, right=0.95, top=0.95) for key in ['top', 'right']: ax.spines[key].set_visible(False) for key in ['bottom', 'left']: ax.spines[key].set_position(('axes', -0.03)) ax.spines[key].set_linewidth(2) ax.yaxis.set_tick_params(width=2) ax.yaxis.set_major_formatter(FormatStrFormatter('%.0f')) ax.set_xlim((-tonset, tmax)) ax.set_xticks([]) ax.set_xlabel('{}s'.format(si_format((tonset + tmax) * 1e-3, space=' ')), fontsize=fs) ax.set_ylabel('mV - $\\rm nC/cm^2$', fontsize=fs, labelpad=-15) ax.set_ylim(ybounds) ax.set_yticks(ybounds) for item in ax.get_yticklabels(): item.set_fontsize(fs) # Plot Qm and Vmeff profiles ax.plot(t, Vm, color='darkgrey', linewidth=lw) ax.plot(t, Qm, color='k', linewidth=lw) # fig.tight_layout() return fig def plotFRspectrum(filepath, FRbounds=None, fs=8, lw=1): ''' Plot firing rate specturm. 
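# Illustrative sketch (synthetic spike times) of the per-cell metric assigned just
# above: -1 for no spike, 0 for a single spike, otherwise the mean inverse
# inter-spike interval.
import numpy as np

tspikes = np.array([0.012, 0.022, 0.033, 0.045])   # s
if tspikes.size == 0:
    metric = -1
elif tspikes.size == 1:
    metric = 0
else:
    metric = np.mean(1 / np.diff(tspikes))          # mean firing rate (Hz)
print(metric)   # ~91 Hz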
:param filepath: full path to the data file :param FRbounds: firing rate bounds (Hz) :return: handle to the generated figure ''' # Determine FR bounds if FRbounds is None: FRbounds = (1e0, 1e3) # Check file existence fname = ntpath.basename(filepath) if not os.path.isfile(filepath): raise FileNotFoundError('Error: "{}" file does not exist'.format(fname)) # Load data logger.debug('Loading data from "%s"', fname) with open(filepath, 'rb') as fh: frame = pickle.load(fh) df = frame['data'] meta = frame['meta'] tstim = meta['tstim'] t = df['t'].values Qm = df['Qm'].values dt = t[1] - t[0] # Detect spikes on charge profile during stimulus mpd = int(np.ceil(SPIKE_MIN_DT / dt)) ispikes, *_ = findPeaks( Qm[t <= tstim], mph=SPIKE_MIN_QAMP, mpd=mpd, mpp=SPIKE_MIN_QPROM ) # Compute FR spectrum if ispikes.size <= MIN_NSPIKES_SPECTRUM: raise ValueError('Number of spikes is to small to form spectrum') FRs = 1 / np.diff(t[ispikes]) logbins = np.logspace(np.log10(FRbounds[0]), np.log10(FRbounds[1]), 30) # Create figure fig, ax = plt.subplots(figsize=cm2inch(7, 3)) fig.canvas.set_window_title(fname) for key in ['top', 'right']: ax.spines[key].set_visible(False) ax.set_xlim(FRbounds) ax.set_xlabel('Firing rate (Hz)', fontsize=fs) ax.set_ylabel('Density', fontsize=fs) for item in ax.get_yticklabels(): item.set_fontsize(fs) ax.hist(FRs, bins=logbins, density=True, color='k') ax.set_xscale('log') fig.tight_layout() return fig def plotActivationMap(root, neuron, a, Fdrive, tstim, PRF, amps, DCs, Ascale='log', FRscale='log', FRbounds=None, title=None, fs=8, thrs=True, connect=False, tmax=None, Vbounds=None): ''' Plot a neuron's activation map over the amplitude x duty cycle 2D space. :param root: directory containing the input data files :param neuron: neuron name :param a: sonophore radius :param Fdrive: US frequency (Hz) :param tstim: duration of US stimulation (s) :param PRF: pulse repetition frequency (Hz) :param amps: vector of acoustic amplitudes (Pa) :param DCs: vector of duty cycles (-) :param Ascale: scale to use for the amplitude dimension ('lin' or 'log') :param FRscale: scale to use for the firing rate coloring ('lin' or 'log') :param FRbounds: lower and upper bounds of firing rate color-scale :param title: figure title :param fs: fontsize to use for the title and labels :return: 3-tuple with the handle to the generated figure and the mesh x and y coordinates ''' - neuronobj = getNeuronsDict()[neuron]() + neuronobj = getPointNeuron(neuron) nbls = NeuronalBilayerSonophore(a, neuronobj) # Get activation map actmap = getActivationMap(root, nbls, Fdrive, tstim, PRF, amps, DCs) # Check firing rate bounding minFR, maxFR = (actmap[actmap > 0].min(), actmap.max()) logger.info('FR range: %.0f - %.0f Hz', minFR, maxFR) if FRbounds is None: FRbounds = (minFR, maxFR) else: if minFR < FRbounds[0]: logger.warning('Minimal firing rate (%.0f Hz) is below defined lower bound (%.0f Hz)', minFR, FRbounds[0]) if maxFR > FRbounds[1]: logger.warning('Maximal firing rate (%.0f Hz) is above defined upper bound (%.0f Hz)', maxFR, FRbounds[1]) # Plot activation map if FRscale == 'lin': norm = matplotlib.colors.Normalize(*FRbounds) elif FRscale == 'log': norm = matplotlib.colors.LogNorm(*FRbounds) fig, ax = plt.subplots(figsize=cm2inch(8, 5.8)) fig.subplots_adjust(left=0.15, bottom=0.15, right=0.8, top=0.92) if title is None: title = '{} neuron @ {}Hz, {}Hz PRF ({}m sonophore)'.format( neuron, *si_format([Fdrive, PRF, a])) ax.set_title(title, fontsize=fs) if Ascale == 'log': ax.set_yscale('log') ax.set_xlabel('Duty cycle (%)', 
fontsize=fs, labelpad=-0.5) ax.set_ylabel('Amplitude (kPa)', fontsize=fs) ax.set_xlim(np.array([DCs.min(), DCs.max()]) * 1e2) for item in ax.get_xticklabels() + ax.get_yticklabels(): item.set_fontsize(fs) xedges = computeMeshEdges(DCs) yedges = computeMeshEdges(amps, scale=Ascale) actmap[actmap == -1] = np.nan actmap[actmap == 0] = 1e-3 cmap = plt.get_cmap('viridis') cmap.set_bad('silver') cmap.set_under('k') ax.pcolormesh(xedges * 1e2, yedges * 1e-3, actmap, cmap=cmap, norm=norm) if thrs: Athrs_fname = 'Athrs_{}_{:.0f}nm_{}Hz_PRF{}Hz_{}s.xlsx'.format( neuron, a * 1e9, *si_format([Fdrive, PRF, tstim], 0, space='')) fpath = os.path.join(root, Athrs_fname) if os.path.isfile(fpath): df = pd.read_excel(fpath, sheet_name='Data') DCs = df['Duty factor'].values Athrs = df['Adrive (kPa)'].values iDCs = np.argsort(DCs) DCs = DCs[iDCs] Athrs = Athrs[iDCs] ax.plot(DCs * 1e2, Athrs, '-', color='#F26522', linewidth=2, label='threshold amplitudes') ax.legend(loc='lower center', frameon=False, fontsize=8) else: logger.warning('%s file not found -> cannot draw threshold curve', fpath) # # Plot rheobase amplitudes if specified # if rheobase: # logger.info('Computing rheobase amplitudes') # dDC = 0.01 # DCs_dense = np.arange(dDC, 100 + dDC / 2, dDC) / 1e2 - # neuronobj = getNeuronsDict()[neuron]() + # neuronobj = getPointNeuron(neuron) # nbls = NeuronalBilayerSonophore(a, neuronobj) # Athrs = nbls.findRheobaseAmps(DCs_dense, Fdrive, neuronobj.VT)[0] # ax.plot(DCs_dense * 1e2, Athrs * 1e-3, '-', color='#F26522', linewidth=2, # label='threshold amplitudes') # ax.legend(loc='lower center', frameon=False, fontsize=8) # Plot firing rate colorbar sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm) sm._A = [] pos1 = ax.get_position() # get the map axis position cbarax = fig.add_axes([pos1.x1 + 0.02, pos1.y0, 0.03, pos1.height]) fig.colorbar(sm, cax=cbarax) cbarax.set_ylabel('Firing rate (Hz)', fontsize=fs) for item in cbarax.get_yticklabels(): item.set_fontsize(fs) # Link callback to figure if connect: fig.canvas.mpl_connect( 'button_press_event', lambda event: onClick(event, root, nbls, Fdrive, tstim, PRF, amps, DCs, (xedges, yedges), tmax, Vbounds) ) return fig def plotAstimRheobaseAmps(neuron, radii, freqs, fs=12): ''' Plot threshold excitation amplitudes (determined by quasi-steady approximation) of a specific neuron as a function of duty cycle, for various combinations of sonophore radius and US frequency. 
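# Illustrative sketch (synthetic data) of the masked-colormap recoding done just above:
# silent cells (-1) become NaN and are drawn in silver via set_bad, single-spike cells
# (0) are pushed below the colour scale and drawn in black via set_under.
import numpy as np
import matplotlib
import matplotlib.pyplot as plt

actmap = np.array([[np.nan, -1., 0., 50.], [-1., 0., 80., 120.]])   # synthetic firing-rate codes
actmap[actmap == -1] = np.nan
actmap[actmap == 0] = 1e-3
cmap = plt.get_cmap('viridis')
cmap.set_bad('silver')
cmap.set_under('k')
fig, ax = plt.subplots()
mesh = ax.pcolormesh(actmap, cmap=cmap, norm=matplotlib.colors.LogNorm(1e0, 1e3))
fig.colorbar(mesh, ax=ax, label='Firing rate (Hz)')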
:param neuron: neuron object :param radii: list of sonophore radii (m) :param freqs: list US frequencies (Hz) :return: figure handle ''' linestyles = ['-', '--', ':', '-.'] assert len(freqs) <= len(linestyles), 'too many frequencies' fig, ax = plt.subplots() ax.set_title('{} neuron: rheobase amplitude profiles'.format(neuron.name), fontsize=fs) ax.set_xlabel('Duty cycle (%)', fontsize=fs) ax.set_ylabel('Threshold amplitude (kPa)', fontsize=fs) for item in ax.get_xticklabels() + ax.get_yticklabels(): item.set_fontsize(fs) ax.set_yscale('log') ax.set_xlim([0, 100]) ax.set_ylim([10, 600]) DCs = np.arange(1, 101) / 1e2 for i, a in enumerate(radii): nbls = NeuronalBilayerSonophore(a, neuron) for j, Fdrive in enumerate(freqs): Athrs, Aref = nbls.findRheobaseAmps(DCs, Fdrive, neuron.VT) color = 'C{}'.format(i) lbl = '{:.0f} nm radius sonophore, {}Hz'.format(a * 1e9, si_format(Fdrive, 1, space=' ')) ax.plot(DCs * 1e2, Athrs * 1e-3, linestyles[j], c=color, label=lbl) ax.legend(fontsize=fs, frameon=False) fig.tight_layout() return fig def plotEstimRheobaseAmps(neurons, fs=15): fig, ax = plt.subplots() ax.set_title('Rheobase amplitudes', fontsize=fs) ax.set_xlabel('Duty cycle (%)', fontsize=fs) ax.set_ylabel('Threshold amplitude (mA/m2)', fontsize=fs) for item in ax.get_xticklabels() + ax.get_yticklabels(): item.set_fontsize(fs) ax.set_yscale('log') ax.set_ylim([1e0, 1e3]) DCs = np.arange(1, 101) / 1e2 for neuron in neurons: Athrs = neuron.findRheobaseAmps(DCs, neuron.VT) ax.plot(DCs * 1e2, Athrs, label='{} neuron'.format(neuron.name)) ax.legend(fontsize=fs, frameon=False) fig.tight_layout() return fig diff --git a/PySONIC/plt/batch.py b/PySONIC/plt/batch.py deleted file mode 100644 index f3c8e5b..0000000 --- a/PySONIC/plt/batch.py +++ /dev/null @@ -1,151 +0,0 @@ -# -*- coding: utf-8 -*- -# @Author: Theo Lemaire -# @Date: 2018-09-25 16:19:19 -# @Last Modified by: Theo Lemaire -# @Last Modified time: 2019-03-26 21:07:19 - -import numpy as np -import matplotlib.pyplot as plt - -from ..utils import * -from .pltutils import * - - -def plotBatch(filepaths, pltscheme=None, plt_save=False, directory=None, - ask_before_save=True, fig_ext='png', tag='fig', fs=10, lw=2, title=True, - show_patches=True, frequency=1): - ''' Plot a figure with profiles of several specific NICE output variables, for several - NICE simulations. 
- - :param filepaths: list of full paths to output data files to be compared - :param pltscheme: dict of lists of variables names to extract and plot together - :param plt_save: boolean stating whether to save the created figures - :param directory: directory where to save figures - :param ask_before_save: boolean stating whether to show the created figures - :param fig_ext: file extension for the saved figures - :param tag: suffix added to the end of the figures name - :param fs: labels font size - :param lw: curves line width - :param title: boolean stating whether to display a general title on the figures - :param show_patches: boolean indicating whether to indicate periods of stimulation with - colored rectangular patches - :param frequency: downsampling frequency for time series - :return: list of figure handles - ''' - - figs = [] - - # Loop through data files - for filepath in filepaths: - - # Retrieve file code and sim type from file name - pkl_filename = os.path.basename(filepath) - filecode = pkl_filename[0:-4] - sim_type = getSimType(pkl_filename) - - # Load data and extract variables - df, meta = loadData(filepath, frequency) - t = df['t'].values - try: - stimstate = df['stimstate'].values - except KeyError: - stimstate = df['states'].values - - # Determine stimulus patch from stimstate - _, tpatch_on, tpatch_off = getStimPulses(t, stimstate) - - # Initialize appropriate object - obj = getObject(sim_type, meta) - - # Retrieve plot variables - tvar, pltvars = getTimePltVar(obj.tscale), obj.getPltVars() - - # Check plot scheme if provided, otherwise generate it - if pltscheme: - for key in list(sum(list(pltscheme.values()), [])): - if key not in pltvars: - raise KeyError('Unknown plot variable: "{}"'.format(key)) - else: - pltscheme = obj.getPltScheme() - - # Preset and rescale time vector - if tvar['onset'] > 0.0: - tonset = np.array([-tvar['onset'], -t[0] - t[1]]) - t = np.hstack((tonset, t)) - t *= tvar['factor'] - - # Create figure - naxes = len(pltscheme) - if naxes == 1: - fig, ax = plt.subplots(figsize=(11, 4)) - axes = [ax] - else: - fig, axes = plt.subplots(naxes, 1, figsize=(11, min(3 * naxes, 9))) - - # Loop through each subgraph - for ax, (grouplabel, keys) in zip(axes, pltscheme.items()): - - # Extract variables to plot - nvars = len(keys) - ax_pltvars = [pltvars[k] for k in keys] - if nvars == 1: - ax_pltvars[0]['color'] = 'k' - ax_pltvars[0]['ls'] = '-' - - # Set y-axis unit and bounds - ax.set_ylabel('$\\rm {}\ ({})$'.format(grouplabel, ax_pltvars[0].get('unit', '')), - fontsize=fs) - if 'bounds' in ax_pltvars[0]: - ax_min = min([ap['bounds'][0] for ap in ax_pltvars]) - ax_max = max([ap['bounds'][1] for ap in ax_pltvars]) - ax.set_ylim(ax_min, ax_max) - - # Plot time series - icolor = 0 - for pltvar, name in zip(ax_pltvars, pltscheme[grouplabel]): - var = extractPltVar(obj, pltvar, df, meta, t.size, name) - ax.plot(t, var, pltvar.get('ls', '-'), c=pltvar.get('color', 'C{}'.format(icolor)), - lw=lw, label='$\\rm {}$'.format(pltvar['label'])) - if 'color' not in pltvar: - icolor += 1 - - # Add legend - if nvars > 1 or 'gate' in ax_pltvars[0]['desc']: - ax.legend(fontsize=fs, loc=7, ncol=nvars // 4 + 1, frameon=False) - - # Post-process figure - for ax in axes: - for item in ['top', 'right']: - ax.spines[item].set_visible(False) - ax.locator_params(axis='y', nbins=2) - for item in ax.get_yticklabels(): - item.set_fontsize(fs) - for ax in axes[:-1]: - ax.set_xticklabels([]) - for item in axes[-1].get_xticklabels(): - item.set_fontsize(fs) - axes[-1].set_xlabel('$\\rm 
{}\ ({})$'.format(tvar['label'], tvar['unit']), fontsize=fs) - if show_patches == 1: - for ax in axes: - plotStimPatches(ax, tpatch_on, tpatch_off, tvar['factor']) - if title: - axes[0].set_title(figtitle(meta), fontsize=fs) - fig.tight_layout() - - # Save figure if needed (automatic or checked) - if plt_save: - if directory is None: - directory = os.path.split(filepath)[0] - if ask_before_save: - plt_filename = SaveFileDialog( - '{}_{}.{}'.format(filecode, tag, fig_ext), - dirname=directory, ext=fig_ext) - else: - plt_filename = '{}/{}_{}.{}'.format(directory, filecode, tag, fig_ext) - if plt_filename: - plt.savefig(plt_filename) - logger.info('Saving figure as "{}"'.format(plt_filename)) - plt.close() - - figs.append(fig) - return figs diff --git a/PySONIC/plt/comp.py b/PySONIC/plt/comp.py deleted file mode 100644 index 217f21b..0000000 --- a/PySONIC/plt/comp.py +++ /dev/null @@ -1,293 +0,0 @@ -# -*- coding: utf-8 -*- -# @Author: Theo Lemaire -# @Date: 2018-09-25 16:18:45 -# @Last Modified by: Theo Lemaire -# @Last Modified time: 2019-03-26 11:48:36 - -import ntpath -import numpy as np -import matplotlib.pyplot as plt -from matplotlib.patches import Rectangle -from matplotlib.ticker import FormatStrFormatter - -from ..utils import * -from .pltutils import * - - -class InteractiveLegend(object): - ''' Class defining an interactive matplotlib legend, where lines visibility can - be toggled by simply clicking on the corresponding legend label. Other graphic - objects can also be associated to the toggle of a specific line - - Adapted from: - http://stackoverflow.com/questions/31410043/hiding-lines-after-showing-a-pyplot-figure - ''' - - def __init__(self, legend, aliases): - self.legend = legend - self.fig = legend.axes.figure - self.lookup_artist, self.lookup_handle = self._build_lookups(legend) - self._setup_connections() - self.handles_aliases = aliases - self.update() - - def _setup_connections(self): - for artist in self.legend.texts + self.legend.legendHandles: - artist.set_picker(10) # 10 points tolerance - - self.fig.canvas.mpl_connect('pick_event', self.on_pick) - - def _build_lookups(self, legend): - ''' Method of the InteractiveLegend class building - the legend lookups. 
''' - - labels = [t.get_text() for t in legend.texts] - handles = legend.legendHandles - label2handle = dict(zip(labels, handles)) - handle2text = dict(zip(handles, legend.texts)) - - lookup_artist = {} - lookup_handle = {} - for artist in legend.axes.get_children(): - if artist.get_label() in labels: - handle = label2handle[artist.get_label()] - lookup_handle[artist] = handle - lookup_artist[handle] = artist - lookup_artist[handle2text[handle]] = artist - - lookup_handle.update(zip(handles, handles)) - lookup_handle.update(zip(legend.texts, handles)) - - return lookup_artist, lookup_handle - - def on_pick(self, event): - handle = event.artist - if handle in self.lookup_artist: - artist = self.lookup_artist[handle] - artist.set_visible(not artist.get_visible()) - self.update() - - def update(self): - for artist in self.lookup_artist.values(): - handle = self.lookup_handle[artist] - if artist.get_visible(): - handle.set_visible(True) - if artist in self.handles_aliases: - for al in self.handles_aliases[artist]: - al.set_visible(True) - else: - handle.set_visible(False) - if artist in self.handles_aliases: - for al in self.handles_aliases[artist]: - al.set_visible(False) - self.fig.canvas.draw() - - def show(self): - plt.show() - - -def plotComp(filepaths, varname, labels=None, fs=10, lw=2, colors=None, lines=None, patches='one', - xticks=None, yticks=None, blacklegend=False, straightlegend=False, - inset=None, figsize=(11, 4)): - ''' Compare profiles of several specific output variables of NICE simulations. - - :param filepaths: list of full paths to output data files to be compared - :param varname: name of variable to extract and compare - :param labels: list of labels to use in the legend - :param fs: labels fontsize - :param patches: string indicating whether to indicate periods of stimulation with - colored rectangular patches - ''' - - # Input check: labels - if labels is not None: - if len(labels) != len(filepaths): - raise AssertionError('Invalid labels ({}): not matching number of compared files ({})' - .format(len(labels), len(filepaths))) - if not all(isinstance(x, str) for x in labels): - raise TypeError('Invalid labels: must be string typed') - - # Input check: line styles and colors - if colors is None: - colors = ['C{}'.format(j) for j in range(len(filepaths))] - if lines is None: - lines = ['-'] * len(filepaths) - - # Input check: STIM-ON patches - greypatch = False - if patches == 'none': - patches = [False] * len(filepaths) - elif patches == 'all': - patches = [True] * len(filepaths) - elif patches == 'one': - patches = [True] + [False] * (len(filepaths) - 1) - greypatch = True - elif isinstance(patches, list): - if len(patches) != len(filepaths): - raise AssertionError('Invalid patches ({}): not matching number of compared files ({})' - .format(len(patches), len(filepaths))) - if not all(isinstance(p, bool) for p in patches): - raise TypeError('Invalid patch sequence: all list items must be boolean typed') - else: - raise ValueError('Invalid patches: must be either "none", all", "one", or a boolean list') - - # Create figure - fig, ax = plt.subplots(figsize=figsize) - ax.set_zorder(0) - if inset is not None: - inset_ax = fig.add_axes(ax.get_position()) - inset_ax.set_zorder(1) - inset_ax.set_xlim(inset['xlims'][0], inset['xlims'][1]) - inset_ax.set_ylim(inset['ylims'][0], inset['ylims'][1]) - inset_ax.set_xticks([]) - inset_ax.set_yticks([]) - # inset_ax.patch.set_alpha(1.0) - inset_ax.add_patch(Rectangle((inset['xlims'][0], inset['ylims'][0]), - inset['xlims'][1] - 
inset['xlims'][0], - inset['ylims'][1] - inset['ylims'][0], - color='w')) - - # Loop through data files - aliases = {} - for j, filepath in enumerate(filepaths): - - # Retrieve sim type - pkl_filename = ntpath.basename(filepath) - sim_type = getSimType(pkl_filename) - - if j == 0: - sim_type_ref = sim_type - elif sim_type != sim_type_ref: - raise ValueError('Invalid comparison: different simulation types') - - # Load data and extract variables - df, meta = loadData(filepath) - t = df['t'].values - stimstate = df['stimstate'].values - - # Determine stimulus patch from stimstate - _, tpatch_on, tpatch_off = getStimPulses(t, stimstate) - - # Initialize appropriate object - obj = getObject(sim_type, meta) - - # Retrieve plot variables - tvar, pltvars = getTimePltVar(obj.tscale), obj.getPltVars() - - # Retrieve appropriate plot variable - if varname not in pltvars: - raise KeyError('Unknown plot variable: "{}". Possible plot variables are: {}'.format( - varname, ', '.join(['"{}"'.format(p) for p in pltvars.keys()]))) - pltvar = pltvars[varname] - - # Preset and rescale time vector - if tvar['onset'] > 0.0: - tonset = np.array([-tvar['onset'], -t[0] - t[1]]) - t = np.hstack((tonset, t)) - t *= tvar['factor'] - - # Extract variable and plot time series - var = extractPltVar(obj, pltvar, df, meta, t.size, varname) - handle = ax.plot(t, var, linewidth=lw, linestyle=lines[j], color=colors[j], - label=labels[j] if labels is not None else figtitle(meta)) - - if inset is not None: - inset_window = np.logical_and(t > (inset['xlims'][0] / tvar['factor']), - t < (inset['xlims'][1] / tvar['factor'])) - inset_ax.plot(t[inset_window] * tvar['factor'], var[inset_window] * pltvar['factor'], - linewidth=lw, linestyle=lines[j], color=colors[j]) - - # Add optional STIM-ON patches - if patches[j]: - (ybottom, ytop) = ax.get_ylim() - la = [] - color = '#8A8A8A' if greypatch else handle[0].get_color() - for i in range(tpatch_on.size): - la.append(ax.axvspan(tpatch_on[i] * tvar['factor'], tpatch_off[i] * tvar['factor'], - edgecolor='none', facecolor=color, alpha=0.2)) - aliases[handle[0]] = la - - if inset is not None: - cond_on = np.logical_and(tpatch_on > (inset['xlims'][0] / tvar['factor']), - tpatch_on < (inset['xlims'][1] / tvar['factor'])) - cond_off = np.logical_and(tpatch_off > (inset['xlims'][0] / tvar['factor']), - tpatch_off < (inset['xlims'][1] / tvar['factor'])) - cond_glob = np.logical_and(tpatch_on < (inset['xlims'][0] / tvar['factor']), - tpatch_off > (inset['xlims'][1] / tvar['factor'])) - cond_onoff = np.logical_or(cond_on, cond_off) - cond = np.logical_or(cond_onoff, cond_glob) - npatches_inset = np.sum(cond) - for i in range(npatches_inset): - inset_ax.add_patch(Rectangle((tpatch_on[cond][i] * tvar['factor'], ybottom), - (tpatch_off[cond][i] - tpatch_on[cond][i]) * - tvar['factor'], ytop - ybottom, color=color, - alpha=0.1)) - - # Post-process figure - for item in ['top', 'right']: - ax.spines[item].set_visible(False) - if 'bounds' in pltvar: - ax.set_ylim(*pltvar['bounds']) - ax.set_xlabel('$\\rm {}\ ({})$'.format(tvar['label'], tvar['unit']), fontsize=fs) - ax.set_ylabel('$\\rm {}\ ({})$'.format(pltvar['label'], pltvars.get('unit', '')), fontsize=fs) - if xticks is not None: # optional x-ticks - ax.set_xticks(xticks) - if yticks is not None: # optional y-ticks - ax.set_yticks(yticks) - else: - ax.locator_params(axis='y', nbins=2) - if any(ax.get_yticks() < 0): - ax.yaxis.set_major_formatter(FormatStrFormatter('%+.0f')) - for tick in ax.xaxis.get_major_ticks() + ax.yaxis.get_major_ticks(): - 
tick.label.set_fontsize(fs) - fig.tight_layout() - - # Optional operations on inset: - if inset is not None: - - # Re-position inset axis - axpos = ax.get_position() - left, right, = rescale(inset['xcoords'], ax.get_xlim()[0], ax.get_xlim()[1], - axpos.x0, axpos.x0 + axpos.width) - bottom, top, = rescale(inset['ycoords'], ax.get_ylim()[0], ax.get_ylim()[1], - axpos.y0, axpos.y0 + axpos.height) - inset_ax.set_position([left, bottom, right - left, top - bottom]) - for i in inset_ax.spines.values(): - i.set_linewidth(2) - - # Materialize inset target region with contour frame - ax.plot(inset['xlims'], [inset['ylims'][0]] * 2, linestyle='-', color='k') - ax.plot(inset['xlims'], [inset['ylims'][1]] * 2, linestyle='-', color='k') - ax.plot([inset['xlims'][0]] * 2, inset['ylims'], linestyle='-', color='k') - ax.plot([inset['xlims'][1]] * 2, inset['ylims'], linestyle='-', color='k') - - # Link target and inset with dashed lines if possible - if inset['xcoords'][1] < inset['xlims'][0]: - ax.plot([inset['xcoords'][1], inset['xlims'][0]], - [inset['ycoords'][0], inset['ylims'][0]], - linestyle='--', color='k') - ax.plot([inset['xcoords'][1], inset['xlims'][0]], - [inset['ycoords'][1], inset['ylims'][1]], - linestyle='--', color='k') - elif inset['xcoords'][0] > inset['xlims'][1]: - ax.plot([inset['xcoords'][0], inset['xlims'][1]], - [inset['ycoords'][0], inset['ylims'][0]], - linestyle='--', color='k') - ax.plot([inset['xcoords'][0], inset['xlims'][1]], - [inset['ycoords'][1], inset['ylims'][1]], - linestyle='--', color='k') - else: - logger.warning('Inset x-coordinates intersect with those of target region') - - - # Create interactive legend - leg = ax.legend(loc=1, fontsize=fs, frameon=False) - if blacklegend: - for l in leg.get_lines(): - l.set_color('k') - if straightlegend: - for l in leg.get_lines(): - l.set_linestyle('-') - interactive_legend = InteractiveLegend(ax.legend_, aliases) - - return fig diff --git a/PySONIC/plt/pltutils.py b/PySONIC/plt/pltutils.py index d58a141..5bf860c 100644 --- a/PySONIC/plt/pltutils.py +++ b/PySONIC/plt/pltutils.py @@ -1,133 +1,80 @@ # -*- coding: utf-8 -*- # @Author: Theo Lemaire # @Date: 2017-08-21 14:33:36 # @Last Modified by: Theo Lemaire -# @Last Modified time: 2019-05-10 10:38:56 +# @Last Modified time: 2019-06-06 18:26:50 ''' Useful functions to generate plots. ''' -import re import numpy as np import matplotlib -from ..core import BilayerSonophore, NeuronalBilayerSonophore -from ..neurons import getNeuronsDict - # Matplotlib parameters matplotlib.rcParams['pdf.fonttype'] = 42 matplotlib.rcParams['ps.fonttype'] = 42 matplotlib.rcParams['font.family'] = 'arial' -rgxp = re.compile('(ESTIM|ASTIM)_([A-Za-z]*)_(.*).pkl') -rgxp_mech = re.compile('(MECH)_(.*).pkl') + +def figtitle(meta): + ''' Return appropriate title based on simulation metadata. 
''' + if 'Cm0' in meta: + return '{:.0f}nm radius BLS structure: MECH-STIM {:.0f}kHz, {:.2f}kPa, {:.1f}nC/cm2'.format( + meta['a'] * 1e9, meta['Fdrive'] * 1e-3, meta['Adrive'] * 1e-3, meta['Qm'] * 1e5) + else: + if meta['DC'] < 1: + wavetype = 'PW' + suffix = ', {:.2f}Hz PRF, {:.0f}% DC'.format(meta['PRF'], meta['DC'] * 1e2) + else: + wavetype = 'CW' + suffix = '' + if 'Astim' in meta: + return '{} neuron: {} E-STIM {:.2f}mA/m2, {:.0f}ms{}'.format( + meta['neuron'], wavetype, meta['Astim'], meta['tstim'] * 1e3, suffix) + else: + return '{} neuron ({:.1f}nm): {} A-STIM {:.0f}kHz {:.2f}kPa, {:.0f}ms{} - {} model'.format( + meta['neuron'], meta['a'] * 1e9, wavetype, meta['Fdrive'] * 1e-3, + meta['Adrive'] * 1e-3, meta['tstim'] * 1e3, suffix, meta['method']) def cm2inch(*tupl): inch = 2.54 if isinstance(tupl[0], tuple): return tuple(i / inch for i in tupl[0]) else: return tuple(i / inch for i in tupl) -def getTimePltVar(tscale): - ''' Return time plot variable for a given temporal scale. ''' - return { - 'desc': 'time', - 'label': 'time', - 'unit': tscale, - 'factor': {'ms': 1e3, 'us': 1e6}[tscale], - 'onset': {'ms': 1e-3, 'us': 1e-6}[tscale] - } - - -def getSimType(fname): - ''' Get sim type from filename. ''' - for exp in [rgxp, rgxp_mech]: - mo = exp.fullmatch(fname) - if mo: - sim_type = mo.group(1) - if sim_type not in ('MECH', 'ASTIM', 'ESTIM'): - raise ValueError('Invalid simulation type: {}'.format(sim_type)) - return sim_type - raise ValueError('Error: "{}" file does not match regexp pattern'.format(fname)) - - -def getObject(sim_type, meta): - if sim_type == 'MECH': - obj = BilayerSonophore(meta['a'], meta['Cm0'], meta['Qm0']) - else: - obj = getNeuronsDict()[meta['neuron']]() - if sim_type == 'ASTIM': - obj = NeuronalBilayerSonophore(meta['a'], obj, meta['Fdrive']) - return obj - - -def getStimPulses(t, states): - ''' Determine the onset and offset times of pulses from a stimulation vector. - - :param t: time vector (s). - :param states: a vector of stimulation state (ON/OFF) at each instant in time. - :return: 3-tuple with number of patches, timing of STIM-ON an STIM-OFF instants. 
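For illustration, a hypothetical metadata dictionary for a pulsed E-STIM run would be rendered by the figtitle() function above roughly as follows (keys mirror those tested in the function; the values are made up):

meta = {
    'neuron': 'RS',     # hypothetical neuron name
    'Astim': 10.0,      # stimulation amplitude (mA/m2)
    'tstim': 150e-3,    # stimulus duration (s)
    'PRF': 100.0,       # pulse repetition frequency (Hz)
    'DC': 0.5,          # duty cycle (-)
}
title = figtitle(meta)
# -> something like "RS neuron: PW E-STIM 10.00mA/m2, 150ms, 100.00Hz PRF, 50% DC"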
- ''' - - # Compute states derivatives and identify bounds indexes of pulses - dstates = np.diff(states) - ipulse_on = np.insert(np.where(dstates > 0.0)[0] + 1, 0, 0) - ipulse_off = np.where(dstates < 0.0)[0] + 1 - if ipulse_off.size < ipulse_on.size: - ioff = t.size - 1 - if ipulse_off.size == 0: - ipulse_off = np.array([ioff]) - else: - ipulse_off = np.insert(ipulse_off, ipulse_off.size - 1, ioff) - - # Get time instants for pulses ON and OFF - npulses = ipulse_on.size - tpulse_on = t[ipulse_on] - tpulse_off = t[ipulse_off] - - # return 3-tuple with #pulses, pulse ON and pulse OFF instants - return npulses, tpulse_on, tpulse_off - - -def plotStimPatches(ax, tpatch_on, tpatch_off, tfactor): - for j in range(tpatch_on.size): - ax.axvspan(tpatch_on[j] * tfactor, tpatch_off[j] * tfactor, - edgecolor='none', facecolor='#8A8A8A', alpha=0.2) - - -def extractPltVar(obj, pltvar, df, meta=None, nsamples=0, name=''): +def extractPltVar(model, pltvar, df, meta=None, nsamples=0, name=''): if 'func' in pltvar: - s = 'obj.{}'.format(pltvar['func']) + s = 'model.{}'.format(pltvar['func']) try: var = eval(s) except AttributeError: - var = eval(s.replace('obj', 'obj.neuron')) + var = eval(s.replace('model', 'model.neuron')) elif 'key' in pltvar: var = df[pltvar['key']] elif 'constant' in pltvar: var = eval(pltvar['constant']) * np.ones(nsamples) else: var = df[name] var = var.values.copy() if var.size == nsamples - 2: var = np.hstack((np.array([pltvar.get('y0', var[0])] * 2), var)) var *= pltvar.get('factor', 1) return var def computeMeshEdges(x, scale='lin'): ''' Compute the appropriate edges of a mesh that quads a linear or logarihtmic distribution. :param x: the input vector :param scale: the type of distribution ('lin' for linear, 'log' for logarihtmic) :return: the edges vector ''' if scale == 'log': x = np.log10(x) dx = x[1] - x[0] n = x.size + 1 return {'lin': np.linspace, 'log': np.logspace}[scale](x[0] - dx / 2, x[-1] + dx / 2, n) diff --git a/PySONIC/plt/spikeutils.py b/PySONIC/plt/spikeutils.py index 5d797e2..70492b4 100644 --- a/PySONIC/plt/spikeutils.py +++ b/PySONIC/plt/spikeutils.py @@ -1,377 +1,377 @@ # -*- coding: utf-8 -*- # @Author: Theo Lemaire # @Date: 2018-10-01 20:40:28 # @Last Modified by: Theo Lemaire -# @Last Modified time: 2019-03-15 00:16:56 +# @Last Modified time: 2019-06-06 16:15:14 import pickle import numpy as np import matplotlib.pyplot as plt import matplotlib.cm as cm from ..utils import * from ..constants import * from ..postpro import findPeaks -from .pltutils import cm2inch +from .pltutils import cm2inch, figtitle # Plot parameters phaseplotvars = { 'Vm': { 'label': 'V_m\ (mV)', 'dlabel': 'dV/dt\ (V/s)', 'factor': 1e0, 'lim': (-80.0, 50.0), 'dfactor': 1e-3, 'dlim': (-300, 700), 'thr_amp': SPIKE_MIN_VAMP, 'thr_prom': SPIKE_MIN_VPROM }, 'Qm': { 'label': 'Q_m\ (nC/cm^2)', 'dlabel': 'I\ (A/m^2)', 'factor': 1e5, 'lim': (-80.0, 50.0), 'dfactor': 1e0, 'dlim': (-2, 5), 'thr_amp': SPIKE_MIN_QAMP, 'thr_prom': SPIKE_MIN_QPROM } } def plotPhasePlane(filepaths, varname, no_offset=False, no_first=False, labels=None, colors=None, fs=15, lw=2, tbounds=None, pretty=True): ''' Plot phase-plane diagrams of spiking dynamics from simulation results. 
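computeMeshEdges (kept above) derives cell boundaries from cell centers so that pcolormesh cells are centered on the sampled values; on a log scale the edges fall at geometric midpoints, extended by half a step on each side. A small check, assuming the function as defined above:

import numpy as np

amps = np.array([1e3, 1e4, 1e5])            # e.g. acoustic amplitudes (Pa)
edges = computeMeshEdges(amps, scale='log')
# -> 4 edges, roughly [3.16e2, 3.16e3, 3.16e4, 3.16e5]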
:param filepaths: list of full paths to data files :param varname: name of output variable of interest ('Qm' or Vm') :param no_offset: boolean stating whether or not to discard post-offset spikes :param no_first: boolean stating whether or not to discard first spike :param tbounds: spike interval bounds (ms) :return: figure handle ''' # Preprocess parameters if tbounds is None: tbounds = (-1.5, 1.5) pltvar = phaseplotvars[varname] # Create figure fig, axes = plt.subplots(1, 2, figsize=(8, 4)) for ax in axes: ax.spines['top'].set_visible(False) ax.spines['right'].set_visible(False) # 1st axis: variable as function of time ax = axes[0] ax.set_xlabel('$\\rm time\ (ms)$', fontsize=fs) ax.set_ylabel('$\\rm {}$'.format(pltvar['label']), fontsize=fs) ax.set_xlim(tbounds) ax.set_ylim(pltvar['lim']) if pretty: ax.set_xticks(tbounds) ax.set_yticks(pltvar['lim']) ax.set_xticklabels(['{:+.1f}'.format(x) for x in ax.get_xticks()]) ax.set_yticklabels(['{:+.0f}'.format(x) for x in ax.get_yticks()]) # 2nd axis: phase plot (derivative of variable vs variable) ax = axes[1] ax.set_xlabel('$\\rm {}$'.format(pltvar['label']), fontsize=fs) ax.set_ylabel('$\\rm {}$'.format(pltvar['dlabel']), fontsize=fs) ax.set_xlim(pltvar['lim']) ax.set_ylim(pltvar['dlim']) ax.plot([0, 0], [pltvar['dlim'][0], pltvar['dlim'][1]], '--', color='k', linewidth=1) ax.plot([pltvar['lim'][0], pltvar['lim'][1]], [0, 0], '--', color='k', linewidth=1) if pretty: ax.set_xticks(pltvar['lim']) ax.set_yticks(pltvar['dlim']) ax.set_xticklabels(['{:+.0f}'.format(x) for x in ax.get_xticks()]) ax.set_yticklabels(['{:+.0f}'.format(x) for x in ax.get_yticks()]) for ax in axes: for item in ax.get_xticklabels() + ax.get_yticklabels(): item.set_fontsize(fs) handles = [] autolabels = [] # For each file for i, filepath in enumerate(filepaths): # Load data logger.info('loading data from file "{}"'.format(filepath)) with open(filepath, 'rb') as fh: frame = pickle.load(fh) df = frame['data'] meta = frame['meta'] tstim = meta['tstim'] t = df['t'].values y = df[varname].values dt = t[1] - t[0] dydt = np.diff(y) / dt # Prominence-based spike detection ispikes, *_, ibounds = findPeaks( y, mph=pltvar['thr_amp'], # mpd=int(np.ceil(SPIKE_MIN_DT / dt)), mpp=pltvar['thr_prom'] ) if len(ispikes) > 0: # Discard potential irrelevant spikes if no_offset: ibounds_right = [x[1] for x in ibounds] inds = np.where(t[ibounds_right] < tstim)[0] ispikes = ispikes[inds] ibounds = ibounds[inds] if no_first: ispikes = ispikes[1:] ibounds = ibounds[1:] # Store spikes in dedicated lists tspikes = [] yspikes = [] dydtspikes = [] for ispike, ibound in zip(ispikes, ibounds): tmin = max(t[ibound[0]], tbounds[0] * 1e-3 + t[ispike]) tmax = min(t[ibound[1]], tbounds[1] * 1e-3 + t[ispike]) inds = np.where((t > tmin) & (t < tmax))[0] tspikes.append(t[inds] - t[ispike]) yspikes.append(y[inds]) dinds = np.hstack(([inds[0] - 1], inds, [inds[-1] + 1])) dydt = np.diff(y[dinds]) / np.diff(t[dinds]) dydtspikes.append((dydt[:-1] + dydt[1:]) / 2) if len(tspikes) == 0: logger.warning('No spikes detected') else: # Plot spikes temporal profiles and phase-plane diagrams for j in range(len(tspikes)): if colors is None: color = 'C{}'.format(i if len(filepaths) > 1 else j % 10) else: color = colors[i] lh = axes[0].plot(tspikes[j] * 1e3, yspikes[j] * pltvar['factor'], linewidth=lw, c=color)[0] axes[1].plot(yspikes[j] * pltvar['factor'], dydtspikes[j] * pltvar['dfactor'], linewidth=lw, c=color) # Populate legend handles.append(lh) autolabels.append(figtitle(meta)) fig.tight_layout() if labels is None: labels 
= autolabels # Add legend fig.subplots_adjust(top=0.8) if len(filepaths) > 1: axes[0].legend(handles, labels, fontsize=fs, frameon=False, loc='upper center', bbox_to_anchor=(1.0, 1.35)) else: fig.suptitle(labels[0], fontsize=fs) # Return return fig def plotSpikingMetrics(xvar, xlabel, metrics_dict, logscale=False, spikeamp=True, colors=None, fs=8, lw=2, ps=4, figsize=cm2inch(7.25, 5.8)): ''' Plot the evolution of key spiking metrics as function of a specific stimulation parameter. ''' ls = {'full': 'o-', 'sonic': 'o--'} cdefault = {'full': 'silver', 'sonic': 'k'} # Create figure fig, axes = plt.subplots(3, 1, figsize=figsize) ibase = 0 if spikeamp else 1 axes[ibase].set_ylabel('Latency\n (ms)', fontsize=fs, rotation=0, ha='right', va='center') axes[ibase + 1].set_ylabel('Firing\n rate (Hz)', fontsize=fs, rotation=0, ha='right', va='center') if spikeamp: axes[2].set_ylabel('Spike amp.\n ($\\rm nC/cm^2$)', fontsize=fs, rotation=0, ha='right', va='center') for ax in axes: ax.spines['top'].set_visible(False) ax.spines['right'].set_visible(False) if logscale: ax.set_xscale('log') for item in ax.get_yticklabels(): item.set_fontsize(fs) for ax in axes[:-1]: ax.spines['bottom'].set_visible(False) ax.set_xticks([]) plt.setp(ax.get_xticklabels(minor=True), visible=False) ax.get_xaxis().set_tick_params(which='minor', size=0) ax.get_xaxis().set_tick_params(which='minor', width=0) axes[-1].set_xlabel(xlabel, fontsize=fs) if not logscale: axes[-1].set_xticks([min(xvar), max(xvar)]) for item in axes[-1].get_xticklabels(): item.set_fontsize(fs) # Plot metrics for each neuron for i, neuron in enumerate(metrics_dict.keys()): full_metrics = metrics_dict[neuron]['full'] sonic_metrics = metrics_dict[neuron]['sonic'] c = colors[neuron] if colors is not None else cdefault # Latency rf = 10 ax = axes[ibase] ax.plot(xvar, full_metrics['latencies (ms)'].values, ls['full'], color=c['full'], linewidth=lw, markersize=ps) ax.plot(xvar, sonic_metrics['latencies (ms)'].values, ls['sonic'], color=c['sonic'], linewidth=lw, markersize=ps, label=neuron) # Firing rate rf = 10 ax = axes[ibase + 1] ax.errorbar(xvar, full_metrics['mean firing rates (Hz)'].values, yerr=full_metrics['std firing rates (Hz)'].values, fmt=ls['full'], color=c['full'], linewidth=lw, markersize=ps) ax.errorbar(xvar, sonic_metrics['mean firing rates (Hz)'].values, yerr=sonic_metrics['std firing rates (Hz)'].values, fmt=ls['sonic'], color=c['sonic'], linewidth=lw, markersize=ps) # Spike amplitudes if spikeamp: ax = axes[2] rf = 10 ax.errorbar(xvar, full_metrics['mean spike amplitudes (nC/cm2)'].values, yerr=full_metrics['std spike amplitudes (nC/cm2)'].values, fmt=ls['full'], color=c['full'], linewidth=lw, markersize=ps) ax.errorbar(xvar, sonic_metrics['mean spike amplitudes (nC/cm2)'].values, yerr=sonic_metrics['std spike amplitudes (nC/cm2)'].values, fmt=ls['sonic'], color=c['sonic'], linewidth=lw, markersize=ps) # Adapt axes y-limits rf = 10 for ax in axes: ax.set_ylim([np.floor(ax.get_ylim()[0] / rf) * rf, np.ceil(ax.get_ylim()[1] / rf) * rf]) ax.set_yticks([max(ax.get_ylim()[0], 0), ax.get_ylim()[1]]) # Legend if len(metrics_dict.keys()) > 1: leg = axes[0].legend(fontsize=fs, frameon=False, bbox_to_anchor=(0., 0.9, 1., .102), loc=8, ncol=2, borderaxespad=0.) 
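The phase-plane panels built in plotPhasePlane above plot each detected spike against its time derivative, estimated by averaging consecutive forward differences to obtain a centered value. A minimal sketch of that step on a hypothetical signal:

import numpy as np

t = np.linspace(0, 0.01, 1001)       # time vector (s)
y = np.sin(2 * np.pi * 500 * t)      # hypothetical membrane variable

dydt = np.diff(y) / np.diff(t)                # forward differences
dydt_centered = (dydt[:-1] + dydt[1:]) / 2    # centered estimate, aligned with y[1:-1]

# Phase-plane trajectory: variable vs. its derivative
trajectory = np.column_stack((y[1:-1], dydt_centered))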
for l in leg.get_lines(): l.set_linestyle('-') fig.subplots_adjust(hspace=.3, bottom=0.2, left=0.35, right=0.95, top=0.95) return fig def plotFRProfile(filepaths, varname, no_offset=False, no_first=False, fs=15, lw=2, cmap=None, zscale='lin', zref='A'): ''' Plot spike rate temporal profiles from simulation results. :param filepaths: list of full paths to data files :param no_offset: boolean stating whether or not to discard post-offset spikes :param no_first: boolean stating whether or not to discard first spike :return: figure handle ''' pltvar = phaseplotvars[varname] # Create figure fig, ax = plt.subplots(figsize=(9, 4)) ax.spines['top'].set_visible(False) ax.spines['right'].set_visible(False) ax.set_xlabel('time (ms)', fontsize=fs) ax.set_ylabel('firing rate (Hz)', fontsize=fs) for item in ax.get_xticklabels() + ax.get_yticklabels(): item.set_fontsize(fs) handles = [] amplitudes = [] intensities = [] # For each file for i, filepath in enumerate(filepaths): # Load data logger.info('loading data from file "{}"'.format(filepath)) with open(filepath, 'rb') as fh: frame = pickle.load(fh) df = frame['data'] meta = frame['meta'] tstim = meta['tstim'] t = df['t'].values y = df[varname].values amplitudes.append(meta['Adrive']) intensities.append(Pressure2Intensity(meta['Adrive'])) # Prominence-based spike detection ispikes, *_ = findPeaks( y, mph=pltvar['thr_amp'], # mpd=int(np.ceil(SPIKE_MIN_DT / dt)), mpp=pltvar['thr_prom'] ) # Discard potential irrelevant spikes if len(ispikes) > 0: if no_offset: inds = np.where(t[ispikes] < tstim)[0] ispikes = ispikes[inds] if no_first: ispikes = ispikes[1:] # Plot firing rate as a function of spikes timing if len(ispikes) > 0: tspikes = t[ispikes][:-1] sr = 1 / np.diff(t[ispikes]) lh = ax.plot(tspikes * 1e3, sr, linewidth=lw)[0] handles.append(lh) else: logger.warning('No spikes detected') intensities = np.array(intensities) amplitudes = np.array(amplitudes) # Define and apply color code if zref == 'I': zref = intensities zlabel = 'Intensity' zunit = 'W/m2' elif zref == 'A': zref = amplitudes * 1e-3 zlabel = 'Amplitude' zunit = 'kPa' fig.tight_layout() if cmap is not None: mymap = plt.get_cmap(cmap) if zscale == 'lin': norm = matplotlib.colors.Normalize(zref.min(), zref.max()) elif zscale == 'log': norm = matplotlib.colors.LogNorm(zref.min(), zref.max()) sm = cm.ScalarMappable(norm=norm, cmap=mymap) sm._A = [] for lh, z in zip(handles, zref): lh.set_color(sm.to_rgba(z)) # Add colorbar fig.subplots_adjust(left=0.1, right=0.8, bottom=0.15, top=0.95, hspace=0.5) cbarax = fig.add_axes([0.85, 0.15, 0.03, 0.8]) fig.colorbar(sm, cax=cbarax, orientation='vertical') cbarax.set_ylabel('{} ({})'.format(zlabel, zunit), fontsize=fs) for item in cbarax.get_yticklabels(): item.set_fontsize(fs) else: for lh, z in zip(handles, zref): lh.set_label('{:.2f} {}'.format(z, zunit)) ax.legend() # Return return fig diff --git a/PySONIC/plt/timeseries.py b/PySONIC/plt/timeseries.py new file mode 100644 index 0000000..57e4000 --- /dev/null +++ b/PySONIC/plt/timeseries.py @@ -0,0 +1,508 @@ +# -*- coding: utf-8 -*- +# @Author: Theo Lemaire +# @Date: 2018-09-25 16:18:45 +# @Last Modified by: Theo Lemaire +# @Last Modified time: 2019-06-06 18:27:29 + +import re +import ntpath +import numpy as np +import matplotlib.pyplot as plt +from matplotlib.patches import Rectangle +from matplotlib.ticker import FormatStrFormatter + +from ..core import getModel +from ..utils import * +from .pltutils import * + + +class TimeSeriesPlot: + ''' Generic interface to build a plot displaying temporal 
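The amplitude/intensity color-coding used in plotFRProfile above maps each trace's z-value through a normalizer and a colormap via a ScalarMappable, which is then reused for the colorbar. A standalone sketch of that pattern (the amplitude values are made up):

import numpy as np
import matplotlib
import matplotlib.pyplot as plt

amplitudes = np.array([20e3, 50e3, 100e3])    # hypothetical acoustic amplitudes (Pa)
norm = matplotlib.colors.LogNorm(amplitudes.min(), amplitudes.max())
sm = plt.cm.ScalarMappable(norm=norm, cmap=plt.get_cmap('viridis'))
sm._A = []                                    # allows the mappable to back a colorbar

fig, ax = plt.subplots()
t = np.linspace(0, 0.1, 100)
for A in amplitudes:
    ax.plot(t, np.full_like(t, A), color=sm.to_rgba(A))  # dummy traces, one color per amplitude
fig.colorbar(sm, ax=ax, label='Amplitude (Pa)')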
profiles of model simulations. ''' + + def __init__(self, filepaths, varname): + ''' Constructor. + + :param filepaths: list of full paths to output data files to be compared + :param varname: name of variable to extract and compare + ''' + self.filepaths = filepaths + self.varname = varname + + def __call__(self, *args, **kwargs): + return self.render(*args, **kwargs) + + def render(*args, **kwargs): + return NotImplementedError + + def checkInputs(self, *args, **kwargs): + return NotImplementedError + + def createBackBone(self, *args, **kwargs): + return NotImplementedError + + def getSimType(self, fname): + ''' Get sim type from filename. ''' + mo = re.search('(^[A-Z]*)_(.*).pkl', fname) + if not mo: + raise ValueError('Could not find sim-key in filename: "{}"'.format(fname)) + return mo.group(1) + + def getTimePltVar(self, tscale): + ''' Return time plot variable for a given temporal scale. ''' + return { + 'desc': 'time', + 'label': 'time', + 'unit': tscale, + 'factor': {'ms': 1e3, 'us': 1e6}[tscale], + 'onset': {'ms': 1e-3, 'us': 1e-6}[tscale] + } + + def getStimPulses(self, t, states): + ''' Determine the onset and offset times of pulses from a stimulation vector. + + :param t: time vector (s). + :param states: a vector of stimulation state (ON/OFF) at each instant in time. + :return: 3-tuple with number of patches, timing of STIM-ON an STIM-OFF instants. + ''' + # Compute states derivatives and identify bounds indexes of pulses + dstates = np.diff(states) + ipulse_on = np.insert(np.where(dstates > 0.0)[0] + 1, 0, 0) + ipulse_off = np.where(dstates < 0.0)[0] + 1 + if ipulse_off.size < ipulse_on.size: + ioff = t.size - 1 + if ipulse_off.size == 0: + ipulse_off = np.array([ioff]) + else: + ipulse_off = np.insert(ipulse_off, ipulse_off.size - 1, ioff) + + # Get time instants for pulses ON and OFF + tpulse_on = t[ipulse_on] + tpulse_off = t[ipulse_off] + return tpulse_on, tpulse_off + + def addLegend(self, ax, fs, black=False, straight=False, interactive=False): + lh = ax.legend(loc=1, fontsize=fs, frameon=False) + if black: + for l in lh.get_lines(): + l.set_color('k') + if straight: + for l in lh.get_lines(): + l.set_linestyle('-') + + def getStimStates(self, df): + try: + stimstate = df['stimstate'] + except KeyError: + stimstate = df['states'] + return stimstate.values + + def prepareTime(self, t, tplt): + if tplt['onset'] > 0.0: + tonset = np.array([-tplt['onset'], -t[0] - t[1]]) + t = np.hstack((tonset, t)) + return t * tplt['factor'] + + def addPatches(self, ax, tpatch_on, tpatch_off, tfactor, color='#8A8A8A'): + for i in range(tpatch_on.size): + ax.axvspan(tpatch_on[i] * tfactor, tpatch_off[i] * tfactor, + edgecolor='none', facecolor=color, alpha=0.2) + + def postProcess(self, *args, **kwargs): + return NotImplementedError + + def addInset(self, fig, ax, inset): + ''' Create inset axis. ''' + inset_ax = fig.add_axes(ax.get_position()) + inset_ax.set_zorder(1) + inset_ax.set_xlim(inset['xlims'][0], inset['xlims'][1]) + inset_ax.set_ylim(inset['ylims'][0], inset['ylims'][1]) + inset_ax.set_xticks([]) + inset_ax.set_yticks([]) + inset_ax.add_patch(Rectangle((inset['xlims'][0], inset['ylims'][0]), + inset['xlims'][1] - inset['xlims'][0], + inset['ylims'][1] - inset['ylims'][0], + color='w')) + return inset_ax + + def materializeInset(self, ax, inset_ax, inset): + ''' Materialize inset with zoom boox. 
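The getStimPulses method above recovers pulse onsets and offsets from the stimulation state vector through sign changes of its first difference. A toy example of that core step (the actual method also handles signals that start ON or end mid-pulse):

import numpy as np

t = np.linspace(0, 9e-3, 10)                        # time vector (s)
states = np.array([0, 1, 1, 0, 0, 1, 1, 1, 0, 0])   # hypothetical ON/OFF stimulation vector

dstates = np.diff(states)
ipulse_on = np.where(dstates > 0)[0] + 1    # OFF -> ON transition indexes
ipulse_off = np.where(dstates < 0)[0] + 1   # ON -> OFF transition indexes
tpulse_on, tpulse_off = t[ipulse_on], t[ipulse_off]
# -> two pulses: ON at t[1] and t[5], OFF at t[3] and t[8]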
''' + # Re-position inset axis + axpos = ax.get_position() + left, right, = rescale(inset['xcoords'], ax.get_xlim()[0], ax.get_xlim()[1], + axpos.x0, axpos.x0 + axpos.width) + bottom, top, = rescale(inset['ycoords'], ax.get_ylim()[0], ax.get_ylim()[1], + axpos.y0, axpos.y0 + axpos.height) + inset_ax.set_position([left, bottom, right - left, top - bottom]) + for i in inset_ax.spines.values(): + i.set_linewidth(2) + + # Materialize inset target region with contour frame + ax.plot(inset['xlims'], [inset['ylims'][0]] * 2, linestyle='-', color='k') + ax.plot(inset['xlims'], [inset['ylims'][1]] * 2, linestyle='-', color='k') + ax.plot([inset['xlims'][0]] * 2, inset['ylims'], linestyle='-', color='k') + ax.plot([inset['xlims'][1]] * 2, inset['ylims'], linestyle='-', color='k') + + # Link target and inset with dashed lines if possible + if inset['xcoords'][1] < inset['xlims'][0]: + ax.plot([inset['xcoords'][1], inset['xlims'][0]], + [inset['ycoords'][0], inset['ylims'][0]], + linestyle='--', color='k') + ax.plot([inset['xcoords'][1], inset['xlims'][0]], + [inset['ycoords'][1], inset['ylims'][1]], + linestyle='--', color='k') + elif inset['xcoords'][0] > inset['xlims'][1]: + ax.plot([inset['xcoords'][0], inset['xlims'][1]], + [inset['ycoords'][0], inset['ylims'][0]], + linestyle='--', color='k') + ax.plot([inset['xcoords'][0], inset['xlims'][1]], + [inset['ycoords'][1], inset['ylims'][1]], + linestyle='--', color='k') + else: + logger.warning('Inset x-coordinates intersect with those of target region') + + def addInsetPatches(self, ax, inset_ax, inset, tpatch_on, tpatch_off, tfactor, color): + ybottom, ytop = ax.get_ylim() + cond_on = np.logical_and(tpatch_on > (inset['xlims'][0] / tfactor), + tpatch_on < (inset['xlims'][1] / tfactor)) + cond_off = np.logical_and(tpatch_off > (inset['xlims'][0] / tfactor), + tpatch_off < (inset['xlims'][1] / tfactor)) + cond_glob = np.logical_and(tpatch_on < (inset['xlims'][0] / tfactor), + tpatch_off > (inset['xlims'][1] / tfactor)) + cond_onoff = np.logical_or(cond_on, cond_off) + cond = np.logical_or(cond_onoff, cond_glob) + npatches_inset = np.sum(cond) + for i in range(npatches_inset): + inset_ax.add_patch(Rectangle((tpatch_on[cond][i] * tfactor, ybottom), + (tpatch_off[cond][i] - tpatch_on[cond][i]) * + tfactor, ytop - ybottom, color=color, + alpha=0.1)) + + def removeSpines(self, ax): + for item in ['top', 'right']: + ax.spines[item].set_visible(False) + + def setTimeLabel(self, ax, tplt, fs): + ax.set_xlabel('$\\rm {}\ ({})$'.format(tplt['label'], tplt['unit']), fontsize=fs) + + def setYLabel(self, ax, yplt, fs, grouplabel=None): + lbl = grouplabel if grouplabel is not None else yplt['label'] + ax.set_ylabel('$\\rm {}\ ({})$'.format(lbl, yplt.get('unit', '')), fontsize=fs) + + def setYTicks(self, ax, yticks=None): + if yticks is not None: # optional y-ticks + ax.set_yticks(yticks) + # else: + # ax.locator_params(axis='y', nbins=2) + # ax.yaxis.set_major_locator(plt.MaxNLocator(2)) + # if any(ax.get_yticks() < 0): + # ax.yaxis.set_major_formatter(FormatStrFormatter('%+.0f')) + + def setTickLabelsFontSize(self, ax, fs): + for tick in ax.xaxis.get_major_ticks() + ax.yaxis.get_major_ticks(): + tick.label.set_fontsize(fs) + + +class ComparativePlot(TimeSeriesPlot): + ''' Interface to build a comparative plot displaying profiles of a specific output variable + across different model simulations. ''' + + def __init__(self, filepaths, varname): + ''' Constructor. 
+ + :param filepaths: list of full paths to output data files to be compared + :param varname: name of variable to extract and compare + ''' + self.sim_type_ref = None + super().__init__(filepaths, varname) + + def checkInputs(self, lines, labels, colors, patches): + # Input check: labels + if labels is not None: + if len(labels) != len(self.filepaths): + raise ValueError( + 'Invalid labels ({}): not matching number of compared files ({})'.format( + len(labels), len(self.filepaths))) + if not all(isinstance(x, str) for x in labels): + raise TypeError('Invalid labels: must be string typed') + + # Input check: line styles and colors + if colors is None: + colors = ['C{}'.format(j) for j in range(len(self.filepaths))] + if lines is None: + lines = ['-'] * len(self.filepaths) + + # Input check: STIM-ON patches + greypatch = False + if patches == 'none': + patches = [False] * len(self.filepaths) + elif patches == 'all': + patches = [True] * len(self.filepaths) + elif patches == 'one': + patches = [True] + [False] * (len(self.filepaths) - 1) + greypatch = True + elif isinstance(patches, list): + if len(patches) != len(self.filepaths): + raise ValueError( + 'Invalid patches ({}): not matching number of compared files ({})'.format( + len(patches), len(self.filepaths))) + if not all(isinstance(p, bool) for p in patches): + raise TypeError('Invalid patch sequence: all list items must be boolean typed') + else: + raise ValueError( + 'Invalid patches: must be either "none", all", "one", or a boolean list') + return lines, labels, colors, patches, greypatch + + def createBackBone(self, figsize): + fig, ax = plt.subplots(figsize=figsize) + ax.set_zorder(0) + return fig, ax + + def postProcess(self, ax, tplt, yplt, fs, xticks, yticks): + self.removeSpines(ax) + if 'bounds' in yplt: + ax.set_ylim(*yplt['bounds']) + self.setTimeLabel(ax, tplt, fs) + self.setYLabel(ax, yplt, fs, grouplabel=None) + if xticks is not None: # optional x-ticks + ax.set_xticks(xticks) + self.setYTicks(ax, yticks) + self.setTickLabelsFontSize(ax, fs) + + + def render(self, figsize=(11, 4), fs=10, lw=2, labels=None, colors=None, lines=None, + patches='one', xticks=None, yticks=None, blacklegend=False, straightlegend=False, + inset=None): + ''' Render plot. 
+ + :param figsize: figure size (x, y) + :param fs: labels fontsize + :param lw: linewidth + :param labels: list of labels to use in the legend + :param colors: list of colors to use for each curve + :param lines: list of linestyles + :param patches: string indicating whether/how to mark stimulation periods + with rectangular patches + :param xticks: list of x-ticks + :param yticks: list of y-ticks + :param blacklegend: boolean indicating whether to use black lines in the legend + :param straightlegend: boolean indicating whether to use straight lines in the legend + :param inset: dictionary specifying the coordinates and limits of an inset zooming on + a particular region of the graph + :return: figure handle + ''' + + lines, labels, colors, patches, greypatch = self.checkInputs( + lines, labels, colors, patches) + + fig, ax = self.createBackBone(figsize) + if inset is not None: + inset_ax = self.addInset(fig, ax, inset) + + # Loop through data files + for j, filepath in enumerate(self.filepaths): + + # Retrieve sim type + pkl_filename = ntpath.basename(filepath) + sim_type = self.getSimType(pkl_filename) + + # Check consistency of sim types + if self.sim_type_ref is None: + self.sim_type_ref = sim_type + elif sim_type != self.sim_type_ref: + raise ValueError('Invalid comparison: different simulation types') + + # Load data and extract model + data, meta = loadData(filepath) + stimstate = self.getStimStates(data) + model = getModel(sim_type, meta) + + # Extract time and stim pulses + t = data['t'].values + tpatch_on, tpatch_off = self.getStimPulses(t, stimstate) + tplt = self.getTimePltVar(model.tscale) + t = self.prepareTime(t, tplt) + + # Extract y-variable + pltvars = model.getPltVars() + if self.varname not in pltvars: + raise KeyError( + 'Unknown plot variable: "{}". Possible plot variables are: {}'.format( + self.varname, ', '.join(['"{}"'.format(p) for p in pltvars.keys()]))) + yplt = pltvars[self.varname] + y = extractPltVar(model, yplt, data, meta, t.size, self.varname) + + # Plot time series + handle = ax.plot(t, y, linewidth=lw, linestyle=lines[j], color=colors[j], + label=labels[j] if labels is not None else figtitle(meta)) + + # Plot optional inset + if inset is not None: + inset_window = np.logical_and(t > (inset['xlims'][0] / tplt['factor']), + t < (inset['xlims'][1] / tplt['factor'])) + inset_ax.plot(t[inset_window] * tplt['factor'], y[inset_window] * yplt['factor'], + linewidth=lw, linestyle=lines[j], color=colors[j]) + + # Add optional STIM-ON patches + if patches[j]: + ybottom, ytop = ax.get_ylim() + color = '#8A8A8A' if greypatch else handle[0].get_color() + self.addPatches(ax, tpatch_on, tpatch_off, tplt['factor'], color) + if inset is not None: + self.addInsetPatches( + ax, inset_ax, inset, tpatch_on, tpatch_off, tplt['factor'], color) + + # Postprocess figure + self.postProcess(ax, tplt, yplt, fs, xticks, yticks) + fig.tight_layout() + if inset is not None: + self.materializeInset(ax, inset_ax, inset) + + # Add legend + self.addLegend(ax, fs, black=blacklegend, straight=straightlegend) + + return fig + + +class SchemePlot(TimeSeriesPlot): + ''' Interface to build a plot displaying profiles of several output variables + arranged into specific schemes. ''' + + def __init__(self, filepaths, pltscheme=None): + ''' Constructor. 
+ + :param filepaths: list of full paths to output data files to be compared + :param pltscheme: dict of lists of variables names to extract and plot together + ''' + self.pltscheme = pltscheme + self.filepaths = filepaths + + def createBackBone(self, pltscheme): + naxes = len(pltscheme) + if naxes == 1: + fig, ax = plt.subplots(figsize=(11, 4)) + axes = [ax] + else: + fig, axes = plt.subplots(naxes, 1, figsize=(11, min(3 * naxes, 9))) + return fig, axes + + def postProcess(self, axes, tplt, yplt, fs): + for ax in axes: + self.removeSpines(ax) + self.setTickLabelsFontSize(ax, fs) + for ax in axes[:-1]: + ax.set_xticklabels([]) + self.setTimeLabel(axes[-1], tplt, fs) + + def render(self, fs=10, lw=2, labels=None, colors=None, lines=None, patches=True, title=True, + save=False, directory=None, ask_before_save=True, fig_ext='png', tag='fig', frequency=1): + + figs = [] + for filepath in self.filepaths: + + # Retrieve file code and sim type from file name + pkl_filename = os.path.basename(filepath) + filecode = pkl_filename[0:-4] + sim_type = self.getSimType(pkl_filename) + + # Load data and extract model + data, meta = loadData(filepath, frequency) + stimstate = self.getStimStates(data) + model = getModel(sim_type, meta) + + # Extract time and stim pulses + t = data['t'].values + tpatch_on, tpatch_off = self.getStimPulses(t, stimstate) + tplt = self.getTimePltVar(model.tscale) + t = self.prepareTime(t, tplt) + + # Check plot scheme if provided, otherwise generate it + pltvars = model.getPltVars() + if self.pltscheme is not None: + for key in list(sum(list(self.pltscheme.values()), [])): + if key not in pltvars: + raise KeyError('Unknown plot variable: "{}"'.format(key)) + pltscheme = self.pltscheme + else: + pltscheme = model.getPltScheme() + + # Create figure + fig, axes = self.createBackBone(pltscheme) + + # Loop through each subgraph + for ax, (grouplabel, keys) in zip(axes, pltscheme.items()): + + # Extract variables to plot + nvars = len(keys) + ax_pltvars = [pltvars[k] for k in keys] + if nvars == 1: + ax_pltvars[0]['color'] = 'k' + ax_pltvars[0]['ls'] = '-' + + # Set y-axis unit and bounds + self.setYLabel(ax, ax_pltvars[0], fs, grouplabel=grouplabel) + if 'bounds' in ax_pltvars[0]: + ax_min = min([ap['bounds'][0] for ap in ax_pltvars]) + ax_max = max([ap['bounds'][1] for ap in ax_pltvars]) + ax.set_ylim(ax_min, ax_max) + + # Plot time series + icolor = 0 + for yplt, name in zip(ax_pltvars, pltscheme[grouplabel]): + y = extractPltVar(model, yplt, data, meta, t.size, name) + ax.plot(t, y, yplt.get('ls', '-'), c=yplt.get('color', 'C{}'.format(icolor)), + lw=lw, label='$\\rm {}$'.format(yplt['label'])) + if 'color' not in yplt: + icolor += 1 + + # Add legend + if nvars > 1 or 'gate' in ax_pltvars[0]['desc']: + ax.legend(fontsize=fs, loc=7, ncol=nvars // 4 + 1, frameon=False) + + if patches: + for ax in axes: + self.addPatches(ax, tpatch_on, tpatch_off, tplt['factor']) + + # Post-process figure + self.postProcess(axes, tplt, yplt, fs) + if title: + axes[0].set_title(figtitle(meta), fontsize=fs) + fig.tight_layout() + + # Save figure if needed (automatic or checked) + if save: + if directory is None: + directory = os.path.split(filepath)[0] + if ask_before_save: + plt_filename = SaveFileDialog( + '{}_{}.{}'.format(filecode, tag, fig_ext), + dirname=directory, ext=fig_ext) + else: + plt_filename = '{}/{}_{}.{}'.format(directory, filecode, tag, fig_ext) + if plt_filename: + plt.savefig(plt_filename) + logger.info('Saving figure as "{}"'.format(plt_filename)) + plt.close() + + figs.append(fig) + return figs + + +if __name__ == 
'__main__': + filepaths = OpenFilesDialog('pkl')[0] + comp_plot = ComparativePlot(filepaths, 'Qm') + fig = comp_plot.render( + lines=['-', '--'], + labels=['60 kPa', '80 kPa'], + patches='one', + colors=['r', 'g'], + blacklegend=False, + straightlegend=False, + xticks=[0, 100], + yticks=[-80, +50], + inset={'xcoords': [5, 40], 'ycoords': [-35, 45], 'xlims': [57.5, 60.5], 'ylims': [10, 35]} + ) + + scheme_plot = SchemePlot(filepaths) + figs = scheme_plot.render() + + plt.show() diff --git a/PySONIC/utils.py b/PySONIC/utils.py index d6fc1a8..8162215 100644 --- a/PySONIC/utils.py +++ b/PySONIC/utils.py @@ -1,858 +1,835 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- # @Author: Theo Lemaire # @Date: 2016-09-19 22:30:46 # @Email: theo.lemaire@epfl.ch # @Last Modified by: Theo Lemaire -# @Last Modified time: 2019-06-03 19:50:08 +# @Last Modified time: 2019-06-06 16:13:51 ''' Definition of generic utility functions used in other modules ''' import csv from functools import wraps import operator import time import os import math import pickle from tqdm import tqdm import logging import tkinter as tk from tkinter import filedialog import numpy as np import colorlog from scipy.interpolate import interp1d # Package logger my_log_formatter = colorlog.ColoredFormatter( '%(log_color)s %(asctime)s %(message)s', datefmt='%d/%m/%Y %H:%M:%S:', reset=True, log_colors={ 'DEBUG': 'green', 'INFO': 'white', 'WARNING': 'yellow', 'ERROR': 'red', 'CRITICAL': 'red,bg_white', }, - style='%' -) + style='%') def setHandler(logger, handler): for h in logger.handlers: logger.removeHandler(h) logger.addHandler(handler) return logger def setLogger(name, formatter): handler = colorlog.StreamHandler() handler.setFormatter(formatter) logger = colorlog.getLogger(name) logger.addHandler(handler) return logger class TqdmHandler(logging.StreamHandler): def __init__(self, formatter): logging.StreamHandler.__init__(self) self.setFormatter(formatter) def emit(self, record): msg = self.format(record) tqdm.write(msg) logger = setLogger('PySONIC', my_log_formatter) -titrations_logfile = os.path.join(os.path.split(__file__)[0], 'neurons', 'titrations.log') - - -# Figure naming conventions -def figtitle(meta): - ''' Return appropriate title based on simulation metadata. ''' - if 'Cm0' in meta: - return '{:.0f}nm radius BLS structure: MECH-STIM {:.0f}kHz, {:.2f}kPa, {:.1f}nC/cm2'.format( - meta['a'] * 1e9, meta['Fdrive'] * 1e-3, meta['Adrive'] * 1e-3, meta['Qm'] * 1e5) - else: - if meta['DC'] < 1: - wavetype = 'PW' - suffix = ', {:.2f}Hz PRF, {:.0f}% DC'.format(meta['PRF'], meta['DC'] * 1e2) - else: - wavetype = 'CW' - suffix = '' - if 'Astim' in meta: - return '{} neuron: {} E-STIM {:.2f}mA/m2, {:.0f}ms{}'.format( - meta['neuron'], wavetype, meta['Astim'], meta['tstim'] * 1e3, suffix) - else: - return '{} neuron ({:.1f}nm): {} A-STIM {:.0f}kHz {:.2f}kPa, {:.0f}ms{} - {} model'.format( - meta['neuron'], meta['a'] * 1e9, wavetype, meta['Fdrive'] * 1e-3, - meta['Adrive'] * 1e-3, meta['tstim'] * 1e3, suffix, meta['method']) - # SI units prefixes si_prefixes = { 'y': 1e-24, # yocto 'z': 1e-21, # zepto 'a': 1e-18, # atto 'f': 1e-15, # femto 'p': 1e-12, # pico 'n': 1e-9, # nano 'u': 1e-6, # micro 'm': 1e-3, # mili '': 1e0, # None 'k': 1e3, # kilo 'M': 1e6, # mega 'G': 1e9, # giga 'T': 1e12, # tera 'P': 1e15, # peta 'E': 1e18, # exa 'Z': 1e21, # zetta 'Y': 1e24, # yotta } -def loadData(fpath, frequency=1): - ''' Load dataframe and metadata dictionary from pickle file. 
''' - logger.info('Loading data from "%s"', os.path.basename(fpath)) - with open(fpath, 'rb') as fh: - frame = pickle.load(fh) - df = frame['data'].iloc[::frequency] - meta = frame['meta'] - return df, meta def si_format(x, precision=0, space=' '): ''' Format a float according to the SI unit system, with the appropriate prefix letter. ''' if isinstance(x, float) or isinstance(x, int) or isinstance(x, np.float) or\ isinstance(x, np.int32) or isinstance(x, np.int64): if x == 0: factor = 1e0 prefix = '' else: sorted_si_prefixes = sorted(si_prefixes.items(), key=operator.itemgetter(1)) vals = [tmp[1] for tmp in sorted_si_prefixes] # vals = list(si_prefixes.values()) ix = np.searchsorted(vals, np.abs(x)) - 1 if np.abs(x) == vals[ix + 1]: ix += 1 factor = vals[ix] prefix = sorted_si_prefixes[ix][0] # prefix = list(si_prefixes.keys())[ix] return '{{:.{}f}}{}{}'.format(precision, space, prefix).format(x / factor) elif isinstance(x, list) or isinstance(x, tuple): return [si_format(item, precision, space) for item in x] elif isinstance(x, np.ndarray) and x.ndim == 1: return [si_format(float(item), precision, space) for item in x] else: print(type(x)) def pow10_format(number, precision=2): ''' Format a number in power of 10 notation. ''' ret_string = '{0:.{1:d}e}'.format(number, precision) a, b = ret_string.split("e") a = float(a) b = int(b) return '{}10^{{{}}}'.format('{} * '.format(a) if a != 1. else '', b) def rmse(x1, x2): ''' Compute the root mean square error between two 1D arrays ''' return np.sqrt(((x1 - x2) ** 2).mean()) def rsquared(x1, x2): ''' compute the R-squared coefficient between two 1D arrays ''' residuals = x1 - x2 ss_res = np.sum(residuals**2) ss_tot = np.sum((x1 - np.mean(x1))**2) return 1 - (ss_res / ss_tot) def getInDict(d, key, func): ''' Return value of specific dictionary key, or function return alias if not there. ''' if key in d: return d[key] else: return func() def Pressure2Intensity(p, rho=1075.0, c=1515.0): ''' Return the spatial peak, pulse average acoustic intensity (ISPPA) associated with the specified pressure amplitude. :param p: pressure amplitude (Pa) :param rho: medium density (kg/m3) :param c: speed of sound in medium (m/s) :return: spatial peak, pulse average acoustic intensity (W/m2) ''' return p**2 / (2 * rho * c) def Intensity2Pressure(I, rho=1075.0, c=1515.0): ''' Return the pressure amplitude associated with the specified spatial peak, pulse average acoustic intensity (ISPPA). :param I: spatial peak, pulse average acoustic intensity (W/m2) :param rho: medium density (kg/m3) :param c: speed of sound in medium (m/s) :return: pressure amplitude (Pa) ''' return np.sqrt(2 * rho * c * I) def OpenFilesDialog(filetype, dirname=''): ''' Open a FileOpenDialogBox to select one or multiple file. The default directory and file type are given. :param dirname: default directory :param filetype: default file type :return: tuple of full paths to the chosen filenames ''' root = tk.Tk() root.withdraw() filenames = filedialog.askopenfilenames(filetypes=[(filetype + " files", '.' + filetype)], initialdir=dirname) if filenames: par_dir = os.path.abspath(os.path.join(filenames[0], os.pardir)) else: par_dir = None return (filenames, par_dir) def selectDirDialog(): ''' Open a dialog box to select a directory. :return: full path to selected directory ''' root = tk.Tk() root.withdraw() return filedialog.askdirectory() def SaveFileDialog(filename, dirname=None, ext=None): ''' Open a dialog box to save file. 
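The acoustic conversion helpers above implement the plane-wave relation I = p^2 / (2 * rho * c) and its inverse. A quick round-trip check with an arbitrary amplitude, assuming the functions defined in this module:

p = 100e3                          # hypothetical peak pressure amplitude (Pa)
I = Pressure2Intensity(p)          # p**2 / (2 * 1075.0 * 1515.0), roughly 3.07e3 W/m2
p_back = Intensity2Pressure(I)     # back to ~100e3 Pa, up to floating-point error
print(si_format(p_back, 1, space=' ') + 'Pa')   # -> something like '100.0 kPa'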
:param filename: filename :param dirname: initial directory :param ext: default extension :return: full path to the chosen filename ''' root = tk.Tk() root.withdraw() filename_out = filedialog.asksaveasfilename( defaultextension=ext, initialdir=dirname, initialfile=filename) return filename_out +def loadData(fpath, frequency=1): + ''' Load dataframe and metadata dictionary from pickle file. ''' + logger.info('Loading data from "%s"', os.path.basename(fpath)) + with open(fpath, 'rb') as fh: + frame = pickle.load(fh) + df = frame['data'].iloc[::frequency] + meta = frame['meta'] + return df, meta + + def downsample(t_dense, y, nsparse): ''' Decimate periodic signals to a specified number of samples.''' if(y.ndim) > 1: nsignals = y.shape[0] else: nsignals = 1 y = np.array([y]) # determine time step and period of input signal T = t_dense[-1] - t_dense[0] dt_dense = t_dense[1] - t_dense[0] # resample time vector linearly t_ds = np.linspace(t_dense[0], t_dense[-1], nsparse) # create MAV window nmav = int(0.03 * T / dt_dense) if nmav % 2 == 0: nmav += 1 mav = np.ones(nmav) / nmav # determine signals padding npad = int((nmav - 1) / 2) # determine indexes of sampling on convolved signals ids = np.round(np.linspace(0, t_dense.size - 1, nsparse)).astype(int) y_ds = np.empty((nsignals, nsparse)) # loop through signals for i in range(nsignals): # pad, convolve and resample pad_left = y[i, -(npad + 2):-2] pad_right = y[i, 1:npad + 1] y_ext = np.concatenate((pad_left, y[i, :], pad_right), axis=0) y_mav = np.convolve(y_ext, mav, mode='valid') y_ds[i, :] = y_mav[ids] if nsignals == 1: y_ds = y_ds[0, :] return (t_ds, y_ds) def rescale(x, lb=None, ub=None, lb_new=0, ub_new=1): ''' Rescale a value to a specific interval by linear transformation. ''' if lb is None: lb = x.min() if ub is None: ub = x.max() xnorm = (x - lb) / (ub - lb) return xnorm * (ub_new - lb_new) + lb_new def getNeuronLookupsFile(mechname, a=None, Fdrive=None, Adrive=None, fs=False): fpath = os.path.join( os.path.split(__file__)[0], 'neurons', '{}_lookups'.format(mechname) ) if a is not None: fpath += '_{:.0f}nm'.format(a * 1e9) if Fdrive is not None: fpath += '_{:.0f}kHz'.format(Fdrive * 1e-3) if Adrive is not None: fpath += '_{:.0f}kPa'.format(Adrive * 1e-3) if fs is True: fpath += '_fs' return '{}.pkl'.format(fpath) def getLookups4D(mechname): ''' Retrieve 4D lookup tables and reference vectors for a given membrane mechanism. :param mechname: name of membrane density mechanism :return: 4-tuple with 1D numpy arrays of reference input vectors (charge density and one other variable), a dictionary of associated 2D lookup numpy arrays, and a dictionary with information about the other variable. ''' # Check lookup file existence lookup_path = getNeuronLookupsFile(mechname) if not os.path.isfile(lookup_path): raise FileNotFoundError('Missing lookup file: "{}"'.format(lookup_path)) # Load lookups dictionary # logger.debug('Loading %s lookup table', mechname) with open(lookup_path, 'rb') as fh: df = pickle.load(fh) inputs = df['input'] lookups4D = df['lookup'] # Retrieve 1D inputs from lookups dictionary aref = inputs['a'] Fref = inputs['f'] Aref = inputs['A'] Qref = inputs['Q'] return aref, Fref, Aref, Qref, lookups4D def getLookupsOff(mechname): ''' Retrieve appropriate US-OFF lookup tables and reference vectors for a given membrane mechanism. :param mechname: name of membrane density mechanism :return: 2-tuple with 1D numpy array of reference charge density and dictionary of associated 1D lookup numpy arrays. 
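The rescale helper defined below performs a plain linear mapping between intervals, which the plotting code uses for example to position insets in figure coordinates. For instance, assuming the function as defined in this module:

import numpy as np

x = np.array([0.0, 5.0, 10.0])
rescale(x)                      # -> array([0. , 0.5, 1. ]), normalized to [0, 1] by default
rescale(x, 0, 10, -80, 50)      # -> array([-80., -15.,  50.]), mapped onto [-80, 50]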
''' # Get 4D lookups and input vectors aref, Fref, Aref, Qref, lookups4D = getLookups4D(mechname) # Perform 2D projection in appropriate dimensions logger.debug('Interpolating lookups at A = 0') lookups_off = {key: y4D[0, 0, 0, :] for key, y4D in lookups4D.items()} return Qref, lookups_off def getLookups2D(mechname, a=None, Fdrive=None, Adrive=None): ''' Retrieve appropriate 2D lookup tables and reference vectors for a given membrane mechanism, projected at a specific combination of sonophore radius, US frequency and/or acoustic pressure amplitude. :param mechname: name of membrane density mechanism :param a: sonophore radius (m) :param Fdrive: US frequency (Hz) :param Adrive: Acoustic peak pressure amplitude (Hz) :return: 4-tuple with 1D numpy arrays of reference input vectors (charge density and one other variable), a dictionary of associated 2D lookup numpy arrays, and a dictionary with information about the other variable. ''' # Get 4D lookups and input vectors aref, Fref, Aref, Qref, lookups4D = getLookups4D(mechname) # Check that inputs are within lookup range if a is not None: a = isWithin('radius', a, (aref.min(), aref.max())) if Fdrive is not None: Fdrive = isWithin('frequency', Fdrive, (Fref.min(), Fref.max())) if Adrive is not None: Adrive = isWithin('amplitude', Adrive, (Aref.min(), Aref.max())) # Determine projection dimensions based on inputs var_a = {'name': 'a', 'label': 'sonophore radius', 'val': a, 'unit': 'm', 'factor': 1e9, 'ref': aref, 'axis': 0} var_Fdrive = {'name': 'f', 'label': 'frequency', 'val': Fdrive, 'unit': 'Hz', 'factor': 1e-3, 'ref': Fref, 'axis': 1} var_Adrive = {'name': 'A', 'label': 'amplitude', 'val': Adrive, 'unit': 'Pa', 'factor': 1e-3, 'ref': Aref, 'axis': 2} if not isinstance(Adrive, float): var1 = var_a var2 = var_Fdrive var3 = var_Adrive elif not isinstance(Fdrive, float): var1 = var_a var2 = var_Adrive var3 = var_Fdrive elif not isinstance(a, float): var1 = var_Fdrive var2 = var_Adrive var3 = var_a # Perform 2D projection in appropriate dimensions # logger.debug('Interpolating lookups at (%s = %s%s, %s = %s%s)', # var1['name'], si_format(var1['val'], space=' '), var1['unit'], # var2['name'], si_format(var2['val'], space=' '), var2['unit']) lookups3D = {key: interp1d(var1['ref'], y4D, axis=var1['axis'])(var1['val']) for key, y4D in lookups4D.items()} if var2['axis'] > var1['axis']: var2['axis'] -= 1 lookups2D = {key: interp1d(var2['ref'], y3D, axis=var2['axis'])(var2['val']) for key, y3D in lookups3D.items()} if var3['val'] is not None: logger.debug('Interpolating lookups at %d new %s values between %s%s and %s%s', len(var3['val']), var3['name'], si_format(min(var3['val']), space=' '), var3['unit'], si_format(max(var3['val']), space=' '), var3['unit']) lookups2D = {key: interp1d(var3['ref'], y2D, axis=0)(var3['val']) for key, y2D in lookups2D.items()} var3['ref'] = np.array(var3['val']) return var3['ref'], Qref, lookups2D, var3 def getLookups2Dfs(mechname, a, Fdrive, fs): # Check lookup file existence lookup_path = getNeuronLookupsFile(mechname, a=a, Fdrive=Fdrive, fs=True) if not os.path.isfile(lookup_path): raise FileNotFoundError('Missing lookup file: "{}"'.format(lookup_path)) # Load lookups dictionary logger.debug('Loading %s lookup table with fs = %.0f%%', mechname, fs * 1e2) with open(lookup_path, 'rb') as fh: df = pickle.load(fh) inputs = df['input'] lookups3D = df['lookup'] # Retrieve 1D inputs from lookups dictionary fsref = inputs['fs'] Aref = inputs['A'] Qref = inputs['Q'] # Check that fs is within lookup range fs = 
isWithin('coverage', fs, (fsref.min(), fsref.max())) # Perform projection at fs logger.debug('Interpolating lookups at fs = %s%%', fs * 1e2) lookups2D = {key: interp1d(fsref, y3D, axis=2)(fs) for key, y3D in lookups3D.items()} return Aref, Qref, lookups2D def getLookupsDCavg(mechname, a, Fdrive, amps=None, charges=None, DCs=1.0): ''' Get the DC-averaged lookups of a specific neuron for a combination of US amplitudes, charge densities and duty cycles, at a specific US frequency. :param mechname: name of membrane density mechanism :param a: sonophore radius (m) :param Fdrive: US frequency (Hz) :param amps: US amplitudes (Pa) :param charges: membrane charge densities (C/m2) :param DCs: duty cycle value(s) :return: 4-tuple with reference values of US amplitude and charge density, as well as interpolated Vmeff and QSS gating variables ''' # Get lookups for specific (a, f, A) combination Aref, Qref, lookups2D, _ = getLookups2D(mechname, a=a, Fdrive=Fdrive) if 'ng' in lookups2D: lookups2D.pop('ng') # Derive inputs from lookups reference if not provided if amps is None: amps = Aref if charges is None: charges = Qref # Transform inputs into arrays if single value provided if isinstance(amps, float): amps = np.array([amps]) if isinstance(charges, float): charges = np.array([charges]) if isinstance(DCs, float): DCs = np.array([DCs]) nA, nQ, nDC = amps.size, charges.size, DCs.size cs = {True: 's', False: ''} # logger.debug('%u amplitude%s, %u charge%s, %u DC%s', # nA, cs[nA > 1], nQ, cs[nQ > 1], nDC, cs[nDC > 1]) # Re-interpolate lookups at input charges lookups2D = {key: interp1d(Qref, y2D, axis=1)(charges) for key, y2D in lookups2D.items()} # Interpolate US-ON (for each input amplitude) and US-OFF (A = 0) lookups amps = isWithin('amplitude', amps, (Aref.min(), Aref.max())) lookups_on = {key: interp1d(Aref, y2D, axis=0)(amps) for key, y2D in lookups2D.items()} lookups_off = {key: interp1d(Aref, y2D, axis=0)(0.0) for key, y2D in lookups2D.items()} # Compute DC-averaged lookups lookups_DCavg = {} for key in lookups2D.keys(): x_on, x_off = lookups_on[key], lookups_off[key] x_avg = np.empty((nA, nQ, nDC)) for iA, Adrive in enumerate(amps): for iDC, DC in enumerate(DCs): x_avg[iA, :, iDC] = x_on[iA, :] * DC + x_off * (1 - DC) lookups_DCavg[key] = x_avg return amps, charges, lookups_DCavg def isWithin(name, val, bounds, rel_tol=1e-9): ''' Check if a floating point number is within an interval. If the value falls outside the interval, an error is raised. If the value falls just outside the interval due to rounding errors, the associated interval bound is returned. 
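The duty-cycle averaging performed in getLookupsDCavg() reduces each coefficient to a DC-weighted mean of its US-ON and US-OFF values. A toy numeric illustration (made-up values, not taken from any lookup table):

import numpy as np

x_on = np.array([12.0, -3.0])          # hypothetical US-ON lookup values
x_off = np.array([10.0, -1.0])         # hypothetical US-OFF (A = 0) lookup values
DC = 0.3                               # 30% duty cycle
x_avg = x_on * DC + x_off * (1 - DC)   # same weighting as in the nested loop above
print(x_avg)                           # [10.6 -1.6]
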
:param val: float value :param bounds: interval bounds (float tuple) :return: original or corrected value ''' if isinstance(val, list) or isinstance(val, np.ndarray) or isinstance(val, tuple): return [isWithin(name, v, bounds, rel_tol) for v in val] if val >= bounds[0] and val <= bounds[1]: return val elif val < bounds[0] and math.isclose(val, bounds[0], rel_tol=rel_tol): logger.warning('Rounding %s value (%s) to interval lower bound (%s)', name, val, bounds[0]) return bounds[0] elif val > bounds[1] and math.isclose(val, bounds[1], rel_tol=rel_tol): logger.warning('Rounding %s value (%s) to interval upper bound (%s)', name, val, bounds[1]) return bounds[1] else: raise ValueError('{} value ({}) out of [{}, {}] interval'.format( name, val, bounds[0], bounds[1])) def getLookupsCompTime(mechname): # Check lookup file existence lookup_path = getNeuronLookupsFile(mechname) if not os.path.isfile(lookup_path): raise FileNotFoundError('Missing lookup file: "{}"'.format(lookup_path)) # Load lookups dictionary logger.debug('Loading comp times') with open(lookup_path, 'rb') as fh: df = pickle.load(fh) tcomps4D = df['tcomp'] return np.sum(tcomps4D) def getLowIntensitiesSTN(): ''' Return an array of acoustic intensities (W/m2) used to study the STN neuron in Tarnaud, T., Joseph, W., Martens, L., and Tanghe, E. (2018). Computational Modeling of Ultrasonic Subthalamic Nucleus Stimulation. IEEE Trans Biomed Eng. ''' return np.hstack(( np.arange(10, 101, 10), np.arange(101, 131, 1), np.array([140]) )) # W/m2 def getDistribution(xmin, xmax, nx, scale='lin'): if scale == 'log': xmin, xmax = np.log10(xmin), np.log10(xmax) return {'lin': np.linspace, 'log': np.logspace}[scale](xmin, xmax, nx) def getDistFromList(xlist): if not isinstance(xlist, list): raise TypeError('Input must be a list') if len(xlist) != 4: raise ValueError('List must contain exactly 4 arguments ([type, min, max, n])') scale = xlist[0] if scale not in ('log', 'lin'): raise ValueError('Unknown distribution type (must be "lin" or "log")') xmin, xmax = [float(x) for x in xlist[1:-1]] if xmin >= xmax: raise ValueError('Specified minimum higher or equal than specified maximum') nx = int(xlist[-1]) if nx < 2: raise ValueError('Specified number must be at least 2') return getDistribution(xmin, xmax, nx, scale=scale) def parseUSAmps(args, defaults): # Check if several mutually exclusive arguments were provided Aparams = ['Arange', 'Irange', 'amp', 'intensity'] if sum([x in args for x in Aparams]) > 1: raise ValueError('You must provide only one of the following arguments: {}'.format( ', '.join(Aparams))) if 'Arange' in args: return getDistFromList(args['Arange']) * 1e3 # Pa elif 'Irange' in args: return Intensity2Pressure(getDistFromList(args['Irange']) * 1e4) # Pa elif 'amp' in args: return np.array(args['amp']) * 1e3 # Pa elif 'intensity' in args: return Intensity2Pressure(np.array(args['intensity']) * 1e4) # Pa return np.array(defaults['amp']) * 1e3 # Pa def addUSAmps(ap): ap.add_argument('-A', '--amp', nargs='+', type=float, help='Acoustic pressure amplitude (kPa)') ap.add_argument('--Arange', type=str, nargs='+', help='Amplitude range [scale min max n] (kPa)') ap.add_argument('-I', '--intensity', nargs='+', type=float, help='Acoustic intensity (W/cm2)') ap.add_argument('--Irange', type=str, nargs='+', help='Intensity range [scale min max n] (W/cm2)') def parseElecAmps(args, defaults): # Check if several mutually exclusive arguments were provided Aparams = ['Arange', 'amp'] if sum([x in args for x in Aparams]) > 1: raise ValueError('You must 
provide only one of the following arguments: {}'.format( ', '.join(Aparams))) if 'Arange' in args: return getDistFromList(args['Arange']) # mA/m2 elif 'amp' in args: return np.array(args['amp']) # mA/m2 return np.array(defaults['amp']) # mA/m2 def getIndex(container, value): ''' Return the index of a float / string value in a list / array :param container: list / 1D-array of elements :param value: value to search for :return: index of value (if found) ''' if isinstance(value, float): container = np.array(container) imatches = np.where(np.isclose(container, value, rtol=1e-9, atol=1e-16))[0] if len(imatches) == 0: raise ValueError('{} not found in {}'.format(value, container)) return imatches[0] elif isinstance(value, str): return container.index(value) def debug(func): ''' Print the function signature and return value. ''' @wraps(func) def wrapper_debug(*args, **kwargs): args_repr = [repr(a) for a in args] kwargs_repr = [f"{k}={v!r}" for k, v in kwargs.items()] signature = '{}({})'.format(func.__name__, ', '.join(args_repr + kwargs_repr)) print('Calling {}'.format(signature)) value = func(*args, **kwargs) print(f"{func.__name__!r} returned {value!r}") return value return wrapper_debug def timer(func): ''' Monitor and return the runtime of the decorated function. ''' @wraps(func) def wrapper(*args, **kwargs): start_time = time.perf_counter() value = func(*args, **kwargs) end_time = time.perf_counter() run_time = end_time - start_time return value, run_time return wrapper def cache(fpath, delimiter='\t', out_type=float): ''' Add an extra IO memoization functionality to a function using file caching, to avoid repetitions of tedious computations with identical inputs. ''' def wrapper_with_args(func): @wraps(func) def wrapper(*args, **kwargs): # If function has history -> do not log if 'history' in kwargs: return func(*args, **kwargs) # Translate function arguments into string signature args_repr = [repr(a) for a in args] kwargs_repr = [f"{k}={v!r}" for k, v in kwargs.items()] signature = '{}({})'.format(func.__name__, ', '.join(args_repr + kwargs_repr)) # If entry present in log, return corresponding output if os.path.isfile(fpath): with open(fpath, 'r', newline='') as f: reader = csv.reader(f, delimiter=delimiter) for row in reader: if row[0] == signature: logger.info('entry found in "{}"'.format(os.path.basename(fpath))) return out_type(row[1]) # Otherwise, compute output and log it into file before returning out = func(*args, **kwargs) with open(fpath, 'a', newline='') as csvfile: writer = csv.writer(csvfile, delimiter=delimiter) writer.writerow([signature, str(out)]) return out return wrapper return wrapper_with_args def cachePKL(root, fcode_func): def wrapper_with_args(func): @wraps(func) def wrapper(*args, **kwargs): # Get file path from root and function arguments, using fcode function - fpath = os.path.join(root, '{}.pkl'.format(fcode_func(*args))) + fpath = os.path.join(os.path.abspath(root), '{}.pkl'.format(fcode_func(*args))) # If file exists, load output from it if os.path.isfile(fpath): print('loading data from "{}"'.format(fpath)) with open(fpath, 'rb') as f: out = pickle.load(f) # Otherwise, execute function and create the file to dump the output else: out = func(*args, **kwargs) print('dumping data in "{}"'.format(fpath)) with open(fpath, 'wb') as f: pickle.dump(out, f) return out return wrapper return wrapper_with_args def binarySearch(bool_func, args, ix, xbounds, dx_thr, history=None): ''' Use a binary search to determine the threshold satisfying a given condition within a 
continuous search interval. :param bool_func: boolean function returning whether condition is satisfied :param args: list of function arguments other than refined value :param xbounds: search interval for threshold (progressively refined) :param dx_thr: accuracy criterion for threshold :return: excitation threshold ''' # Assign empty history if first function call if history is None: history = [] # Compute function output at interval mid-point x = (xbounds[0] + xbounds[1]) / 2 sim_args = args[:] sim_args.insert(ix, x) history.append(bool_func(sim_args)) # If titration interval is small enough conv = False if (xbounds[1] - xbounds[0]) <= dx_thr: logger.debug('titration interval smaller than defined threshold') # If both conditions have been encountered during titration process, # we're going towards convergence if (0 in history and 1 in history): logger.debug('converging around threshold') # If current value satisfies condition, convergence is achieved # -> return threshold if history[-1]: logger.debug('currently satisfying condition -> convergence') return x # If only one condition has been encountered during titration process, # then no titration is impossible within the defined interval -> return NaN else: logger.warning('titration does not converge within this interval') return np.nan # Return threshold if convergence is reached, otherwise refine interval and iterate if conv: return x else: if x > 0.: xbounds = (xbounds[0], x) if history[-1] else (x, xbounds[1]) else: xbounds = (x, xbounds[1]) if history[-1] else (xbounds[0], x) return binarySearch(bool_func, args, ix, xbounds, dx_thr, history=history) def resolveDependencies(deps, join_items=True): ''' Solve a dictionary of dependencies. :param arg: dependency dictionary in which the values are the dependencies of their respective keys. :param join_items: boolean specifying whether or not to serialize output :return: list of inter-dependent elements in resolved order ''' # Transform input dictionary of lists into dictionary of sets, # while removing circular (auto) dependencies deps = dict((k, set([x for x in deps[k] if x != k])) for k in deps) # Initialize empty list of resolved dependencies resolved_deps = [] # Iterate while dependencies not entirely resolved while deps: # Extract latest items without dependencies (values that are not in keys # and keys without value) into a set nd_items = set(i for v in deps.values() for i in v) - set(deps.keys()) nd_items.update(k for k, v in deps.items() if not v) # Append new set of non-dependent items to output list resolved_deps.append(nd_items) # Remove those items from remaining dependencies in input dictionary deps = dict(((k, v - nd_items) for k, v in deps.items() if v)) # If specified, merge list of sets into a unique list (while preserving order) if join_items: tmp = [] for item in resolved_deps: tmp += list(item) resolved_deps = tmp return resolved_deps def plural(n): if n < 0: raise ValueError('Cannot format negative integer (n = {})'.format(n)) if n == 0: return '' else: return 's' diff --git a/deprecated/GPR/test_GPR1D.py b/deprecated/GPR/test_GPR1D.py deleted file mode 100644 index d046fa7..0000000 --- a/deprecated/GPR/test_GPR1D.py +++ /dev/null @@ -1,179 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# @Author: Theo Lemaire -# @Date: 2017-04-24 11:04:39 -# @Email: theo.lemaire@epfl.ch -# @Last Modified by: Theo Lemaire -# @Last Modified time: 2017-05-26 18:34:14 - -''' Predict a 1D Vmeff profile using Gaussian Process Regression. 
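A toy usage sketch for the binarySearch() titration helper defined above: find the smallest value in [0, 100] exceeding a fixed target, to within 0.1. The boolean function receives the full argument list with the refined value inserted at index ix, as in the implementation; the import path is assumed:

from PySONIC.utils import binarySearch   # assumed import path for this module

target = 37.2
is_above = lambda sim_args: sim_args[0] >= target    # refined value sits at index ix = 0
xthr = binarySearch(is_above, [], 0, (0., 100.), 0.1)
print(xthr)   # converges to ~37.2 (within the 0.1 tolerance)
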
''' - -import pickle -import numpy as np -import matplotlib.pyplot as plt -from sklearn.gaussian_process import GaussianProcessRegressor -from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C - - -class Variable: - ''' dummy class to contain information about the variable ''' - - name = '' - unit = '' - lookup = '' - factor = 1. - max_error = 0. - - def __init__(self, var_name, var_unit, var_lookup, var_factor, var_max_error): - self.name = var_name - self.unit = var_unit - self.factor = var_factor - self.lookup = var_lookup - self.max_error = var_max_error - - -# Set data variable and Kriging parameters -varinf = Variable('V_{m, eff}', 'mV', 'V_eff', 1., 1e-2) -# varinf = Variable('\\alpha_{m, eff}', 'ms^{-1}', 'alpha_m_eff', 1e-3, 1e1) -# varinf = Variable('\\beta_{m, eff}', 'ms^{-1}', 'beta_m_eff', 1e-3, 5e0) -# varinf = Variable('\\alpha_{h, eff}', 'ms^{-1}', 'alpha_h_eff', 1e-3, 1e1) -# varinf = Variable('\\beta_{h, eff}', 'ms^{-1}', 'beta_h_eff', 1e-3, 1e1) - - -# Define true function by interpolation from specific profile -def f(x): - return np.interp(x, Qm, xvect) - - -# Load coefficient profile -dirpath = 'C:/Users/admin/Google Drive/PhD/NBLS model/Output/lookups 0.35MHz charge extended/' -filepath = dirpath + 'lookups_a32.0nm_f350.0kHz_A100.0kPa_dQ1.0nC_cm2.pkl' -filepath0 = dirpath + 'lookups_a32.0nm_f350.0kHz_A0.0kPa_dQ1.0nC_cm2.pkl' -with open(filepath, 'rb') as fh: - lookup = pickle.load(fh) - Qm = lookup['Q'] - xvect = lookup[varinf.lookup] -with open(filepath0, 'rb') as fh: - lookup = pickle.load(fh) - xvect0 = lookup[varinf.lookup] - -# xvect = xvect - xvect0 - -# Define algorithmic parameters -n_iter_min = 10 -n_iter_max = 20 -max_pred_errors = [] -max_errors = [] -delta_factor = 10 - -# Define prediction vector -x = np.atleast_2d(np.linspace(-150., 150., 1000) * 1e-5).T -y = f(x).ravel() - -# Define initial samples and compute function at these points -X0 = np.atleast_2d(np.linspace(-150., 150., 10) * 1e-5).T -Y0 = f(X0).ravel() - -# Instantiate a Gaussian Process model -print('Creating Gaussian Process with RBF Kernel') -kernel = C(100.0, (1.0, 500.0)) * RBF(1e-4, (1e-5, 1e-3)) # + C(100.0, (1.0, 500.0)) * RBF(1e-5, (1e-5, 1e-3)) -gp = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=8, normalize_y=True) - -# Fit to data using Maximum Likelihood Estimation of the parameters -print('Initial fitting') -gp.fit(X0, Y0) - -# Make the prediction on the meshed x-axis (ask for MSE as well) -print('Predicting over linear input range') -ypred0, ypred0_std = gp.predict(x, return_std=True) -max_err = np.amax(np.abs(y - ypred0)) -max_errors.append(max_err) -max_pred_error = np.amax(ypred0_std) -max_pred_errors.append(max_pred_error) -print('Initialization: Kernel =', gp.kernel_, - ', Max err = {:.2f} {}, Max pred. 
err = {:.2f} {}'.format( - max_err * varinf.factor, varinf.unit, max_pred_error * varinf.factor, varinf.unit)) - - -# Initial observation and prediction -yminus0 = ypred0 - delta_factor * ypred0_std -yplus0 = ypred0 + delta_factor * ypred0_std -fig, ax = plt.subplots() -ax.plot(x * 1e5, y * varinf.factor, 'r:', label=u'True Function') -ax.plot(X0 * 1e5, Y0 * varinf.factor, 'r.', markersize=10, label=u'Initial Observations') -ax.plot(x * 1e5, ypred0 * varinf.factor, 'b-', label=u'Initial Prediction') -ax.fill(np.concatenate([x, x[::-1]]) * 1e5, - np.concatenate([yminus0, yplus0[::-1]]) * varinf.factor, - alpha=.5, fc='b', ec='None', label='$\\pm\ {:.0f} \\sigma$'.format(delta_factor)) -ax.set_xlabel('$Q_m\ (nC/cm^2)$') -ax.set_ylabel('$' + varinf.name + '\ (' + varinf.unit + ')$') -ax.legend() -ax.set_title('Initial observation and prediction') - -print('Optimizing prediction by adding samples iteratively') - -X = X0 -Y = Y0 -ypred = ypred0 -ypred_std = ypred0_std - -n_iter = 0 -while (max_pred_error > varinf.max_error and n_iter < n_iter_max) or n_iter < n_iter_min: - newX = x[np.argmax(ypred_std)] - newY = f(newX) - X = np.atleast_2d(np.insert(X.ravel(), -1, newX)).T - Y = np.insert(Y, -1, newY) - gp.fit(X, Y) - ypred, ypred_std = gp.predict(x, return_std=True) - max_err = np.amax(np.abs(y - ypred)) - max_errors.append(max_err) - max_pred_error = np.amax(ypred_std) - max_pred_errors.append(max_pred_error) - print('Step {}:'.format(n_iter + 1), ' Kernel =', gp.kernel_, - ', Max err = {:.2f} {}, Max pred. err = {:.2f} {}'.format( - max_err * varinf.factor, varinf.unit, max_pred_error * varinf.factor, varinf.unit)) - if (n_iter + 1) % 5 == 0: - yminus = ypred - delta_factor * ypred_std - yplus = ypred + delta_factor * ypred_std - fig, ax = plt.subplots() - ax.plot(x * 1e5, y * varinf.factor, 'r:', label=u'True Function') - ax.plot(X * 1e5, Y * varinf.factor, 'r.', markersize=10, label=u'Final Observations') - ax.plot(x * 1e5, ypred * varinf.factor, 'b-', label=u'Final Prediction') - ax.fill(np.concatenate([x, x[::-1]]) * 1e5, - np.concatenate([yminus, yplus[::-1]]) * varinf.factor, - alpha=.5, fc='b', ec='None', label='$\\pm\ {:.0f} \\sigma$'.format(delta_factor)) - ax.set_xlabel('$Q_m\ (nC/cm^2)$') - ax.set_ylabel('$' + varinf.name + '\ (' + varinf.unit + ')$') - ax.legend() - ax.set_title('After {} steps'.format(n_iter + 1)) - n_iter += 1 - - -# Final observation and prediction -yminus = ypred - delta_factor * ypred_std -yplus = ypred + delta_factor * ypred_std -fig, ax = plt.subplots() -ax.plot(x * 1e5, y * varinf.factor, 'r:', label=u'True Function') -ax.plot(X * 1e5, Y * varinf.factor, 'r.', markersize=10, label=u'Final Observations') -ax.plot(x * 1e5, ypred * varinf.factor, 'b-', label=u'Final Prediction') -ax.fill(np.concatenate([x, x[::-1]]) * 1e5, - np.concatenate([yminus, yplus[::-1]]) * varinf.factor, - alpha=.5, fc='b', ec='None', label='$\\pm\ {:.0f} \\sigma$'.format(delta_factor)) -ax.set_xlabel('$Q_m\ (nC/cm^2)$') -ax.set_ylabel('$' + varinf.name + '\ (' + varinf.unit + ')$') -ax.legend() -ax.set_title('Final observation and prediction') - -# Evolution of max. absolute error -fig, ax = plt.subplots() -ax.plot(np.linspace(0, n_iter, n_iter + 1), max_errors) -ax.set_xlabel('# iterations') -ax.set_ylabel('Max. error ($' + varinf.unit + ')$') - -# Evolution of max. predicted error -fig, ax = plt.subplots() -ax.plot(np.linspace(0, n_iter, n_iter + 1), max_pred_errors) -ax.set_xlabel('# iterations') -ax.set_ylabel('Max. 
predicted error ($' + varinf.unit + ')$') - -plt.show() diff --git a/deprecated/GPR/test_GPR2D.py b/deprecated/GPR/test_GPR2D.py deleted file mode 100644 index 9d9ecd1..0000000 --- a/deprecated/GPR/test_GPR2D.py +++ /dev/null @@ -1,370 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# @Author: Theo Lemaire -# @Date: 2017-04-24 11:04:39 -# @Email: theo.lemaire@epfl.ch -# @Last Modified by: Theo Lemaire -# @Last Modified time: 2017-06-01 13:38:57 - -''' Predict a 2D Vmeff profile using Gaussian Process Regression. ''' - -import os, ntpath -import pickle -import re -import numpy as np -from scipy.interpolate import griddata -import matplotlib.pyplot as plt -import matplotlib.cm as cm -from sklearn.gaussian_process import GaussianProcessRegressor -from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C -from utils import OpenFilesDialog, rescale, rmse - - -class Variable: - ''' dummy class to contain information about the variable ''' - - name = '' - unit = '' - lookup = '' - factor = 1. - max_error = 0. - - def __init__(self, var_name, var_unit, var_lookup, var_factor, var_max_error): - self.name = var_name - self.unit = var_unit - self.factor = var_factor - self.lookup = var_lookup - self.max_error = var_max_error - - -# Define true function by interpolation from specific profiles -def f(x): - return griddata(points, values, x, method='linear', rescale=True) - - -# Select data files (PKL) -lookup_root = '../Output/lookups 0.35MHz dense/' -lookup_absroot = os.path.abspath(lookup_root) -lookup_filepaths = OpenFilesDialog(lookup_absroot, 'pkl') -rgxp = re.compile('lookups_a(\d*.\d*)nm_f(\d*.\d*)kHz_A(\d*.\d*)kPa_dQ(\d*.\d*)nC_cm2.pkl') -pltdir = 'C:/Users/admin/Desktop/GPR output/' - -# Set data variable and Kriging parameters -varinf = Variable('V_{m, eff}', 'mV', 'V_eff', 1., 1.0) -# varinf = Variable('\\alpha_{m, eff}', 'ms^{-1}', 'alpha_m_eff', 1e-3, 1e4) -# varinf = Variable('\\beta_{m, eff}', 'ms^{-1}', 'beta_m_eff', 1e-3, 5e0) -# varinf = Variable('\\alpha_{h, eff}', 'ms^{-1}', 'alpha_h_eff', 1e-3, 1e1) -# varinf = Variable('\\beta_{h, eff}', 'ms^{-1}', 'beta_h_eff', 1e-3, 1e1) -# varinf = Variable('\\alpha_{n, eff}', 'ms^{-1}', 'alpha_n_eff', 1e-3, 1e1) -# varinf = Variable('\\beta_{n, eff}', 'ms^{-1}', 'beta_n_eff', 1e-3, 1e1) -# varinf = Variable('(p_{\\infty}\ /\ \\tau_p)_{eff}', 'ms^{-1}', 'pinf_over_taup_eff', 1e-3, 1e1) -# varinf = Variable('(1\ /\ \\tau_p)_{eff}', 'ms^{-1}', 'inv_taup_eff', 1e-3, 1e1) -# varinf = Variable('n_{g,on}', 'mole', 'ng_eff_on', 1e22, 1e1) -# varinf = Variable('n_{g,off}', 'mole', 'ng_eff_off', 1e22, 1e1) - -# Check dialog output -if not lookup_filepaths: - print('error: no lookup table selected') -else: - print('importing lookup tables') - nfiles = len(lookup_filepaths) - amps = np.empty(nfiles) - - for i in range(nfiles): - - # Load lookup table - lookup_filename = ntpath.basename(lookup_filepaths[i]) - mo = rgxp.fullmatch(lookup_filename) - if not mo: - print('Error: lookup file does not match regular expression pattern') - else: - # Retrieve stimulus parameters - Fdrive = float(mo.group(2)) * 1e3 - Adrive = float(mo.group(3)) * 1e3 - dQ = float(mo.group(4)) * 1e-2 - amps[i] = Adrive - if Adrive == 0: - baseline_ind = i - - # Retrieve coefficients data - with open(lookup_filepaths[i], 'rb') as fh: - lookup = pickle.load(fh) - if i == 0: - Qm = lookup['Q'] - nQ = np.size(Qm) - var = np.empty((nfiles, nQ)) - var[i, :] = lookup[varinf.lookup] - else: - if np.array_equal(Qm, lookup['Q']): - var[i, :] = lookup[varinf.lookup] - else: 
- print('Error: charge vector not consistent') - - # Compute data metrics - namps = amps.size - Amin = np.amin(amps) - Amax = np.amax(amps) - Qmin = np.amin(Qm) - Qmax = np.amax(Qm) - varmin = np.amin(var) - varmax = np.amax(var) - print('Initial data:', nQ, 'charges,', namps, 'amplitudes') - - np.savetxt('tmp.txt', np.transpose(var)) - quit() - - # Define points for interpolation function - Q_mesh, A_mesh = np.meshgrid(Qm, amps) - points = np.column_stack([A_mesh.flatten(), Q_mesh.flatten()]) - values = var.flatten() - - # Define algorithmic parameters - n_iter_min = 10 - n_iter_max = 100 - MAE_pred = [] - MAE_true = [] - RMSE_true = [] - - # Define estimation vector - nAest = 50 - nQest = 100 - Aest = np.linspace(Amin, Amax, nAest) - Qest = np.linspace(Qmin, Qmax, nQest) - Qest_mesh, Aest_mesh = np.meshgrid(Qest, Aest) - x = np.column_stack([Aest_mesh.flatten(), Qest_mesh.flatten()]) - ytrue = f(x).ravel().reshape((nAest, nQest)) - - # Define initial observation vector - nAobs = 5 - nQobs = 20 - Aobs = np.linspace(Amin, Amax, nAobs) - Qobs = np.linspace(Qmin, Qmax, nQobs) - Qobs_mesh, Aobs_mesh = np.meshgrid(Qobs, Aobs) - X0 = np.column_stack([Aobs_mesh.flatten(), Qobs_mesh.flatten()]) - Y0 = f(X0).ravel() - - # np.savetxt('data_sparse.txt', np.column_stack([X0, Y0]), fmt='% .7e', delimiter=' ', newline='\n ') - # quit() - - - # Instantiate a Gaussian Process model - print('Creating Gaussian Process with RBF Kernel') - kernel = C(100.0, (1.0, 500.0)) * RBF((1e4, 1e-4), ((1e3, 1e5), (1e-5, 1e-3))) - gp = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=8, normalize_y=True) - - # Fit to initial data using Maximum Likelihood Estimation of the parameters - print('Initial fitting') - gp.fit(X0, Y0) - X = X0 - Y = Y0 - - # Make the prediction on the meshed x-axis (ask for MSE as well) - print('Predicting over linear input range') - y0, y0_std = gp.predict(x, return_std=True) - y0 = y0.reshape((nAest, nQest)) - y0_std = y0_std.reshape((nAest, nQest)) - y0_err_true = np.abs(y0 - ytrue) - MAE_pred.append(np.amax(y0_std)) - MAE_true.append(np.amax(np.abs(y0 - ytrue))) - RMSE_true.append(rmse(y0, ytrue)) - print('Initialization: Kernel =', gp.kernel_) - print('predicted MAE = {:.2f} {}, true MAE = {:.2f} {}'.format(MAE_pred[-1] * varinf.factor, - varinf.unit, - MAE_true[-1] * varinf.factor, - varinf.unit)) - # Optimization - print('Optimizing prediction by adding samples iteratively') - n_iter = 0 - y_std = y0_std - while n_iter < n_iter_max and (MAE_pred[-1] > varinf.max_error or n_iter < n_iter_min): - new_X = x[np.argmax(y_std)] - X = np.vstack((X, new_X)) - Y = np.append(Y, f(new_X)) - gp.fit(X, Y) - y, y_std = gp.predict(x, return_std=True) - y = y.reshape((nAest, nQest)) - y_std = y_std.reshape((nAest, nQest)) - y_err_true = np.abs(y - ytrue) - MAE_pred.append(np.amax(y_std)) - MAE_true.append(np.amax(np.abs(y - ytrue))) - RMSE_true.append(rmse(y, ytrue)) - print('step {}:'.format(n_iter + 1), 'Kernel =', gp.kernel_) - print('predicted MAE = {:.2f} {}, true MAE = {:.2f} {}'.format(MAE_pred[-1] * varinf.factor, - varinf.unit, - MAE_true[-1] * varinf.factor, - varinf.unit)) - n_iter += 1 - - # Plotting - mymap = cm.get_cmap('viridis') - sm_amp = plt.cm.ScalarMappable(cmap=mymap, norm=plt.Normalize(Amin * 1e-3, Amax * 1e-3)) - sm_amp._A = [] - var_levels = np.linspace(varmin, varmax, 20) * varinf.factor - sm_var = plt.cm.ScalarMappable(cmap=mymap, norm=plt.Normalize(varmin * varinf.factor, varmax * varinf.factor)) - sm_var._A = [] - varerr0_levels = np.linspace(0., 
np.amax(y0_err_true), 20) * varinf.factor - varerr_levels = np.linspace(0., np.amax(y_err_true), 20) * varinf.factor - sm_varerr0 = plt.cm.ScalarMappable(cmap=mymap, norm=plt.Normalize(0., np.amax(y0_err_true) * varinf.factor)) - sm_varerr0._A = [] - sm_varerr = plt.cm.ScalarMappable(cmap=mymap, norm=plt.Normalize(0., np.amax(y_err_true) * varinf.factor)) - sm_varerr._A = [] - - # True function profiles - fig, ax = plt.subplots(figsize=(10, 6)) - ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=20) - ax.set_ylabel('$' + varinf.name + '\ (' + varinf.unit + ')$', fontsize=20) - ax.set_title('True function profiles', fontsize=20) - for i in range(nAest): - ax.plot(Qest * 1e5, ytrue[i, :] * varinf.factor, c=mymap(rescale(Aest[i], Amin, Amax))) - fig.subplots_adjust(right=0.85) - cbar_ax = fig.add_axes([0.87, 0.1, 0.02, 0.8]) - fig.add_axes() - fig.colorbar(sm_amp, cax=cbar_ax) - cbar_ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=20) - fig.savefig(pltdir + 'fig1.png', format='png') - - # True function map - fig, ax = plt.subplots(figsize=(10, 6)) - ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=20) - ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=20) - ax.set_title('True function map', fontsize=20) - ax.contourf(Qest * 1e5, Aest * 1e-3, ytrue * varinf.factor, levels=var_levels, - cmap='viridis') - fig.subplots_adjust(right=0.85) - cbar_ax = fig.add_axes([0.87, 0.1, 0.02, 0.8]) - fig.add_axes() - fig.colorbar(sm_var, cax=cbar_ax) - cbar_ax.set_ylabel('$' + varinf.name + '\ (' + varinf.unit + ')$', fontsize=20) - fig.savefig(pltdir + 'fig2.png', format='png') - - # Initial estimation profiles - fig, ax = plt.subplots(figsize=(10, 6)) - ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=20) - ax.set_ylabel('$' + varinf.name + '\ (' + varinf.unit + ')$', fontsize=20) - ax.set_title('Initial estimation profiles', fontsize=20) - for i in range(nAest): - ax.plot(Qest * 1e5, y0[i, :] * varinf.factor, c=mymap(rescale(Aest[i], Amin, Amax))) - fig.subplots_adjust(right=0.85) - cbar_ax = fig.add_axes([0.87, 0.1, 0.02, 0.8]) - fig.add_axes() - fig.colorbar(sm_amp, cax=cbar_ax) - cbar_ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=20) - fig.savefig(pltdir + 'fig3.png', format='png') - - # Initial estimation map - fig, ax = plt.subplots(figsize=(10, 6)) - ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=20) - ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=20) - ax.set_title('Initial estimation map', fontsize=20) - ax.contourf(Qest * 1e5, Aest * 1e-3, y0 * varinf.factor, levels=var_levels, - cmap='viridis') - ax.scatter(X0[:, 1] * 1e5, X0[:, 0] * 1e-3, c='black') - fig.subplots_adjust(right=0.85) - cbar_ax = fig.add_axes([0.87, 0.1, 0.02, 0.8]) - fig.add_axes() - fig.colorbar(sm_var, cax=cbar_ax) - cbar_ax.set_ylabel('$' + varinf.name + '\ (' + varinf.unit + ')$', fontsize=20) - fig.savefig(pltdir + 'fig4.png', format='png') - - # Initial error profiles - fig, ax = plt.subplots(figsize=(10, 6)) - ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=20) - ax.set_ylabel('$' + varinf.name + '\ (' + varinf.unit + ')$', fontsize=20) - ax.set_title('Initial error profiles', fontsize=20) - for i in range(nAest): - ax.plot(Qest * 1e5, (y0[i, :] - ytrue[i, :]) * varinf.factor, - c=mymap(rescale(Aest[i], Amin, Amax))) - fig.subplots_adjust(right=0.85) - cbar_ax = fig.add_axes([0.87, 0.1, 0.02, 0.8]) - fig.add_axes() - fig.colorbar(sm_amp, cax=cbar_ax) - cbar_ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=20) - fig.savefig(pltdir + 'fig5.png', format='png') - - # Initial error map - fig, ax = plt.subplots(figsize=(10, 6)) - ax.set_ylabel('$A_{drive} \ 
(kPa)$', fontsize=20) - ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=20) - ax.set_title('Initial error map', fontsize=20) - ax.contourf(Qest * 1e5, Aest * 1e-3, y0_err_true * varinf.factor, levels=varerr0_levels, - cmap='viridis') - ax.scatter(X[:, 1] * 1e5, X[:, 0] * 1e-3, c='black') - fig.subplots_adjust(right=0.85) - cbar_ax = fig.add_axes([0.87, 0.1, 0.02, 0.8]) - fig.add_axes() - fig.colorbar(sm_varerr0, cax=cbar_ax) - cbar_ax.set_ylabel('$' + varinf.name + '\ (' + varinf.unit + ')$', fontsize=20) - fig.savefig(pltdir + 'fig6.png', format='png') - - # Final estimation profiles - fig, ax = plt.subplots(figsize=(10, 6)) - ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=20) - ax.set_ylabel('$' + varinf.name + '\ (' + varinf.unit + ')$', fontsize=20) - ax.set_title('Final estimation profiles', fontsize=20) - for i in range(nAest): - ax.plot(Qest * 1e5, y[i, :] * varinf.factor, c=mymap(rescale(Aest[i], Amin, Amax))) - fig.subplots_adjust(right=0.85) - cbar_ax = fig.add_axes([0.87, 0.1, 0.02, 0.8]) - fig.add_axes() - fig.colorbar(sm_amp, cax=cbar_ax) - cbar_ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=20) - fig.savefig(pltdir + 'fig7.png', format='png') - - # Final estimation map - fig, ax = plt.subplots(figsize=(10, 6)) - ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=20) - ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=20) - ax.set_title('Final estimation map', fontsize=20) - ax.contourf(Qest * 1e5, Aest * 1e-3, y * varinf.factor, levels=var_levels, - cmap='viridis') - ax.scatter(X[:, 1] * 1e5, X[:, 0] * 1e-3, c='black') - fig.subplots_adjust(right=0.85) - cbar_ax = fig.add_axes([0.87, 0.1, 0.02, 0.8]) - fig.add_axes() - fig.colorbar(sm_var, cax=cbar_ax) - cbar_ax.set_ylabel('$' + varinf.name + '\ (' + varinf.unit + ')$', fontsize=20) - fig.savefig(pltdir + 'fig8.png', format='png') - - # Final error profiles - fig, ax = plt.subplots(figsize=(10, 6)) - ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=20) - ax.set_ylabel('$' + varinf.name + '\ (' + varinf.unit + ')$', fontsize=20) - ax.set_title('Final error profiles', fontsize=20) - for i in range(nAest): - ax.plot(Qest * 1e5, (y[i, :] - ytrue[i, :]) * varinf.factor, - c=mymap(rescale(Aest[i], Amin, Amax))) - fig.subplots_adjust(right=0.85) - cbar_ax = fig.add_axes([0.87, 0.1, 0.02, 0.8]) - fig.add_axes() - fig.colorbar(sm_amp, cax=cbar_ax) - cbar_ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=20) - fig.savefig(pltdir + 'fig9.png', format='png') - - # Final error map - fig, ax = plt.subplots(figsize=(10, 6)) - ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=20) - ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=20) - ax.set_title('Final error map', fontsize=20) - ax.contourf(Qest * 1e5, Aest * 1e-3, y_err_true * varinf.factor, levels=varerr_levels, - cmap='viridis') - ax.scatter(X[:, 1] * 1e5, X[:, 0] * 1e-3, c='black') - fig.subplots_adjust(right=0.85) - cbar_ax = fig.add_axes([0.87, 0.1, 0.02, 0.8]) - fig.add_axes() - fig.colorbar(sm_varerr, cax=cbar_ax) - cbar_ax.set_ylabel('$' + varinf.name + '\ (' + varinf.unit + ')$', fontsize=20) - fig.savefig(pltdir + 'fig10.png', format='png') - - # Error evolution - fig, ax = plt.subplots() - iters = np.linspace(0, n_iter, n_iter + 1) - ax.plot(iters, np.array(MAE_true) * varinf.factor, label='true error') - ax.plot(iters, np.array(MAE_pred) * varinf.factor, label='predicted error') - ax.plot(iters, np.array(RMSE_true) * varinf.factor, label='true RMSE') - ax.set_xlabel('# iterations', fontsize=20) - ax.set_ylabel('Max. 
absolute error ($' + varinf.unit + ')$', fontsize=20) - ax.set_title('Error evolution', fontsize=20) - ax.legend(fontsize=20) - fig.savefig(pltdir + 'fig11.png', format='png') - - # plt.show() diff --git a/deprecated/GPR/test_GPR2D_multiout.py b/deprecated/GPR/test_GPR2D_multiout.py deleted file mode 100644 index 00bf94f..0000000 --- a/deprecated/GPR/test_GPR2D_multiout.py +++ /dev/null @@ -1,382 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# @Author: Theo Lemaire -# @Date: 2017-04-24 11:04:39 -# @Email: theo.lemaire@epfl.ch -# @Last Modified by: Theo Lemaire -# @Last Modified time: 2017-06-01 17:05:42 - -''' Predict nine different 2D coefficients profile using Gaussian Process Regression. ''' - -import os -import ntpath -import pickle -import re -import logging -import warnings -import numpy as np -from scipy.interpolate import griddata -import matplotlib.pyplot as plt -import matplotlib.cm as cm -from mpl_toolkits.mplot3d import Axes3D -from sklearn.gaussian_process import GaussianProcessRegressor as GPR -from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C -from utils import OpenFilesDialog, rmse, lh2DWithCorners - - -# Define true function by interpolation from specific profile -def f(x): - out = np.empty((x.shape[0], nvar)) - for k in range(nvar): - out[:, k] = griddata(points, values[:, k], x, method='linear', rescale=True) - return out - - -# Select data files (PKL) -lookup_root = '../Output/lookups 0.35MHz dense/' -lookup_absroot = os.path.abspath(lookup_root) -lookup_filepaths = OpenFilesDialog(lookup_absroot, 'pkl') -rgxp = re.compile('lookups_a(\d*.\d*)nm_f(\d*.\d*)kHz_A(\d*.\d*)kPa_dQ(\d*.\d*)nC_cm2.pkl') -outdir = 'C:/Users/admin/Desktop/GPRmultiout output/' - -# Define logging settings and clear log file -logfile = outdir + 'GPR2D_multiout.log' -logging.basicConfig(filename=logfile, level=logging.DEBUG, - format='%(asctime)s - %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p') -with open(logfile, 'w'): - pass - -lookups = ['V_eff', 'alpha_m_eff', 'beta_m_eff', 'alpha_h_eff', 'beta_h_eff', 'alpha_n_eff', - 'beta_n_eff', 'pinf_over_taup_eff', 'inv_taup_eff', 'ng_eff_on', 'ng_eff_off'] -nvar = len(lookups) - -max_errors = [1e0, 1e3, 1e3, 1e8, 1e2, 1e2, 1e4, 1e8, 1e9] -Ckernels = [C(100.0, (1.0, 500.0)), C(1e3, (1e0, 1e5)), C(1e3, (1e0, 1e5)), C(1e5, (1e0, 1e9)), - C(1e2, (1e0, 1e4)), C(1e2, (1e0, 1e4)), C(1e4, (1e0, 1e6)), C(1e5, (1e0, 1e9)), - C(1e5, (1e0, 1e9)), C(1e0, (1e-1, 1e1)), C(1e0, (1e-1, 1e1))] - -factors = [1e0] + [1e-3 for i in range(8)] + [1e0 for i in range(2)] - -units = ['mV'] + ['ms-1' for i in range(8)] + ['1e-22 mole' for i in range(2)] - -plot_names = ['V_{m, eff}', '\\alpha_{m, eff}', '\\beta_{m, eff}', '\\alpha_{h, eff}', - '\\beta_{h, eff}', '\\alpha_{n, eff}', '\\beta_{n, eff}', - 'p_{\\infty}/\\tau_p', '1/\\tau_p', 'n_{g,on}', 'n_{g,off}'] - -plot_units = ['mV'] + ['ms^{-1}' for i in range(8)] + ['10^{-22}\ mole' for i in range(2)] - -# Check dialog output -if not lookup_filepaths: - print('error: no lookup table selected') -else: - print('importing lookup tables') - logging.info('Files selected - importing lookup tables') - nfiles = len(lookup_filepaths) - amps = np.empty(nfiles) - - for i in range(nfiles): - - # Load lookup table - lookup_filename = ntpath.basename(lookup_filepaths[i]) - mo = rgxp.fullmatch(lookup_filename) - if not mo: - print('Error: lookup file does not match regular expression pattern') - else: - # Retrieve stimulus parameters - a = float(mo.group(1)) * 1e-9 - Fdrive = float(mo.group(2)) * 1e3 - Adrive = 
float(mo.group(3)) * 1e3 - dQ = float(mo.group(4)) * 1e-2 - amps[i] = Adrive - if Adrive == 0: - baseline_ind = i - - # Retrieve coefficients data - with open(lookup_filepaths[i], 'rb') as fh: - lookup_data = pickle.load(fh) - if i == 0: - Qmfull = lookup_data['Q'] - Qm = Qmfull[(Qmfull >= -80.0e-5) & (Qmfull <= 50.0e-5)] - nQ = np.size(Qm) - var = np.empty((nfiles, nQ, nvar)) - for j in range(nvar): - varfull = lookup_data[lookups[j]] - var[i, :, j] = varfull[(Qmfull >= -80.0e-5) & (Qmfull <= 50.0e-5)] - else: - Qmfull = lookup_data['Q'] - if np.array_equal(Qm, Qmfull[(Qmfull >= -80.0e-5) & (Qmfull <= 50.0e-5)]): - for j in range(nvar): - varfull = lookup_data[lookups[j]] - var[i, :, j] = varfull[(Qmfull >= -80.0e-5) & (Qmfull <= 50.0e-5)] - else: - print('Error: charge vector not consistent') - - - # Multiplying the gas molar contents - var[:, :, -2] = var[:, :, -2] * 1e22 - var[:, :, -1] = var[:, :, -1] * 1e22 - - # Compute data metrics - namps = amps.size - Amin = np.amin(amps) - Amax = np.amax(amps) - Qmin = np.amin(Qm) - Qmax = np.amax(Qm) - varmin = np.amin(var, axis=(0, 1)) - varmax = np.amax(var, axis=(0, 1)) - logstr = 'Initial data: {} charges, {} amplitudes'.format(nQ, namps) - print(logstr) - logging.info(logstr) - - # Define points for interpolation function - Q_mesh, A_mesh = np.meshgrid(Qm, amps) - points = np.column_stack([A_mesh.flatten(), Q_mesh.flatten()]) - values = var.reshape(namps * nQ, nvar) - - # Define algorithmic parameters - n_iter_max = 100 - MAE_pred = [] - MAE_true = [] - RMSE_true = [] - - # Define estimation grid - nAest = 50 - nQest = 100 - Aest = np.linspace(Amin, Amax, nAest) - Qest = np.linspace(Qmin, Qmax, nQest) - Qest_mesh, Aest_mesh = np.meshgrid(Qest, Aest) - x = np.column_stack([Aest_mesh.flatten(), Qest_mesh.flatten()]) - ytrue = f(x).reshape((nAest, nQest, nvar)) - logstr = 'Estimation grid: {} charges, {} amplitudes'.format(nQest, nAest) - print(logstr) - logging.info(logstr) - - # Define initial observation grid - n0 = 24 - X0 = lh2DWithCorners(n0, (Amin, Amax), (Qmin, Qmax), 'center') - Y0 = f(X0) - logstr = 'Initial observation grid: Latin Hypercube ({} samples) with 4 corners'.format(n0 - 4) - print(logstr) - logging.info(logstr) - - # Instantiate Gaussian Process models - logstr = 'Creating {} Gaussian Processes with scaled RBF Kernels'.format(nvar) - print(logstr) - logging.info(logstr) - kernels = [Ck * RBF((1e4, 1e-4), ((1e3, 1e5), (1e-5, 1e-3))) for Ck in Ckernels] - gprs = [GPR(kernel=k, n_restarts_optimizer=8, normalize_y=True) for k in kernels] - - - # Fit to initial data using Maximum Likelihood Estimation of the parameters - print('Step 0') - logging.info('-------------------------- Initialization --------------------------') - - logstr = 'Fitting' - print(logstr) - logging.info(logstr) - with warnings.catch_warnings(): - warnings.simplefilter("ignore") - for i in range(nvar): - gprs[i].fit(X0, Y0[:, i]) - - - # Make the prediction on the meshed x-axis (ask for MSE as well) - logstr = 'Predicting' - print(logstr) - logging.info(logstr) - y0 = np.empty((nAest * nQest, nvar)) - y0_std = np.empty((nAest * nQest, nvar)) - with warnings.catch_warnings(): - warnings.simplefilter("ignore") - for i in range(nvar): - y0[:, i], y0_std[:, i] = gprs[i].predict(x, return_std=True) - y0 = y0.reshape((nAest, nQest, nvar)) - y0_std = y0_std.reshape((nAest, nQest, nvar)) - MAE_pred.append(np.amax(y0_std, axis=(0, 1))) - MAE_true.append(np.amax(np.abs(y0 - ytrue), axis=(0, 1))) - RMSE_true.append(np.array([rmse(y0[:, :, i], ytrue[:, :, i]) for i 
in range(nvar)])) - logging.info('Kernels:') - for i in range(nvar): - logging.info(' {}: {}'.format(lookups[i], gprs[i].kernel_)) - logging.info('predicted MAEs:') - for i in range(nvar): - logging.info(' {}: {:.2f} {}'.format(lookups[i], MAE_pred[-1][i] * factors[i], units[i])) - logging.info('true MAEs:') - for i in range(nvar): - logging.info(' {}: {:.2f} {}'.format(lookups[i], MAE_true[-1][i] * factors[i], units[i])) - - # Copy initial data for iterations - X = np.moveaxis(np.array([X0 for i in range(nvar)]), 0, -1) - Y = Y0 - - - # Optimization - print('Optimizing prediction by adding samples iteratively') - n_iter = 0 - y_std = y0_std - y_flat = np.empty((nAest * nQest, nvar)) - y_std_flat = np.empty((nAest * nQest, nvar)) - while n_iter < n_iter_max: - print('Step', n_iter + 1) - logstr = '-------------------------- Step {} --------------------------'.format(n_iter + 1) - logging.info(logstr) - - print('Determining new samples') - iMAEs = [np.argmax(y_std[:, :, i]) for i in range(nvar)] - newX = x[iMAEs, :] - X = np.concatenate((X, np.expand_dims(np.transpose(newX), axis=0)), axis=0) - newY = np.expand_dims(np.array([f(newX[i, :])[0, i] for i in range(nvar)]), axis=0) - Y = np.vstack((Y, newY)) - - logstr = 'Fitting' - print(logstr) - logging.info(logstr) - with warnings.catch_warnings(): - warnings.simplefilter("ignore") - for i in range(nvar): - gprs[i].fit(X[:, :, i], Y[:, i]) - - logstr = 'Predicting' - print(logstr) - logging.info(logstr) - with warnings.catch_warnings(): - warnings.simplefilter("ignore") - for i in range(nvar): - y_flat[:, i], y_std_flat[:, i] = gprs[i].predict(x, return_std=True) - y = y_flat.reshape((nAest, nQest, nvar)) - y_std = y_std_flat.reshape((nAest, nQest, nvar)) - y_err_true = np.abs(y - ytrue) - - MAE_pred.append(np.amax(y_std, axis=(0, 1))) - MAE_true.append(np.amax(np.abs(y - ytrue), axis=(0, 1))) - RMSE_true.append(np.array([rmse(y[:, :, i], ytrue[:, :, i]) for i in range(nvar)])) - logging.info('Kernels:') - for i in range(nvar): - logging.info(' {}: {}'.format(lookups[i], gprs[i].kernel_)) - logging.info('predicted MAEs:') - for i in range(nvar): - logging.info(' {}: {:.2f} {}'.format(lookups[i], MAE_pred[-1][i] * factors[i], units[i])) - logging.info('true MAEs:') - for i in range(nvar): - logging.info(' {}: {:.2f} {}'.format(lookups[i], MAE_true[-1][i] * factors[i], units[i])) - - n_iter += 1 - - # Saving - gprs_dict = {} - for i in range(nvar): - gprs_dict[lookups[i]] = gprs[i] - predictor_file = 'predictors_a{:.1f}nm_f{:.1f}kHz.pkl'.format(a * 1e9, Fdrive * 1e-3) - logstr = 'Saving predictors dictionary in output file: {}'.format(predictor_file) - logging.info(logstr) - print(logstr) - with open(outdir + predictor_file, 'wb') as fh: - pickle.dump(gprs_dict, fh) - - # Plotting - print('Plotting') - mymap = cm.get_cmap('viridis') - sm_amp = plt.cm.ScalarMappable(cmap=mymap, norm=plt.Normalize(Amin * 1e-3, Amax * 1e-3)) - sm_amp._A = [] - var_levels = np.array([np.linspace(varmin[i], varmax[i], 20) * factors[i] for i in range(nvar)]) - sm_var = [plt.cm.ScalarMappable(cmap=mymap, norm=plt.Normalize(varmin[i] * factors[i], - varmax[i] * factors[i])) - for i in range(nvar)] - for smv in sm_var: - smv._A = [] - varerr0_levels = np.array([np.linspace(0., MAE_pred[0][i], 20) * factors[i] for i in range(nvar)]) - varerr_levels = np.array([np.linspace(0., MAE_pred[-1][i], 20) * factors[i] for i in range(nvar)]) - sm_varerr0 = [plt.cm.ScalarMappable(cmap=mymap, norm=plt.Normalize(0., - MAE_pred[0][i] * factors[i])) - for i in range(nvar)] - for smv 
in sm_varerr0: - smv._A = [] - sm_varerr = [plt.cm.ScalarMappable(cmap=mymap, norm=plt.Normalize(0., - MAE_pred[-1][i] * factors[i])) - for i in range(nvar)] - for smv in sm_varerr: - smv._A = [] - - - - for i in range(nvar): - - print('figure {}/{}'.format(i + 1, nvar)) - - # RESPONSE SURFACE - fig = plt.figure(figsize=(24, 12)) - - # True function - ax = fig.add_subplot(2, 3, 1, projection='3d') - ax.set_title('True function', fontsize=20) - ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=18) - ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=18) - ax.set_zlabel('$' + plot_names[i] + '\ (' + plot_units[i] + ')$', fontsize=18) - ax.xaxis._axinfo['label']['space_factor'] = 3.0 - ax.yaxis._axinfo['label']['space_factor'] = 3.0 - ax.zaxis._axinfo['label']['space_factor'] = 3.0 - ax.plot_surface(Qest_mesh * 1e5, Aest_mesh * 1e-3, ytrue[:, :, i] * factors[i], cmap=mymap) - - # Initial prediction - ax = fig.add_subplot(2, 3, 2, projection='3d') - ax.set_title('Initial prediction', fontsize=20) - ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=18) - ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=18) - ax.set_zlabel('$' + plot_names[i] + '\ (' + plot_units[i] + ')$', fontsize=18) - ax.xaxis._axinfo['label']['space_factor'] = 3.0 - ax.yaxis._axinfo['label']['space_factor'] = 3.0 - ax.zaxis._axinfo['label']['space_factor'] = 3.0 - ax.plot_surface(Qest_mesh * 1e5, Aest_mesh * 1e-3, y0[:, :, i] * factors[i], cmap=mymap) - - # Final prediction - ax = fig.add_subplot(2, 3, 3, projection='3d') - ax.set_title('Final prediction', fontsize=20) - ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=18) - ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=18) - ax.set_zlabel('$' + plot_names[i] + '\ (' + plot_units[i] + ')$', fontsize=18) - ax.xaxis._axinfo['label']['space_factor'] = 3.0 - ax.yaxis._axinfo['label']['space_factor'] = 3.0 - ax.zaxis._axinfo['label']['space_factor'] = 3.0 - ax.plot_surface(Qest_mesh * 1e5, Aest_mesh * 1e-3, y[:, :, i] * factors[i], cmap=mymap) - - # Sampling map - ax = fig.add_subplot(2, 3, 4) - ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=20, labelpad=10) - ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=20, labelpad=10) - ax.set_title('Sampling map', fontsize=20) - ax.contourf(Qest * 1e5, Aest * 1e-3, ytrue[:, :, i] * factors[i], levels=var_levels[i, :], - cmap='viridis') - ax.scatter(X[:n0, 1, i] * 1e5, X[:n0, 0, i] * 1e-3, c='black', label='init. 
samples') - ax.scatter(X[n0:, 1, i] * 1e5, X[n0:, 0, i] * 1e-3, c='red', label='added samples') - ax.set_ylim(0.0, 1.15 * Amax * 1e-3) - # ax.legend(fontsize=20, loc=3) - ax.legend(fontsize=20, loc=9, ncol=2) - - # Initial error - ax = fig.add_subplot(2, 3, 5, projection='3d') - ax.set_title('Initial error', fontsize=20) - ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=18) - ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=18) - ax.set_zlabel('$' + plot_names[i] + '\ (' + plot_units[i] + ')$', fontsize=18) - ax.xaxis._axinfo['label']['space_factor'] = 3.0 - ax.yaxis._axinfo['label']['space_factor'] = 3.0 - ax.zaxis._axinfo['label']['space_factor'] = 3.0 - ax.plot_surface(Qest_mesh * 1e5, Aest_mesh * 1e-3, - (y0[:, :, i] - ytrue[:, :, i]) * factors[i], cmap=mymap) - - # Final error - ax = fig.add_subplot(2, 3, 6, projection='3d') - ax.set_title('Final error', fontsize=20) - ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=18) - ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=18) - ax.set_zlabel('$' + plot_names[i] + '\ (' + plot_units[i] + ')$', fontsize=18) - ax.xaxis._axinfo['label']['space_factor'] = 3.0 - ax.yaxis._axinfo['label']['space_factor'] = 3.0 - ax.zaxis._axinfo['label']['space_factor'] = 3.0 - ax.plot_surface(Qest_mesh * 1e5, Aest_mesh * 1e-3, - (y[:, :, i] - ytrue[:, :, i]) * factors[i], cmap=mymap) - - - plt.tight_layout() - fig.savefig(outdir + lookups[i] + '_surf.png', format='png') - plt.close(fig) diff --git a/deprecated/GPR/test_bayesian_optimization.py b/deprecated/GPR/test_bayesian_optimization.py deleted file mode 100644 index ba3b3af..0000000 --- a/deprecated/GPR/test_bayesian_optimization.py +++ /dev/null @@ -1,119 +0,0 @@ -import importlib -import numpy as np -import matplotlib.pyplot as plt -import matplotlib.cm as cm -from bayes_opt import BayesianOptimization -import numpy as np -import matplotlib.pyplot as plt -from matplotlib import gridspec -import pickle -from utils import LoadParams, rescale -from constants import * - - - -# def getCoeff(nbls, Fdrive, Adrive, phi, Qm): - -# # Set time vector -# T = 1 / Fdrive -# t = np.linspace(0, T, NPC_FULL) -# dt = t[1] - t[0] - -# # Run STIM ON simulation and retrieve deflection and gas content vectors from last cycle -# (_, y_on, _) = nbls.runMech(Adrive, Fdrive, phi, Qm) -# (_, Z, _) = y_on -# deflections = Z[-NPC_FULL:] - -# # Compute membrane capacitance and potential vectors -# capacitances = np.array([nbls.Capct(ZZ) for ZZ in deflections]) -# elastance_integral = np.trapz(1 / capacitances, dx=dt) -# Vmeff = Qm * elastance_integral / T - -# return Vmeff - - -def target(x): - return np.interp(x, Qm, Vmeff) - - -def posterior(bo, x, xmin=0, xmax=150.e-5): - bo.gp.fit(bo.X, bo.Y) - mu, sigma = bo.gp.predict(x, return_std=True) - return mu, sigma - -def plot_gp(bo, x, y): - - fig = plt.figure(figsize=(16, 10)) - fig.suptitle('Gaussian Process and Utility Function After {} Steps'.format(len(bo.X)), fontdict={'size':30}) - - gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1]) - axis = plt.subplot(gs[0]) - acq = plt.subplot(gs[1]) - - mu, sigma = posterior(bo, x) - axis.plot(x, y, linewidth=3, label='Target') - axis.plot(bo.X.flatten(), bo.Y, 'D', markersize=8, label=u'Observations', color='r') - axis.plot(x, mu, '--', color='k', label='Prediction') - - axis.fill(np.concatenate([x, x[::-1]]), - np.concatenate([mu - 1.9600 * sigma, (mu + 1.9600 * sigma)[::-1]]), - alpha=.6, fc='c', ec='None', label='95% confidence interval') - - axis.set_xlim((0., 150.e-5)) - axis.set_ylim((None, None)) - axis.set_ylabel('f(x)', fontdict={'size':20}) - 
axis.set_xlabel('x', fontdict={'size':20}) - - utility = bo.util.utility(x, bo.gp, 0) - acq.plot(x, utility, label='Utility Function', color='purple') - acq.plot(x[np.argmax(utility)], np.max(utility), '*', markersize=15, - label=u'Next Best Guess', markerfacecolor='gold', markeredgecolor='k', markeredgewidth=1) - acq.set_xlim((0., 150.e-5)) - # acq.set_ylim((0, np.max(utility) + 0.5)) - acq.set_ylabel('Utility', fontdict={'size':20}) - acq.set_xlabel('x', fontdict={'size':20}) - - axis.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.) - acq.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.) - - - -filepath = 'C:/Users/admin/Google Drive/PhD/NBLS model/Output/lookups 0.35MHz charge extended/lookups_a32.0nm_f350.0kHz_A100.0kPa_dQ1.0nC_cm2.pkl' -filepath0 = 'C:/Users/admin/Google Drive/PhD/NBLS model/Output/lookups 0.35MHz charge extended/lookups_a32.0nm_f350.0kHz_A0.0kPa_dQ1.0nC_cm2.pkl' - -with open(filepath, 'rb') as fh: - lookup = pickle.load(fh) - Qm = lookup['Q'] - Vmeff = lookup['V_eff'] - -with open(filepath0, 'rb') as fh: - lookup = pickle.load(fh) - Vmbase = lookup['V_eff'] - -Vmeff = -(Vmeff - Vmbase) - - -nQ = 100 -x = np.linspace(0., 150., nQ).reshape(-1, 1) * 1e-5 -y = np.empty(nQ) -for i in range(nQ): - y[i] = target(x[i]) -fig, ax = plt.subplots() -ax.set_xlabel('$Q_m\ (nC/cm^2)$') -ax.set_ylabel('$V_{m, eff}\ (mV)$') -ax.plot(x * 1e5, y) - - -bo = BayesianOptimization(target, {'x': (0., 150.e-5)}) -bo.maximize(init_points=10, n_iter=0, acq='ei', kappa=1) -plot_gp(bo, x, y) - -# bo.maximize(init_points=10, n_iter=0, acq='ei', kappa=5) -# plot_gp(bo, x, y) -for i in range(5): - bo.maximize(init_points=0, n_iter=1, acq='ei', kappa=1) -plot_gp(bo, x, y) - -plt.show() - - diff --git a/deprecated/Taylor expansions/plot_rates_derivatives.py b/deprecated/Taylor expansions/plot_rates_derivatives.py deleted file mode 100644 index f9af8c8..0000000 --- a/deprecated/Taylor expansions/plot_rates_derivatives.py +++ /dev/null @@ -1,123 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# @Author: Theo Lemaire -# @Date: 2017-03-22 16:04:37 -# @Email: theo.lemaire@epfl.ch -# @Last Modified by: Theo Lemaire -# @Last Modified time: 2017-03-29 18:17:52 - -''' Plot profiles of rate constants functions and derivatives ''' - -import numpy as np -import matplotlib.pyplot as plt -from utils import bilinearExp, stdExp, dualExp, symExp, sigmoid - - -# Define function parameters -am_params = (-43.2, -0.32, 0.25) -Bm_params = (-16.2, 0.28, -0.20) -ah_params = (-39.2, 0.128, 1 / 18) -Bh_params = (-16.2, 4, 0.20) -an_params = (-41.2, -0.032, 0.20) -Bn_params = (-46.2, 0.5, 0.025) -pinf_params = (-35.0, 1, 0.1) -Tp_params = (-35.0, 0.608, 3.3, 0.05) -invTp_params = (-35.0, 1 / 0.608, 3.3, 0.05) - - -# Define potential range and maximal derivation order -nVm = 100 -Vm = np.linspace(-80.0, 50.0, nVm) # mV -norder = 3 - - -# Define vectors -dalpham = np.empty((norder + 1, nVm)) -dbetam = np.empty((norder + 1, nVm)) -dalphah = np.empty((norder + 1, nVm)) -dbetah = np.empty((norder + 1, nVm)) -dalphan = np.empty((norder + 1, nVm)) -dbetan = np.empty((norder + 1, nVm)) -dpinf = np.empty((norder + 1, nVm)) -dtaup = np.empty((norder + 1, nVm)) -dinvTp = np.empty((norder + 1, nVm)) -dpinfoverTp = np.empty((norder + 1, nVm)) - - -# Compute derivatives -for i in range(norder + 1): - dalpham[i, :] = bilinearExp(Vm, am_params, i) - dbetam[i, :] = bilinearExp(Vm, Bm_params, i) - dalphah[i, :] = stdExp(Vm, ah_params, i) - dbetah[i, :] = sigmoid(Vm, Bh_params, i) - dalphan[i, :] = bilinearExp(Vm, 
an_params, i) - dbetan[i, :] = stdExp(Vm, Bn_params, i) - dpinf[i, :] = sigmoid(Vm, pinf_params, i) - dtaup[i, :] = symExp(Vm, Tp_params, i) * 1e3 - dinvTp[i, :] = dualExp(Vm, invTp_params, i) * 1e-3 - - -# Compute pinf/taup derivatives -dpinfoverTp[0, :] = dpinf[0, :] * dinvTp[0, :] -dpinfoverTp[1, :] = dpinf[1, :] * dinvTp[0, :] + dpinf[0, :] * dinvTp[1, :] -dpinfoverTp[2, :] = dpinf[2, :] * dinvTp[0, :] + dpinf[1, :] * dinvTp[1, :]\ - + dpinf[0, :] * dinvTp[2, :] -dpinfoverTp[3, :] = dpinf[3, :] * dinvTp[0, :] + 3 * dpinf[2, :] * dinvTp[1, :]\ - + 3 * dpinf[1, :] * dinvTp[2, :] + dpinf[0, :] * dinvTp[3, :] - - -# Define plot parameters -seqx = (0, 0, 1, 1) -seqy = (0, 1, 0, 1) -f_str1 = ('$[ms^{-1}]$', '$d\ [ms^{-1}.mV^{-1}]$', '$d^2\ [ms^{-1}.mV^{-2}]$', - '$d^3\ [ms^{-1}.mV^{-3}]$') -f_str2 = ('$[-]$', '$d\ [mV^{-1}]$', '$d^2\ [mV^{-2}]$', '$d^3\ [mV^{-3}]$') -f_str3 = ('$[ms]$', '$d\ [ms.mV^{-1}]$', '$d^2\ [ms.mV^{-2}]$', '$d^3\ [ms.mV^{-3}]$') -titles1 = ('$\\alpha_m$', '$\\beta_m$', '$\\alpha_h$', '$\\beta_h$', '$\\alpha_n$', '$\\beta_n$') -titles2 = ('$\\frac{1}{\\tau_p}$', '$\\frac{p_{\\infty}}{\\tau_p}$') -vectors1 = (dalpham, dbetam, dalphah, dbetah, dalphan, dbetan) -vectors2 = (dinvTp, dpinfoverTp) - -# Plot alpha and beta functions -for j in range(len(vectors1)): - fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(22, 10)) - for i in range(4): - ax = axes[seqx[i], seqy[i]] - ax.set_xlabel('$V_m \ [mV]$', fontsize=24) - ax.set_ylabel(f_str1[i], fontsize=24) - ax.plot(Vm, vectors1[j][i, :]) - fig.suptitle(titles1[j], fontsize=30) - - -# Plot p_inf functions -fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(22, 10)) -for i in range(4): - ax = axes[seqx[i], seqy[i]] - ax.set_xlabel('$V_m \ [mV]$', fontsize=24) - ax.set_ylabel(f_str2[i], fontsize=24) - ax.plot(Vm, dpinf[i, :]) -fig.suptitle('$p_{\\infty}$', fontsize=30) - - -# Plot tau_p functions -fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(22, 10)) -for i in range(4): - ax = axes[seqx[i], seqy[i]] - ax.set_xlabel('$V_m \ [mV]$', fontsize=24) - ax.set_ylabel(f_str3[i], fontsize=24) - ax.plot(Vm, dtaup[i, :]) -fig.suptitle('$\\tau_p$', fontsize=30) - - -# Plot invTaup and pinf/Taup functions -for j in range(len(vectors2)): - fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(22, 10)) - for i in range(4): - ax = axes[seqx[i], seqy[i]] - ax.set_xlabel('$V_m \ [mV]$', fontsize=24) - ax.set_ylabel(f_str1[i], fontsize=24) - ax.plot(Vm, vectors2[j][i, :]) - fig.suptitle(titles2[j], fontsize=30) - - -plt.show() diff --git a/deprecated/Taylor expansions/test_alpham_Taylor.py b/deprecated/Taylor expansions/test_alpham_Taylor.py deleted file mode 100644 index f30b6dc..0000000 --- a/deprecated/Taylor expansions/test_alpham_Taylor.py +++ /dev/null @@ -1,46 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# @Author: Theo Lemaire -# @Date: 2017-03-21 11:38:56 -# @Email: theo.lemaire@epfl.ch -# @Last Modified by: Theo Lemaire -# @Last Modified time: 2018-09-28 14:14:11 - - -''' Taylor expansions of the alpha_m function around different potential values. 
''' - -import numpy as np -from scipy.special import factorial -import matplotlib.pyplot as plt -from utils import bilinearExp - -# Vm vector -nVm = 100 -Vm = np.linspace(-80.0, 50.0, nVm) # mV - -# alpha_m vector -am_params = (-43.2, -0.32, 0.25) -alpham = bilinearExp(Vm, am_params, 0) - -# alpha_m Taylor expansion -npoints = 10 -norder = 4 -Vm0 = np.linspace(-80.0, 50.0, npoints) # mV -Vmdiff = Vm - np.tile(Vm0, (nVm, 1)).transpose() -Talpham = np.empty((npoints, nVm)) -for i in range(npoints): - T = np.zeros(nVm) - for j in range(norder + 1): - T[:] += bilinearExp(Vm0[i], am_params, j) * Vmdiff[i, :]**j / factorial(j) - Talpham[i, :] = T - -# Plot standard alpha_m vs. Taylor reconstruction around Vm0 -_, ax = plt.subplots(figsize=(22, 10)) -ax.set_xlabel('$V_m\ [mV]$', fontsize=20) -ax.set_ylabel('$[ms^{-1}]$', fontsize=20) -ax.plot(Vm, alpham, linewidth=2, label='$\\alpha_m$') -for i in range(npoints): - ax.plot(Vm, Talpham[i, :], linewidth=2, label='$T_{}\\alpha_m({:.1f})$'.format(norder, Vm0[i])) -ax.legend(fontsize=20) - -plt.show() diff --git a/deprecated/Taylor expansions/test_alpham_eff_Taylor.py b/deprecated/Taylor expansions/test_alpham_eff_Taylor.py deleted file mode 100644 index ad8dd25..0000000 --- a/deprecated/Taylor expansions/test_alpham_eff_Taylor.py +++ /dev/null @@ -1,152 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# @Author: Theo Lemaire -# @Date: 2017-03-21 11:38:56 -# @Email: theo.lemaire@epfl.ch -# @Last Modified by: Theo Lemaire -# @Last Modified time: 2018-09-28 14:14:11 - - -''' Perform Taylor expansions (up to 4th order) of the alpha_m function - along one acoustic cycle. ''' - -import importlib -import numpy as np -from scipy.special import factorial -import matplotlib.pyplot as plt -import matplotlib.cm as cm -import nblscore -from utils import LoadParams, rescale, bilinearExp -from constants import * -importlib.reload(nblscore) # reloading nblscore module - - -# Load NBLS parameters -params = LoadParams("params.yaml") -biomech = params['biomech'] -ac_imp = biomech['rhoL'] * biomech['c'] # Rayl - -# Set geometry of NBLS structure -a = 32e-9 # in-plane radius (m) -d = 0.0e-6 # embedding tissue thickness (m) -geom = {"a": a, "d": d} - -# Create a NBLS instance here (with dummy frequency parameter) -nbls = nblscore.NeuronalBilayerSonophore(geom, params, 0.0, True) - -# Set stimulation parameters -Fdrive = 3.5e5 # Hz -Adrive = 1e5 # Pa -phi = np.pi # acoustic wave phase - -# Set charge linear space -nQ = 100 -charges = np.linspace(-80.0, 50.0, nQ) * 1e-5 # C/m2 -Qmin = np.amin(charges) -Qmax = np.amax(charges) - -# Set alpha_m parameters -am_params = (-43.2, -0.32, 0.25) - -# Set highest Taylor expansion order -norder = 4 - -# Set time vector -T = 1 / Fdrive -t = np.linspace(0, T, NPC_FULL) -dt = t[1] - t[0] - -# Initialize coefficients vectors -deflections = np.empty((nQ, NPC_FULL)) -Vm = np.empty((nQ, NPC_FULL)) -alpham = np.empty((nQ, NPC_FULL)) - - -# Run mechanical simulations for each imposed charge density -print('Running {} mechanical simulations with imposed charge densities'.format(nQ)) -simcount = 0 -for i in range(nQ): - simcount += 1 - - # Log to console - print('--- sim {}/{}: Q = {:.1f} nC/cm2'.format(simcount, nQ, charges[i] * 1e5)) - - # Run simulation and retrieve deflection vector - (_, y, _) = nbls.runMech(Adrive, Fdrive, phi, charges[i]) - (_, Z, _) = y - deflections[i, :] = Z[-NPC_FULL:] - - # Compute Vm and alpham vectors - Vm[i, :] = [charges[i] / nbls.Capct(ZZ) for ZZ in deflections[i, :]] - alpham[i, :] = bilinearExp(Vm[i, 
:] * 1e3, am_params, 0) - - -# time-average Vm and alpham -Vmavg = np.mean(Vm, axis=1) -alphamavg = np.mean(alpham, axis=1) - -# (Vm - Vmavg) differences along cycle -Vmavgext = np.tile(Vmavg, (NPC_FULL, 1)).transpose() -Vmdiff = (Vm - Vmavgext) * 1e3 - -# alpham derivatives -dalpham = np.empty((norder + 1, nQ)) -for j in range(norder + 1): - dalpham[j, :] = bilinearExp(Vmavg * 1e3, am_params, j) - -# Taylor expansions along cycle -Talpham = np.empty((norder + 1, nQ, NPC_FULL)) -dalphamext = np.tile(dalpham.transpose(), (NPC_FULL, 1, 1)).transpose() -Talpham[0, :, :] = dalphamext[0, :, :] -for j in range(1, norder + 1): - jterm = dalphamext[j, :, :] * Vmdiff[:, :]**j / factorial(j) - Talpham[j, :, :] = Talpham[j - 1, :, :] + jterm - -# time-averaging of Taylor expansions -Talphamavg = np.mean(Talpham, axis=2) - - -# ------------------ PLOTS ------------------- - -mymap = cm.get_cmap('jet') -sm_Q = plt.cm.ScalarMappable(cmap=mymap, norm=plt.Normalize(Qmin * 1e5, Qmax * 1e5)) -sm_Q._A = [] -t_factor = 1e6 - -# 1: time average Vm -_, ax = plt.subplots(figsize=(22, 10)) -ax.set_xlabel('$Qm\ [uF/cm^2]$', fontsize=20) -ax.set_ylabel('$\\overline{V_m}\ [mV]$', fontsize=20) -ax.plot(charges * 1e5, Vmavg * 1e3, linewidth=2) - -# 2: alpham: standard time-averaged vs.evaluated at time-average Vm -# vs. Taylor reconstructions around Vm_avg -_, ax = plt.subplots(figsize=(22, 10)) -ax.set_xlabel('$Qm\ [uF/cm^2]$', fontsize=20) -ax.set_ylabel('$[ms^{-1}]$', fontsize=20) -ax.plot(charges * 1e5, alphamavg, linewidth=2, label='$\\overline{\\alpha_m(V_m)}$') -for j in range(norder + 1): - ax.plot(charges * 1e5, Talphamavg[j, :], linewidth=2, - label='$\\overline{T_' + str(j) + '[\\alpha_m(\\overline{V_m})]}$') - ax.legend(fontsize=20) - -# 3: original alpham vs. highest order Taylor alpham reconstruction -_, ax = plt.subplots(figsize=(22, 10)) -ax.set_xlabel('$t \ (us)$', fontsize=20) -ax.set_ylabel('$[ms^{-1}]$', fontsize=20) -ax.plot(t * t_factor, alpham[0, :], linewidth=2, - c=mymap(rescale(charges[0], Qmin, Qmax)), label='$\\overline{\\alpha_m(V_m)}$') -ax.plot(t * t_factor, Talpham[-1, 0, :], '--', linewidth=2, - c=mymap(rescale(charges[0], Qmin, Qmax)), - label='$T_' + str(norder) + '[\\alpha_m(\\overline{V_m})]$') -for i in range(1, nQ): - ax.plot(t * t_factor, alpham[i, :], linewidth=2, - c=mymap(rescale(charges[i], Qmin, Qmax))) - ax.plot(t * t_factor, Talpham[-1, i, :], '--', linewidth=2, - c=mymap(rescale(charges[i], Qmin, Qmax))) -cbar = plt.colorbar(sm_Q) -cbar.ax.set_ylabel('$Q \ (nC/cm^2)$', fontsize=28) -ax.legend(fontsize=20) -plt.tight_layout() - -plt.show() diff --git a/deprecated/curve fitting/fit_Vmeff.py b/deprecated/curve fitting/fit_Vmeff.py deleted file mode 100644 index ed7cbaa..0000000 --- a/deprecated/curve fitting/fit_Vmeff.py +++ /dev/null @@ -1,285 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# @Author: Theo Lemaire -# @Date: 2017-01-17 11:41:53 -# @Email: theo.lemaire@epfl.ch -# @Last Modified by: Theo Lemaire -# @Last Modified time: 2017-05-30 10:00:56 - -''' Detailed fitting strategy of the Vm_eff profiles ''' - -import os -import ntpath -import re -import pickle -import matplotlib.pyplot as plt -import matplotlib.cm as cm -import numpy as np -from scipy.optimize import curve_fit -from utils import OpenFilesDialog, rescale, rsquared, rmse, find_nearest - - -def supraGauss(x, x0, a, b): - return 2 / (np.exp(a * np.abs(x - x0)**b) + np.exp(-a * np.abs(x - x0)**b)) - - -def absPow(x, x0, a, b, c): - return a * np.abs(x - x0)**b + c - - -def sigmoid(x, x0, a): - 
return 1 - 1 / (1 + np.abs(x / x0)**a) - - -def hybridPowGauss(x, a, b, c, d): - return supraGauss(x, 0.0, a, b) * absPow(x, 0.0, c, d, 1.0) - - -def hybridPowSigmoid(x, x0, a, b, c): - return sigmoid(x, x0, b) * absPow(x, 0.0, a, c, 0.0) - - - -# Select data files (PKL) -lookup_root = '../Output/lookups 0.35MHz charge extended/' -lookup_absroot = os.path.abspath(lookup_root) -lookup_filepaths = OpenFilesDialog(lookup_absroot, 'pkl') -rgxp = re.compile('lookups_a(\d*.\d*)nm_f(\d*.\d*)kHz_A(\d*.\d*)kPa_dQ(\d*.\d*)nC_cm2.pkl') -plot_bool = 1 - -nQ = 300 -baseline_ind = -1 - -# Check dialog output -if not lookup_filepaths: - print('error: no lookup table selected') -else: - print('importing Vm_eff profiles from lookup tables') - nfiles = len(lookup_filepaths) - - # Initialize coefficients matrices - amps = np.empty(nfiles) - Vm_eff = np.empty((nfiles, nQ)) - - for i in range(nfiles): - - # Load lookup table - lookup_filename = ntpath.basename(lookup_filepaths[i]) - mo = rgxp.fullmatch(lookup_filename) - if not mo: - print('Error: lookup file does not match regular expression pattern') - else: - # Retrieve stimulus parameters - Fdrive = float(mo.group(2)) * 1e3 - Adrive = float(mo.group(3)) * 1e3 - dQ = float(mo.group(4)) * 1e-2 - amps[i] = Adrive - if Adrive == 0: - baseline_ind = i - - # Retrieve coefficients data - with open(lookup_filepaths[i], 'rb') as fh: - lookup = pickle.load(fh) - Qm = lookup['Q'] - Vm_eff[i, :] = lookup['V_eff'] - - if baseline_ind == -1: - print('Error: no baseline profile selected') - else: - - Amin = np.amin(amps) - Amax = np.amax(amps) - Qmin = np.amin(Qm) - Qmax = np.amax(Qm) - namps = nfiles - - i_trueQ_lb, trueQ_lb = find_nearest(Qm, -0.8) - i_trueQ_ub, trueQ_ub = find_nearest(Qm, 0.4) - - - # Baseline subtraction - print('subtracting baseline (Adrive = 0) from profiles') - Vm_eff_sub = (Vm_eff - Vm_eff[baseline_ind, :]) - - # Symmetrization - print('dividing by Qm to get even profiles') - Vm_eff_sub_even = Vm_eff_sub / Qm - - # Peaks fitting on even profiles - print('fitting power law to peaks of even profiles') - Vm_eff_sub_even_peaks = np.amax(Vm_eff_sub_even, axis=1) - Vm_eff_sub_even_peaks[0] = 0. 
- pguess_peaks = (1e4, 1.6, 3.5, 0.4) - popt, _ = curve_fit(hybridPowSigmoid, amps, Vm_eff_sub_even_peaks, p0=pguess_peaks) - Vm_eff_sub_even_peaks_fit = hybridPowSigmoid(amps, *popt) - - # Normalization - print('normalizing even profiles') - Vm_eff_sub_even_norm = Vm_eff_sub_even[1:, :]\ - / Vm_eff_sub_even_peaks[1:].reshape(namps - 1, 1) - - # Normalized profiles fitting - print('fitting hybrid gaussian-power law to normalized Vm_eff') - Vm_eff_sub_even_norm_fit = np.empty((namps - 1, nQ)) - params = np.empty((namps - 1, 4)) - for i in range(namps - 1): - popt, _ = curve_fit(hybridPowGauss, Qm, Vm_eff_sub_even_norm[i, :], - bounds=([0., 0., -1e5, 0.], - [1e5, 1e5, 0., 1e5])) - Vm_eff_sub_even_norm_fit[i, :] = hybridPowGauss(Qm, *popt) - params[i, :] = np.asarray(popt) - - - # Predict Vm_eff profiles - print('predicting Vm_eff by reconstructing from fits') - Vm_eff_sub_even_predict = np.vstack((np.zeros(nQ), Vm_eff_sub_even_norm_fit))\ - * Vm_eff_sub_even_peaks_fit.reshape(namps, 1) - Vm_eff_sub_predict = Vm_eff_sub_even_predict * Qm - Vm_eff_predict = Vm_eff_sub_predict + Vm_eff[baseline_ind, :] - - # Analyze prediction accuracy, in wide and realistic charge ranges - Vm_eff_trueQ = Vm_eff[:, i_trueQ_lb:i_trueQ_ub] - Vm_eff_predict_trueQ = Vm_eff_predict[:, i_trueQ_lb:i_trueQ_ub] - Vm_eff_diff = Vm_eff_predict - Vm_eff - Vm_eff_diff_trueQ = Vm_eff_diff[:, i_trueQ_lb:i_trueQ_ub] - Vm_eff_maxdiff = np.amax(np.abs(Vm_eff_diff), axis=1) - Vm_eff_maxdiff_trueQ = np.amax(np.abs(Vm_eff_diff_trueQ), axis=1) - Vm_eff_rmse = np.empty(namps) - Vm_eff_rmse_trueQ = np.empty(namps) - for i in range(namps): - Vm_eff_rmse[i] = rmse(Vm_eff[i, :], Vm_eff_predict[i, :]) - Vm_eff_rmse_trueQ[i] = rmse(Vm_eff_trueQ[i, :], Vm_eff_predict_trueQ[i, :]) - - - if plot_bool == 1: - - # Plotting - print('plotting') - - mymap = cm.get_cmap('jet') - sm_amp = plt.cm.ScalarMappable(cmap=mymap, norm=plt.Normalize(Amin * 1e-3, Amax * 1e-3)) - sm_amp._A = [] - - # 1: Vm_eff - fig, ax = plt.subplots(figsize=(15, 7)) - ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) - ax.set_ylabel('$V_{m,\ eff}\ (mV)$', fontsize=28) - ax.set_xlim(Qmin * 1e5, Qmax * 1e5) - for i in range(namps): - ax.plot(Qm * 1e5, Vm_eff[i, :], c=mymap(rescale(amps[i], Amin, Amax))) - cbar = plt.colorbar(sm_amp) - cbar.ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=28) - plt.tight_layout() - - # 2: Vm_eff_sub - fig, ax = plt.subplots(figsize=(15, 7)) - ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) - ax.set_ylabel('$V_{m,\ eff-sub}\ (mV)$', fontsize=28) - ax.set_xlim(Qmin * 1e5, Qmax * 1e5) - for i in range(namps): - ax.plot(Qm * 1e5, Vm_eff_sub[i, :], c=mymap(rescale(amps[i], Amin, Amax))) - cbar = plt.colorbar(sm_amp) - cbar.ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=28) - plt.tight_layout() - - # 3: Vm_eff_sub_even - fig, ax = plt.subplots(figsize=(15, 7)) - ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) - ax.set_ylabel('$V_{m,\ eff-sub-even}\ (mV\ cm^2/nC)$', fontsize=28) - ax.set_xlim(Qmin * 1e5, Qmax * 1e5) - for i in range(namps): - ax.plot(Qm * 1e5, Vm_eff_sub_even[i, :], c=mymap(rescale(amps[i], Amin, Amax))) - cbar = plt.colorbar(sm_amp) - cbar.ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=28) - plt.tight_layout() - - # 4: Vm_eff_sub_even_peaks - fig, ax = plt.subplots(figsize=(15, 7)) - ax.set_xlabel('$A_{drive}\ (kPa)$', fontsize=28) - ax.set_ylabel('$V_{m,\ eff-sub-even-peaks}\ (mV\ cm^2/nC)$', fontsize=28) - ax.scatter(amps * 1e-3, Vm_eff_sub_even_peaks, s=30, c='C0', label='data') - ax.plot(amps * 1e-3, Vm_eff_sub_even_peaks_fit, c='C1', 
label='fit') - ax.legend(fontsize=28) - plt.tight_layout() - - # 5: Vm_eff_sub_even_norm - fig, ax = plt.subplots(figsize=(15, 7)) - ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) - ax.set_ylabel('$V_{m,\ eff-sub-even-norm}\ (-)$', fontsize=28) - ax.set_xlim(Qmin * 1e5, Qmax * 1e5) - for i in range(namps - 1): - ax.plot(Qm * 1e5, Vm_eff_sub_even_norm[i, :], - c=mymap(rescale(amps[i], Amin, Amax))) - for i in range(0, namps - 1): - ax.plot(Qm * 1e5, Vm_eff_sub_even_norm_fit[i, :], '--', - c=mymap(rescale(amps[i], Amin, Amax))) - cbar = plt.colorbar(sm_amp) - cbar.ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=28) - plt.tight_layout() - - # fig, ax = plt.subplots(figsize=(15, 7)) - # ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) - # ax.set_ylabel('$V_{m,\ eff-sub-even-norm-diff}\ (-)$', fontsize=28) - # ax.set_xlim(Qmin * 1e5, Qmax * 1e5) - # for i in range(namps - 1): - # ax.plot(Qm * 1e5, Vm_eff_sub_even_norm[i, :] - Vm_eff_sub_even_norm_fit[i, :], - # c=mymap(rescale(amps[i], Amin, Amax))) - # cbar = plt.colorbar(sm_amp) - # cbar.ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=28) - # plt.tight_layout() - - # 6: hybrid gaussian-power law parameters - fig, ax = plt.subplots(figsize=(15, 7)) - ax.set_xlabel('$A_{drive}\ (kPa)$', fontsize=28) - ax.set_ylabel('$V_{m,\ eff-sub-even-norm}\ params$', fontsize=28) - ax.plot(amps[1:] * 1e-3, params[:, 0], label='a') - ax.plot(amps[1:] * 1e-3, params[:, 1], label='b') - ax.plot(amps[1:] * 1e-3, params[:, 2], label='c') - ax.plot(amps[1:] * 1e-3, params[:, 3], label='d') - ax.grid() - ax.legend(fontsize=28) - - - # 7: Vm_eff_predict - fig, ax = plt.subplots(figsize=(15, 7)) - ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) - ax.set_ylabel('$V_{m,\ eff}\ prediction\ (mV)$', fontsize=28) - ax.set_xlim(Qmin * 1e5, Qmax * 1e5) - for i in range(namps): - ax.plot(Qm * 1e5, Vm_eff_predict[i, :], linewidth=2, - c=mymap(rescale(amps[i], Amin, Amax))) - cbar = plt.colorbar(sm_amp) - cbar.ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=28) - plt.tight_layout() - - - # 8: Vm_eff_predict - Vm_eff - fig, ax = plt.subplots(figsize=(15, 7)) - ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) - ax.set_ylabel('$V_{m,\ eff}\ difference\ (mV)$', fontsize=28) - ax.set_xlim(Qmin * 1e5, Qmax * 1e5) - for i in range(namps): - ax.plot(Qm * 1e5, Vm_eff_diff[i, :], linewidth=2, - c=mymap(rescale(amps[i], Amin, Amax))) - cbar = plt.colorbar(sm_amp) - cbar.ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=28) - plt.tight_layout() - - # # 9: RMSE & max absolute error - # fig, ax = plt.subplots(figsize=(15, 7)) - # ax.set_xlabel('$A_{drive} \ (kPa)$', fontsize=28) - # ax.set_ylabel('$RMSE\ (mV)$', fontsize=28) - # ax.plot(amps * 1e-3, Vm_eff_rmse, linewidth=2, c='C0', - # label='$RMSE\ -\ entire\ Q_m\ range$') - # ax.plot(amps * 1e-3, Vm_eff_rmse_trueQ, linewidth=2, c='C1', - # label='$RMSE\ -\ realistic\ Q_m\ range$') - # ax.plot(amps * 1e-3, Vm_eff_maxdiff, '--', linewidth=2, c='C0', - # label='$MAE\ -\ entire\ Q_m\ range$') - # ax.plot(amps * 1e-3, Vm_eff_maxdiff_trueQ, '--', linewidth=2, c='C1', - # label='$MAE\ -\ realistic\ Q_m\ range$') - # ax.legend(fontsize=28) - # plt.tight_layout() - - - plt.show() - diff --git a/deprecated/curve fitting/fit_alphaheff.py b/deprecated/curve fitting/fit_alphaheff.py deleted file mode 100644 index 8b5c4b5..0000000 --- a/deprecated/curve fitting/fit_alphaheff.py +++ /dev/null @@ -1,270 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# @Author: Theo Lemaire -# @Date: 2017-01-17 11:41:53 -# @Email: theo.lemaire@epfl.ch -# @Last Modified by: 
Theo Lemaire -# @Last Modified time: 2017-02-14 15:48:21 - -''' Detailed fitting strategy of the alpha_h_eff profiles ''' - -import os -import ntpath -import re -import pickle -import matplotlib.pyplot as plt -import matplotlib.cm as cm -import numpy as np -from scipy.optimize import curve_fit -import scipy.special as sp -from utils import OpenFilesDialog, rescale, rmse, find_nearest - - -def skewed_gaussian(x, mu=0, sigma=1, alpha=0, a=1, c=0): - normpdf = (1 / (sigma * np.sqrt(2 * np.pi))) * np.exp(-(np.power((x - mu), 2) / (2 * np.power(sigma, 2)))) - normcdf = (0.5 * (1 + sp.erf((alpha * ((x - mu) / sigma)) / (np.sqrt(2))))) - return 2 * a * normpdf * normcdf + c - - -def gaussian(x, mu, sigma, a): - return a * np.exp(-((x - mu) / (2 * sigma))**2) - - -def Exponential(x, x0, b, c): - return b * np.exp(c * (x - x0)) - - -def Exp0(x, b, c): - return Exponential(x, 0.0, b, c) - - -def hybridExpGauss(x, mu, sigma, a, b, c): - return gaussian(x, mu, sigma, a) + Exponential(x, 0.0, b, -c) - - -def dualGauss(x, mu1, mu2, sigma1, sigma2, a1, a2): - return gaussian(x, mu1, sigma1, a1) + gaussian(x, mu2, sigma2, a2) - - - - -# Select data files (PKL) -lookup_root = '../Output/lookups extended 0.35MHz/' -lookup_absroot = os.path.abspath(lookup_root) -lookup_filepaths = OpenFilesDialog(lookup_absroot, 'pkl') -rgxp = re.compile('lookups_a(\d*.\d*)nm_f(\d*.\d*)kHz_A(\d*.\d*)kPa_dQ(\d*.\d*)nC_cm2.pkl') -plot_bool = 1 - -nQ = 300 -baseline_ind = -1 - -# Check dialog output -if not lookup_filepaths: - print('error: no lookup table selected') -else: - print('importing alphah_eff profiles from lookup tables') - nfiles = len(lookup_filepaths) - - # Initialize coefficients matrices - amps = np.empty(nfiles) - alphah_eff = np.empty((nfiles, nQ)) - - for i in range(nfiles): - - # Load lookup table - lookup_filename = ntpath.basename(lookup_filepaths[i]) - mo = rgxp.fullmatch(lookup_filename) - if not mo: - print('Error: lookup file does not match regular expression pattern') - else: - # Retrieve stimulus parameters - Fdrive = float(mo.group(2)) * 1e3 - Adrive = float(mo.group(3)) * 1e3 - dQ = float(mo.group(4)) * 1e-2 - amps[i] = Adrive - if Adrive == 0: - baseline_ind = i - - # Retrieve coefficients data - with open(lookup_filepaths[i], 'rb') as fh: - lookup = pickle.load(fh) - Qm = lookup['Q'] - alphah_eff[i, :] = lookup['alpha_h_eff'] - - if baseline_ind == -1: - print('Error: no baseline profile selected') - else: - - Amin = np.amin(amps) - Amax = np.amax(amps) - Qmin = np.amin(Qm) - Qmax = np.amax(Qm) - namps = nfiles - - i_trueQ_lb, trueQ_lb = find_nearest(Qm, -0.8) - i_trueQ_ub, trueQ_ub = find_nearest(Qm, 0.4) - - # Baseline subtraction - print('subtracting baseline (Adrive = 0) from profiles') - alphah_eff_sub = (alphah_eff - alphah_eff[baseline_ind, :]) - - # Peaks fitting on even profiles - print('fitting exponential law to profiles peaks') - alphah_eff_sub_peaks = np.amax(alphah_eff_sub, axis=1) - popt, _ = curve_fit(Exp0, amps, alphah_eff_sub_peaks, p0=(1.8e14, 3e-5)) - alphah_eff_sub_peaks_fit = Exp0(amps, *popt) - - # Normalization - print('normalizing subtracted profiles') - alphah_eff_sub_norm = alphah_eff_sub[1:, :]\ - / alphah_eff_sub_peaks[1:].reshape(namps - 1, 1) - - # Normalized profiles fitting - print('fitting hybrid gaussian-exp law to normalized alphaheff-sub') - alphah_eff_sub_norm_fit = np.empty((namps - 1, nQ)) - params = np.empty((namps - 1, 6)) - for i in range(namps - 1): - print(i) - popt, _ = curve_fit(dualGauss, Qm, alphah_eff_sub_norm[i], - bounds=([-np.infty, -np.infty, 
0., 0., 0., 0.], - [0., 0., np.infty, np.infty, np.infty, np.infty]), - max_nfev=100000) - alphah_eff_sub_norm_fit[i, :] = dualGauss(Qm, *popt) - params[i, :] = np.asarray(popt) - - - - # Predict alphah_eff profiles - print('predicting alphah_eff by reconstructing from fits') - alphah_eff_sub_predict = np.vstack((np.zeros(nQ), alphah_eff_sub_norm_fit))\ - * alphah_eff_sub_peaks_fit.reshape(namps, 1) - alphah_eff_predict = alphah_eff_sub_predict + alphah_eff[baseline_ind, :] - - # Analyze prediction accuracy, in wide and realistic charge ranges - alphah_eff_trueQ = alphah_eff[:, i_trueQ_lb:i_trueQ_ub] - alphah_eff_predict_trueQ = alphah_eff_predict[:, i_trueQ_lb:i_trueQ_ub] - alphah_eff_diff = alphah_eff_predict - alphah_eff - alphah_eff_diff_trueQ = alphah_eff_diff[:, i_trueQ_lb:i_trueQ_ub] - alphah_eff_maxdiff = np.amax(np.abs(alphah_eff_diff), axis=1) - alphah_eff_maxdiff_trueQ = np.amax(np.abs(alphah_eff_diff_trueQ), axis=1) - alphah_eff_rmse = np.empty(namps) - alphah_eff_rmse_trueQ = np.empty(namps) - for i in range(namps): - alphah_eff_rmse[i] = rmse(alphah_eff[i, :], alphah_eff_predict[i, :]) - alphah_eff_rmse_trueQ[i] = rmse(alphah_eff_trueQ[i, :], alphah_eff_predict_trueQ[i, :]) - - - if plot_bool == 1: - - # Plotting - print('plotting') - - mymap = cm.get_cmap('jet') - sm_amp = plt.cm.ScalarMappable(cmap=mymap, norm=plt.Normalize(Amin * 1e-3, Amax * 1e-3)) - sm_amp._A = [] - - # 1: alphah_eff - fig, ax = plt.subplots(figsize=(21, 7)) - ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) - ax.set_ylabel('$\\alpha_{h,\ eff}\ (ms^{-1})$', fontsize=28) - ax.set_xlim(Qmin * 1e2, Qmax * 1e2) - for i in range(namps): - ax.plot(Qm * 1e2, alphah_eff[i, :] * 1e-3, c=mymap(rescale(amps[i], Amin, Amax))) - cbar = plt.colorbar(sm_amp) - cbar.ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=28) - plt.tight_layout() - - # 2: alphah_eff_sub - fig, ax = plt.subplots(figsize=(21, 7)) - ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) - ax.set_ylabel('$\\alpha_{h,\ eff-sub}\ (ms^{-1})$', fontsize=28) - ax.set_xlim(Qmin * 1e2, Qmax * 1e2) - for i in range(namps): - ax.plot(Qm * 1e2, alphah_eff_sub[i, :] * 1e-3, - c=mymap(rescale(amps[i], Amin, Amax))) - cbar = plt.colorbar(sm_amp) - cbar.ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=28) - plt.tight_layout() - - # 3: alphah_eff_sub_peaks - fig, ax = plt.subplots(figsize=(21, 7)) - ax.set_xlabel('$A_{drive}\ (kPa)$', fontsize=28) - ax.set_ylabel('$\\alpha_{h,\ eff-sub-peaks}\ (ms^{-1})$', fontsize=28) - ax.scatter(amps * 1e-3, alphah_eff_sub_peaks * 1e-3, s=30, c='C0', label='data') - ax.plot(amps * 1e-3, alphah_eff_sub_peaks_fit * 1e-3, c='C1', label='fit') - ax.legend(fontsize=28) - plt.tight_layout() - - # 5: alphah_eff_sub_norm - fig, ax = plt.subplots(figsize=(21, 7)) - ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) - ax.set_ylabel('$\\alpha_{h,\ eff-sub-norm}\ (-)$', fontsize=28) - ax.set_xlim(Qmin * 1e2, Qmax * 1e2) - ax.grid() - for i in range(namps - 1): - ax.plot(Qm * 1e2, alphah_eff_sub_norm[i, :], - c=mymap(rescale(amps[i], Amin, Amax))) - for i in range(namps - 1): - ax.plot(Qm * 1e2, alphah_eff_sub_norm_fit[i, :], '--', - c=mymap(rescale(amps[i], Amin, Amax))) - cbar = plt.colorbar(sm_amp) - cbar.ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=28) - plt.tight_layout() - - - # 6: parameters - fig, ax = plt.subplots(figsize=(21, 7)) - ax.set_xlabel('$A_{drive}\ (kPa)$', fontsize=28) - ax.set_ylabel('$\\alpha_{h,\ eff-sub-norm}\ fit\ params$', fontsize=28) - ax.plot(amps[1:] * 1e-3, params[:, 0], label='mu1') - ax.plot(amps[1:] * 1e-3, params[:, 1], 
label='mu2') - ax.plot(amps[1:] * 1e-3, params[:, 2], label='sigma1') - ax.plot(amps[1:] * 1e-3, params[:, 3], label='sigma2') - ax.plot(amps[1:] * 1e-3, params[:, 4], label='a1') - ax.plot(amps[1:] * 1e-3, params[:, 5], label='a2') - ax.grid() - ax.legend(fontsize=28) - - - # 7: alphah_eff_predict - fig, ax = plt.subplots(figsize=(21, 7)) - ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) - ax.set_ylabel('$\\alpha_{h,\ eff}\ prediction\ (ms^{-1})$', fontsize=28) - ax.set_xlim(Qmin * 1e2, Qmax * 1e2) - for i in range(namps): - ax.plot(Qm * 1e2, alphah_eff_predict[i, :] * 1e-3, - c=mymap(rescale(amps[i], Amin, Amax))) - cbar = plt.colorbar(sm_amp) - cbar.ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=28) - plt.tight_layout() - - - # 8: alphah_eff_predict - alphah_eff - fig, ax = plt.subplots(figsize=(21, 7)) - ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) - ax.set_ylabel('$\\alpha_{h,\ eff}\ difference\ (ms^{-1})$', fontsize=28) - ax.set_xlim(Qmin * 1e2, Qmax * 1e2) - for i in range(namps): - ax.plot(Qm * 1e2, alphah_eff_diff[i, :] * 1e-3, - c=mymap(rescale(amps[i], Amin, Amax))) - cbar = plt.colorbar(sm_amp) - cbar.ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=28) - plt.tight_layout() - - # 9: RMSE & max absolute error - fig, ax = plt.subplots(figsize=(21, 7)) - ax.set_xlabel('$A_{drive} \ (kPa)$', fontsize=28) - ax.set_ylabel('$Error\ (ms^{-1})$', fontsize=28) - ax.plot(amps * 1e-3, alphah_eff_rmse * 1e-3, c='C0', - label='$RMSE\ -\ entire\ Q_m\ range$') - ax.plot(amps * 1e-3, alphah_eff_rmse_trueQ * 1e-3, c='C1', - label='$RMSE\ -\ realistic\ Q_m\ range$') - ax.plot(amps * 1e-3, alphah_eff_maxdiff * 1e-3, '--', c='C0', - label='$MAE\ -\ entire\ Q_m\ range$') - ax.plot(amps * 1e-3, alphah_eff_maxdiff_trueQ * 1e-3, '--', c='C1', - label='$MAE\ -\ realistic\ Q_m\ range$') - ax.legend(fontsize=28) - plt.tight_layout() - - plt.show() - diff --git a/deprecated/curve fitting/fit_alphameff.py b/deprecated/curve fitting/fit_alphameff.py deleted file mode 100644 index b34a1c7..0000000 --- a/deprecated/curve fitting/fit_alphameff.py +++ /dev/null @@ -1,287 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# @Author: Theo Lemaire -# @Date: 2017-01-17 11:41:53 -# @Email: theo.lemaire@epfl.ch -# @Last Modified by: Theo Lemaire -# @Last Modified time: 2017-02-15 15:44:31 - -''' Detailed fitting strategy of the alpha_m_eff profiles ''' - -import os -import ntpath -import re -import pickle -import matplotlib.pyplot as plt -import matplotlib.cm as cm -import numpy as np -from scipy.optimize import curve_fit -from utils import OpenFilesDialog, rescale, rmse, find_nearest - - -def supraGauss(x, x0, a, b): - return 2 / (np.exp(a * np.abs(x - x0)**b) + np.exp(-a * np.abs(x - x0)**b)) - - -def absPow(x, x0, a, b, c): - return a * np.abs(x - x0)**b + c - - -def sigmoid(x, x0, a, b): - return 1 - 1 / (1 + np.abs((x - x0) / a)**b) - - -def hybridPowGauss(x, a, b, c, d): - return supraGauss(x, 0.0, a, b) * absPow(x, 0.0, c, d, 1.0) - - -def hybridPowSigmoid(x, a, b, c, d): - return sigmoid(x, 0.0, a, b) * absPow(x, 0.0, c, d, 0.0) - - -def piecewiseSigPowGauss(x, x0, a, b, c, d, e, f): - y = np.empty(x.size) - y[x < 0.] = sigmoid(x[x < 0.], x0, a, b) - y[x >= 0.] 
= hybridPowGauss(x[x >= 0.], c, d, e, f) - return y - - - -# Select data files (PKL) -lookup_root = '../Output/lookups extended 0.35MHz/' -lookup_absroot = os.path.abspath(lookup_root) -lookup_filepaths = OpenFilesDialog(lookup_absroot, 'pkl') -rgxp = re.compile('lookups_a(\d*.\d*)nm_f(\d*.\d*)kHz_A(\d*.\d*)kPa_dQ(\d*.\d*)nC_cm2.pkl') -plot_bool = 1 - -nQ = 300 -baseline_ind = -1 - -# Check dialog output -if not lookup_filepaths: - print('error: no lookup table selected') -else: - print('importing alpham_eff profiles from lookup tables') - nfiles = len(lookup_filepaths) - - # Initialize coefficients matrices - amps = np.empty(nfiles) - alpham_eff = np.empty((nfiles, nQ)) - Vm_eff = np.empty((nfiles, nQ)) - - for i in range(nfiles): - - # Load lookup table - lookup_filename = ntpath.basename(lookup_filepaths[i]) - mo = rgxp.fullmatch(lookup_filename) - if not mo: - print('Error: lookup file does not match regular expression pattern') - else: - # Retrieve stimulus parameters - Fdrive = float(mo.group(2)) * 1e3 - Adrive = float(mo.group(3)) * 1e3 - dQ = float(mo.group(4)) * 1e-2 - amps[i] = Adrive - if Adrive == 0: - baseline_ind = i - - # Retrieve coefficients data - with open(lookup_filepaths[i], 'rb') as fh: - lookup = pickle.load(fh) - Qm = lookup['Q'] - alpham_eff[i, :] = lookup['alpha_m_eff'] - Vm_eff[i, :] = lookup['V_eff'] - - if baseline_ind == -1: - print('Error: no baseline profile selected') - else: - - Amin = np.amin(amps) - Amax = np.amax(amps) - Qmin = np.amin(Qm) - Qmax = np.amax(Qm) - namps = nfiles - - i_trueQ_lb, trueQ_lb = find_nearest(Qm, -0.8) - i_trueQ_ub, trueQ_ub = find_nearest(Qm, 0.4) - - # Baseline subtraction - print('subtracting baseline (Adrive = 0) from profiles') - alpham_eff_sub = (alpham_eff - alpham_eff[baseline_ind, :]) - Vm_eff_sub = (Vm_eff - Vm_eff[baseline_ind, :]) - - # Suppressing Qm component - print('dividing by Qm') - alpham_eff_sub_even = alpham_eff_sub / Qm - - # Peaks fitting on even profiles - print('fitting power law to peaks of even profiles') - alpham_eff_sub_even_peaks = np.amax(alpham_eff_sub_even, axis=1) - # pguess_peaks = (1e4, 1.6, 3.5, 0.4) - # popt, _ = curve_fit(hybridPowSigmoid, amps, alpham_eff_sub_even_peaks, p0=pguess_peaks) - # alpham_eff_sub_even_peaks_fit = hybridPowSigmoid(amps, *popt) - - # Normalization - print('normalizing even profiles') - alpham_eff_sub_even_norm = alpham_eff_sub_even[1:, :]\ - / alpham_eff_sub_even_peaks[1:].reshape(namps - 1, 1) - - # Normalized profiles fitting - # print('fitting hybrid gaussian-power law to normalized alphameff-sub-even') - # alpham_eff_sub_even_norm_fit = np.empty((namps - 1, nQ)) - # params = np.empty((namps - 1, 7)) - # for i in range(namps - 1): - # popt, _ = curve_fit(piecewiseSigPowGauss, Qm, alpham_eff_sub_even_norm[i, :], - # bounds=([-np.infty, -1., -np.infty, 0., 0., -np.infty, 0.], - # [np.infty, 0., 0., np.infty, np.infty, 0., np.infty])) - # alpham_eff_sub_even_norm_fit[i, :] = piecewiseSigPowGauss(Qm, *popt) - # params[i, :] = np.asarray(popt) - - # Predict alpham_eff profiles - # print('predicting alpham_eff by reconstructing from fits') - # alpham_eff_sub_even_predict = np.vstack((np.zeros(nQ), alpham_eff_sub_even_norm_fit))\ - # * alpham_eff_sub_even_peaks_fit.reshape(namps, 1) - # alpham_eff_sub_predict = alpham_eff_sub_even_predict * Qm - # alpham_eff_predict = alpham_eff_sub_predict + alpham_eff[baseline_ind, :] - - # # Analyze prediction accuracy, in wide and realistic charge ranges - # alpham_eff_trueQ = alpham_eff[:, i_trueQ_lb:i_trueQ_ub] - # 
alpham_eff_predict_trueQ = alpham_eff_predict[:, i_trueQ_lb:i_trueQ_ub] - # alpham_eff_diff = alpham_eff_predict - alpham_eff - # alpham_eff_diff_trueQ = alpham_eff_diff[:, i_trueQ_lb:i_trueQ_ub] - # alpham_eff_maxdiff = np.amax(np.abs(alpham_eff_diff), axis=1) - # alpham_eff_maxdiff_trueQ = np.amax(np.abs(alpham_eff_diff_trueQ), axis=1) - # alpham_eff_rmse = np.empty(namps) - # alpham_eff_rmse_trueQ = np.empty(namps) - # for i in range(namps): - # alpham_eff_rmse[i] = rmse(alpham_eff[i, :], alpham_eff_predict[i, :]) - # alpham_eff_rmse_trueQ[i] = rmse(alpham_eff_trueQ[i, :], alpham_eff_predict_trueQ[i, :]) - - - if plot_bool == 1: - - # Plotting - print('plotting') - - mymap = cm.get_cmap('jet') - sm_amp = plt.cm.ScalarMappable(cmap=mymap, norm=plt.Normalize(Amin * 1e-3, Amax * 1e-3)) - sm_amp._A = [] - - # 1: alpham_eff - fig, ax = plt.subplots(figsize=(21, 7)) - ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) - ax.set_ylabel('$\\alpha_{m,\ eff}\ (ms^{-1})$', fontsize=28) - ax.set_xlim(Qmin * 1e5, Qmax * 1e5) - for i in range(namps): - ax.plot(Qm * 1e5, alpham_eff[i, :] * 1e-3, c=mymap(rescale(amps[i], Amin, Amax))) - cbar = plt.colorbar(sm_amp) - cbar.ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=28) - plt.tight_layout() - - # 2: alpham_eff_sub - fig, ax = plt.subplots(figsize=(21, 7)) - ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) - ax.set_ylabel('$\\alpha_{m,\ eff-sub}\ (ms^{-1})$', fontsize=28) - ax.set_xlim(Qmin * 1e5, Qmax * 1e5) - for i in range(namps): - ax.plot(Qm * 1e5, alpham_eff_sub[i, :] * 1e-3, - c=mymap(rescale(amps[i], Amin, Amax))) - cbar = plt.colorbar(sm_amp) - cbar.ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=28) - plt.tight_layout() - - # # 3: alpham_eff_sub_even - # fig, ax = plt.subplots(figsize=(21, 7)) - # ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) - # ax.set_ylabel('$\\alpha_{m,\ eff-sub-even}\ (ms^{-1}\ cm^2/nC)$', fontsize=28) - # ax.set_xlim(Qmin * 1e2, Qmax * 1e2) - # for i in range(namps): - # ax.plot(Qm * 1e2, alpham_eff_sub_even[i, :] * 1e-3, - # c=mymap(rescale(amps[i], Amin, Amax))) - # cbar = plt.colorbar(sm_amp) - # cbar.ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=28) - # plt.tight_layout() - - # # 4: alpham_eff_sub_even_peaks - # fig, ax = plt.subplots(figsize=(21, 7)) - # ax.set_xlabel('$A_{drive}\ (kPa)$', fontsize=28) - # ax.set_ylabel('$\\alpha_{m,\ eff-sub-even-peaks}\ (ms^{-1}\ cm^2/nC)$', fontsize=28) - # ax.scatter(amps * 1e-3, alpham_eff_sub_even_peaks * 1e-3, s=30, c='C0', label='data') - # ax.plot(amps * 1e-3, alpham_eff_sub_even_peaks_fit * 1e-3, c='C1', label='fit') - # ax.legend(fontsize=28) - # plt.tight_layout() - - # # 5: alpham_eff_sub_even_norm - # fig, ax = plt.subplots(figsize=(21, 7)) - # ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) - # ax.set_ylabel('$\\alpha_{m,\ eff-sub-even-norm}\ (-)$', fontsize=28) - # ax.set_xlim(Qmin * 1e2, Qmax * 1e2) - # ax.grid() - # for i in range(namps - 1): - # ax.plot(Qm * 1e2, alpham_eff_sub_even_norm[i, :], - # c=mymap(rescale(amps[i], Amin, Amax))) - # for i in range(namps - 1): - # ax.plot(Qm * 1e2, alpham_eff_sub_even_norm_fit[i, :], '--', - # c=mymap(rescale(amps[i], Amin, Amax))) - # cbar = plt.colorbar(sm_amp) - # cbar.ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=28) - # plt.tight_layout() - - - # # 6: piecewise function parameters - # fig, ax = plt.subplots(figsize=(21, 7)) - # ax.set_xlabel('$A_{drive}\ (kPa)$', fontsize=28) - # ax.set_ylabel('$\\alpha_{m,\ eff-sub-even-norm}\ fit\ params$', fontsize=28) - # ax.plot(amps[1:] * 1e-3, params[:, 0], label='x0') - # 
ax.plot(amps[1:] * 1e-3, params[:, 1], label='a') - # ax.plot(amps[1:] * 1e-3, params[:, 2], label='b') - # ax.plot(amps[1:] * 1e-3, params[:, 3], label='c') - # ax.plot(amps[1:] * 1e-3, params[:, 4], label='d') - # ax.plot(amps[1:] * 1e-3, params[:, 5], label='e') - # ax.plot(amps[1:] * 1e-3, params[:, 6], label='f') - # ax.grid() - # ax.legend(fontsize=28) - - - # # 7: alpham_eff_predict - # fig, ax = plt.subplots(figsize=(21, 7)) - # ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) - # ax.set_ylabel('$\\alpha_{m,\ eff}\ prediction\ (ms^{-1})$', fontsize=28) - # ax.set_xlim(Qmin * 1e2, Qmax * 1e2) - # for i in range(namps): - # ax.plot(Qm * 1e2, alpham_eff_predict[i, :] * 1e-3, linewidth=2, - # c=mymap(rescale(amps[i], Amin, Amax))) - # cbar = plt.colorbar(sm_amp) - # cbar.ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=28) - # plt.tight_layout() - - - # # 8: alpham_eff_predict - alpham_eff - # # fig, ax = plt.subplots(figsize=(21, 7)) - # # ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) - # # ax.set_ylabel('$\\alpha_{m,\ eff}\ difference\ (ms^{-1})$', fontsize=28) - # # ax.set_xlim(Qmin * 1e2, Qmax * 1e2) - # # for i in range(namps): - # # ax.plot(Qm * 1e2, alpham_eff_diff[i, :] * 1e-3, linewidth=2, - # # c=mymap(rescale(amps[i], Amin, Amax))) - # # cbar = plt.colorbar(sm_amp) - # # cbar.ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=28) - # # plt.tight_layout() - - # # 9: RMSE & max absolute error - # fig, ax = plt.subplots(figsize=(21, 7)) - # ax.set_xlabel('$A_{drive} \ (kPa)$', fontsize=28) - # ax.set_ylabel('$RMSE\ (ms^{-1})$', fontsize=28) - # ax.plot(amps * 1e-3, alpham_eff_rmse * 1e-3, linewidth=2, c='C0', - # label='$RMSE\ -\ entire\ Q_m\ range$') - # ax.plot(amps * 1e-3, alpham_eff_rmse_trueQ * 1e-3, linewidth=2, c='C1', - # label='$RMSE\ -\ realistic\ Q_m\ range$') - # ax.plot(amps * 1e-3, alpham_eff_maxdiff * 1e-3, '--', linewidth=2, c='C0', - # label='$MAE\ -\ entire\ Q_m\ range$') - # ax.plot(amps * 1e-3, alpham_eff_maxdiff_trueQ * 1e-3, '--', linewidth=2, c='C1', - # label='$MAE\ -\ realistic\ Q_m\ range$') - # ax.legend(fontsize=28) - # plt.tight_layout() - - plt.show() - diff --git a/deprecated/curve fitting/fit_alphaneff.py b/deprecated/curve fitting/fit_alphaneff.py deleted file mode 100644 index 03f451a..0000000 --- a/deprecated/curve fitting/fit_alphaneff.py +++ /dev/null @@ -1,284 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# @Author: Theo Lemaire -# @Date: 2017-02-07 18:52:13 -# @Email: theo.lemaire@epfl.ch -# @Last Modified by: Theo Lemaire -# @Last Modified time: 2017-02-14 15:48:31 - -''' Detailed fitting strategy of the alpha_n_eff profiles ''' - -import os -import ntpath -import re -import pickle -import matplotlib.pyplot as plt -import matplotlib.cm as cm -import numpy as np -from scipy.optimize import curve_fit -from utils import OpenFilesDialog, rescale, rmse, find_nearest - - -def supraGauss(x, x0, a, b): - return 2 / (np.exp(a * np.abs(x - x0)**b) + np.exp(-a * np.abs(x - x0)**b)) - - -def absPow(x, x0, a, b, c): - return a * np.abs(x - x0)**b + c - - -def sigmoid(x, x0, a, b): - return 1 - 1 / (1 + np.abs((x - x0) / a)**b) - - -def hybridPowGauss(x, a, b, c, d): - return supraGauss(x, 0.0, a, b) * absPow(x, 0.0, c, d, 1.0) - - -def hybridPowSigmoid(x, a, b, c, d): - return sigmoid(x, 0.0, a, b) * absPow(x, 0.0, c, d, 0.0) - - -def piecewiseSigPowGauss(x, x0, a, b, c, d, e, f): - y = np.empty(x.size) - y[x < 0.] = sigmoid(x[x < 0.], x0, a, b) - y[x >= 0.] 
= hybridPowGauss(x[x >= 0.], c, d, e, f) - return y - - - -# Select data files (PKL) -lookup_root = '../Output/lookups extended 0.35MHz/' -lookup_absroot = os.path.abspath(lookup_root) -lookup_filepaths = OpenFilesDialog(lookup_absroot, 'pkl') -rgxp = re.compile('lookups_a(\d*.\d*)nm_f(\d*.\d*)kHz_A(\d*.\d*)kPa_dQ(\d*.\d*)nC_cm2.pkl') -plot_bool = 1 - -nQ = 300 -baseline_ind = -1 - -# Check dialog output -if not lookup_filepaths: - print('error: no lookup table selected') -else: - print('importing alphan_eff profiles from lookup tables') - nfiles = len(lookup_filepaths) - - # Initialize coefficients matrices - amps = np.empty(nfiles) - alphan_eff = np.empty((nfiles, nQ)) - - for i in range(nfiles): - - # Load lookup table - lookup_filename = ntpath.basename(lookup_filepaths[i]) - mo = rgxp.fullmatch(lookup_filename) - if not mo: - print('Error: lookup file does not match regular expression pattern') - else: - # Retrieve stimulus parameters - Fdrive = float(mo.group(2)) * 1e3 - Adrive = float(mo.group(3)) * 1e3 - dQ = float(mo.group(4)) * 1e-2 - amps[i] = Adrive - if Adrive == 0: - baseline_ind = i - - # Retrieve coefficients data - with open(lookup_filepaths[i], 'rb') as fh: - lookup = pickle.load(fh) - Qm = lookup['Q'] - alphan_eff[i, :] = lookup['alpha_n_eff'] - - if baseline_ind == -1: - print('Error: no baseline profile selected') - else: - - Amin = np.amin(amps) - Amax = np.amax(amps) - Qmin = np.amin(Qm) - Qmax = np.amax(Qm) - namps = nfiles - - i_trueQ_lb, trueQ_lb = find_nearest(Qm, -0.8) - i_trueQ_ub, trueQ_ub = find_nearest(Qm, 0.4) - - # Baseline subtraction - print('subtracting baseline (Adrive = 0) from profiles') - alphan_eff_sub = (alphan_eff - alphan_eff[baseline_ind, :]) - - # Suppressing Qm component - print('dividing by Qm') - alphan_eff_sub_even = alphan_eff_sub / Qm - - # Peaks fitting on even profiles - print('fitting power law to peaks of even profiles') - alphan_eff_sub_even_peaks = np.amax(alphan_eff_sub_even, axis=1) - pguess_peaks = (1e4, 1.6, 3.5, 0.4) - popt, _ = curve_fit(hybridPowSigmoid, amps, alphan_eff_sub_even_peaks, p0=pguess_peaks) - alphan_eff_sub_even_peaks_fit = hybridPowSigmoid(amps, *popt) - - # Normalization - print('normalizing even profiles') - alphan_eff_sub_even_norm = alphan_eff_sub_even[1:, :]\ - / alphan_eff_sub_even_peaks[1:].reshape(namps - 1, 1) - - # Normalized profiles fitting - print('fitting hybrid gaussian-power law to normalized alphaneff-sub-even') - alphan_eff_sub_even_norm_fit = np.empty((namps - 1, nQ)) - params = np.empty((namps - 1, 7)) - for i in range(namps - 1): - popt, _ = curve_fit(piecewiseSigPowGauss, Qm, alphan_eff_sub_even_norm[i, :], - bounds=([-np.infty, -1., -np.infty, 0., 0., -np.infty, 0.], - [np.infty, 0., 0., np.infty, np.infty, 0., np.infty])) - alphan_eff_sub_even_norm_fit[i, :] = piecewiseSigPowGauss(Qm, *popt) - params[i, :] = np.asarray(popt) - - # Predict alphan_eff profiles - print('predicting alphan_eff by reconstructing from fits') - alphan_eff_sub_even_predict = np.vstack((np.zeros(nQ), alphan_eff_sub_even_norm_fit))\ - * alphan_eff_sub_even_peaks_fit.reshape(namps, 1) - alphan_eff_sub_predict = alphan_eff_sub_even_predict * Qm - alphan_eff_predict = alphan_eff_sub_predict + alphan_eff[baseline_ind, :] - - # Analyze prediction accuracy, in wide and realistic charge ranges - alphan_eff_trueQ = alphan_eff[:, i_trueQ_lb:i_trueQ_ub] - alphan_eff_predict_trueQ = alphan_eff_predict[:, i_trueQ_lb:i_trueQ_ub] - alphan_eff_diff = alphan_eff_predict - alphan_eff - alphan_eff_diff_trueQ = alphan_eff_diff[:, 
i_trueQ_lb:i_trueQ_ub] - alphan_eff_maxdiff = np.amax(np.abs(alphan_eff_diff), axis=1) - alphan_eff_maxdiff_trueQ = np.amax(np.abs(alphan_eff_diff_trueQ), axis=1) - alphan_eff_rmse = np.empty(namps) - alphan_eff_rmse_trueQ = np.empty(namps) - for i in range(namps): - alphan_eff_rmse[i] = rmse(alphan_eff[i, :], alphan_eff_predict[i, :]) - alphan_eff_rmse_trueQ[i] = rmse(alphan_eff_trueQ[i, :], alphan_eff_predict_trueQ[i, :]) - - - if plot_bool == 1: - - # Plotting - print('plotting') - - mymap = cm.get_cmap('jet') - sm_amp = plt.cm.ScalarMappable(cmap=mymap, norm=plt.Normalize(Amin * 1e-3, Amax * 1e-3)) - sm_amp._A = [] - - # 1: alphan_eff - fig, ax = plt.subplots(figsize=(21, 7)) - ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) - ax.set_ylabel('$\\alpha_{n,\ eff}\ (ms^{-1})$', fontsize=28) - ax.set_xlim(Qmin * 1e2, Qmax * 1e2) - for i in range(namps): - ax.plot(Qm * 1e2, alphan_eff[i, :] * 1e-3, c=mymap(rescale(amps[i], Amin, Amax))) - cbar = plt.colorbar(sm_amp) - cbar.ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=28) - plt.tight_layout() - - # 2: alphan_eff_sub - fig, ax = plt.subplots(figsize=(21, 7)) - ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) - ax.set_ylabel('$\\alpha_{n,\ eff-sub}\ (ms^{-1})$', fontsize=28) - ax.set_xlim(Qmin * 1e2, Qmax * 1e2) - for i in range(namps): - ax.plot(Qm * 1e2, alphan_eff_sub[i, :] * 1e-3, - c=mymap(rescale(amps[i], Amin, Amax))) - cbar = plt.colorbar(sm_amp) - cbar.ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=28) - plt.tight_layout() - - # 3: alphan_eff_sub_even - fig, ax = plt.subplots(figsize=(21, 7)) - ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) - ax.set_ylabel('$\\alpha_{n,\ eff-sub-even}\ (ms^{-1}\ cm^2/nC)$', fontsize=28) - ax.set_xlim(Qmin * 1e2, Qmax * 1e2) - for i in range(namps): - ax.plot(Qm * 1e2, alphan_eff_sub_even[i, :] * 1e-3, - c=mymap(rescale(amps[i], Amin, Amax))) - cbar = plt.colorbar(sm_amp) - cbar.ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=28) - plt.tight_layout() - - # 4: alphan_eff_sub_even_peaks - fig, ax = plt.subplots(figsize=(21, 7)) - ax.set_xlabel('$A_{drive}\ (kPa)$', fontsize=28) - ax.set_ylabel('$\\alpha_{n,\ eff-sub-even-peaks}\ (ms^{-1}\ cm^2/nC)$', fontsize=28) - ax.scatter(amps * 1e-3, alphan_eff_sub_even_peaks * 1e-3, s=30, c='C0', label='data') - ax.plot(amps * 1e-3, alphan_eff_sub_even_peaks_fit * 1e-3, c='C1', label='fit') - ax.legend(fontsize=28) - plt.tight_layout() - - # 5: alphan_eff_sub_even_norm - fig, ax = plt.subplots(figsize=(21, 7)) - ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) - ax.set_ylabel('$\\alpha_{n,\ eff-sub-even-norm}\ (-)$', fontsize=28) - ax.set_xlim(Qmin * 1e2, Qmax * 1e2) - ax.grid() - for i in range(namps - 1): - ax.plot(Qm * 1e2, alphan_eff_sub_even_norm[i, :], - c=mymap(rescale(amps[i], Amin, Amax))) - for i in range(namps - 1): - ax.plot(Qm * 1e2, alphan_eff_sub_even_norm_fit[i, :], '--', - c=mymap(rescale(amps[i], Amin, Amax))) - cbar = plt.colorbar(sm_amp) - cbar.ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=28) - plt.tight_layout() - - - # 6: piecewise function parameters - fig, ax = plt.subplots(figsize=(21, 7)) - ax.set_xlabel('$A_{drive}\ (kPa)$', fontsize=28) - ax.set_ylabel('$\\alpha_{n,\ eff-sub-even-norm}\ fit\ params$', fontsize=28) - ax.plot(amps[1:] * 1e-3, params[:, 0], label='x0') - ax.plot(amps[1:] * 1e-3, params[:, 1], label='a') - ax.plot(amps[1:] * 1e-3, params[:, 2], label='b') - ax.plot(amps[1:] * 1e-3, params[:, 3], label='c') - ax.plot(amps[1:] * 1e-3, params[:, 4], label='d') - ax.plot(amps[1:] * 1e-3, params[:, 5], label='e') - ax.plot(amps[1:] 
* 1e-3, params[:, 6], label='f') - ax.grid() - ax.legend(fontsize=28) - - - # 7: alphan_eff_predict - fig, ax = plt.subplots(figsize=(21, 7)) - ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) - ax.set_ylabel('$\\alpha_{n,\ eff}\ prediction\ (ms^{-1})$', fontsize=28) - ax.set_xlim(Qmin * 1e2, Qmax * 1e2) - for i in range(namps): - ax.plot(Qm * 1e2, alphan_eff_predict[i, :] * 1e-3, linewidth=2, - c=mymap(rescale(amps[i], Amin, Amax))) - cbar = plt.colorbar(sm_amp) - cbar.ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=28) - plt.tight_layout() - - - # 8: alphan_eff_predict - alphan_eff - # fig, ax = plt.subplots(figsize=(21, 7)) - # ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) - # ax.set_ylabel('$\\alpha_{n,\ eff}\ difference\ (ms^{-1})$', fontsize=28) - # ax.set_xlim(Qmin * 1e2, Qmax * 1e2) - # for i in range(namps): - # ax.plot(Qm * 1e2, alphan_eff_diff[i, :] * 1e-3, linewidth=2, - # c=mymap(rescale(amps[i], Amin, Amax))) - # cbar = plt.colorbar(sm_amp) - # cbar.ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=28) - # plt.tight_layout() - - # 9: RMSE & max absolute error - fig, ax = plt.subplots(figsize=(21, 7)) - ax.set_xlabel('$A_{drive} \ (kPa)$', fontsize=28) - ax.set_ylabel('$RMSE\ (ms^{-1})$', fontsize=28) - ax.plot(amps * 1e-3, alphan_eff_rmse * 1e-3, linewidth=2, c='C0', - label='$RMSE\ -\ entire\ Q_m\ range$') - ax.plot(amps * 1e-3, alphan_eff_rmse_trueQ * 1e-3, linewidth=2, c='C1', - label='$RMSE\ -\ realistic\ Q_m\ range$') - ax.plot(amps * 1e-3, alphan_eff_maxdiff * 1e-3, '--', linewidth=2, c='C0', - label='$MAE\ -\ entire\ Q_m\ range$') - ax.plot(amps * 1e-3, alphan_eff_maxdiff_trueQ * 1e-3, '--', linewidth=2, c='C1', - label='$MAE\ -\ realistic\ Q_m\ range$') - ax.legend(fontsize=28) - plt.tight_layout() - - plt.show() - diff --git a/deprecated/curve fitting/fit_betaheff.py b/deprecated/curve fitting/fit_betaheff.py deleted file mode 100644 index 6f47e4c..0000000 --- a/deprecated/curve fitting/fit_betaheff.py +++ /dev/null @@ -1,251 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# @Author: Theo Lemaire -# @Date: 2017-02-07 15:15:11 -# @Email: theo.lemaire@epfl.ch -# @Last Modified by: Theo Lemaire -# @Last Modified time: 2017-02-14 15:48:36 - -''' Detailed fitting strategy of the beta_h_eff profiles ''' - -import os -import ntpath -import re -import pickle -import matplotlib.pyplot as plt -import matplotlib.cm as cm -import numpy as np -from scipy.optimize import curve_fit -from utils import OpenFilesDialog, rescale, rmse, find_nearest - - -def gaussian(x, mu, sigma, a): - return a * np.exp(-((x - mu) / (2 * sigma))**2) - - -def gauss3(x, a1, mu1, sig1, a2, mu2, sig2, a3, mu3, sig3): - return gaussian(x, mu1, sig1, a1) + gaussian(x, mu2, sig2, a2) + gaussian(x, mu3, sig3, a3) - - -def sigmoid(x, x0, a, b): - return 1 - 1 / (1 + np.abs((x - x0) / a)**b) - - -# Select data files (PKL) -lookup_root = '../Output/lookups extended 0.35MHz/' -lookup_absroot = os.path.abspath(lookup_root) -lookup_filepaths = OpenFilesDialog(lookup_absroot, 'pkl') -rgxp = re.compile('lookups_a(\d*.\d*)nm_f(\d*.\d*)kHz_A(\d*.\d*)kPa_dQ(\d*.\d*)nC_cm2.pkl') -plot_bool = 1 - -nQ = 300 -baseline_ind = -1 - -# Check dialog output -if not lookup_filepaths: - print('error: no lookup table selected') -else: - print('importing betah_eff profiles from lookup tables') - nfiles = len(lookup_filepaths) - - # Initialize coefficients matrices - amps = np.empty(nfiles) - betah_eff = np.empty((nfiles, nQ)) - - for i in range(nfiles): - - # Load lookup table - lookup_filename = 
ntpath.basename(lookup_filepaths[i]) - mo = rgxp.fullmatch(lookup_filename) - if not mo: - print('Error: lookup file does not match regular expression pattern') - else: - # Retrieve stimulus parameters - Fdrive = float(mo.group(2)) * 1e3 - Adrive = float(mo.group(3)) * 1e3 - dQ = float(mo.group(4)) * 1e-2 - amps[i] = Adrive - if Adrive == 0: - baseline_ind = i - - # Retrieve coefficients data - with open(lookup_filepaths[i], 'rb') as fh: - lookup = pickle.load(fh) - Qm = lookup['Q'] - betah_eff[i, :] = lookup['beta_h_eff'] - - if baseline_ind == -1: - print('Error: no baseline profile selected') - else: - - Amin = np.amin(amps) - Amax = np.amax(amps) - Qmin = np.amin(Qm) - Qmax = np.amax(Qm) - namps = nfiles - - i_trueQ_lb, trueQ_lb = find_nearest(Qm, -0.8) - i_trueQ_ub, trueQ_ub = find_nearest(Qm, 0.4) - - # Baseline subtraction - print('subtracting baseline (Adrive = 0) from profiles') - betah_eff_sub = (betah_eff - betah_eff[baseline_ind, :]) - - # Peaks detection on subtracted profiles - print('finding peaks on subtracted profiles') - betah_eff_sub_peaks = np.amax(np.abs(betah_eff_sub), axis=1) - - # Normalization - print('normalizing subtracted profiles') - betah_eff_sub_norm = betah_eff_sub[1:, :]\ - / betah_eff_sub_peaks[1:].reshape(namps - 1, 1) - - # Normalized profiles fitting - print('fitting "mexican hat" to normalized betaheff-sub') - betah_eff_sub_norm_fit = np.empty((namps - 1, nQ)) - params = np.empty((namps - 1, 9)) - for i in range(namps - 1): - popt, _ = curve_fit(gauss3, Qm, betah_eff_sub_norm[i], - bounds=([0.0, -0.5, 0.0, -1.2, -0.2, 0., 0.0, 0.0, 0.0], - [0.3, -0.2, np.inf, -0.8, 0.0, np.inf, 0.1, 0.1, np.inf]), - max_nfev=100000) - betah_eff_sub_norm_fit[i, :] = gauss3(Qm, *popt) - params[i, :] = np.asarray(popt) - - # Predict betah_eff profiles - print('predicting betah_eff by reconstructing from fits') - betah_eff_sub_predict = np.vstack((np.zeros(nQ), betah_eff_sub_norm_fit))\ - * betah_eff_sub_peaks.reshape(namps, 1) - betah_eff_predict = betah_eff_sub_predict + betah_eff[baseline_ind, :] - - # Analyze prediction accuracy, in wide and realistic charge ranges - betah_eff_trueQ = betah_eff[:, i_trueQ_lb:i_trueQ_ub] - betah_eff_predict_trueQ = betah_eff_predict[:, i_trueQ_lb:i_trueQ_ub] - betah_eff_diff = betah_eff_predict - betah_eff - betah_eff_diff_trueQ = betah_eff_diff[:, i_trueQ_lb:i_trueQ_ub] - betah_eff_maxdiff = np.amax(np.abs(betah_eff_diff), axis=1) - betah_eff_maxdiff_trueQ = np.amax(np.abs(betah_eff_diff_trueQ), axis=1) - betah_eff_rmse = np.empty(namps) - betah_eff_rmse_trueQ = np.empty(namps) - for i in range(namps): - betah_eff_rmse[i] = rmse(betah_eff[i, :], betah_eff_predict[i, :]) - betah_eff_rmse_trueQ[i] = rmse(betah_eff_trueQ[i, :], betah_eff_predict_trueQ[i, :]) - - - if plot_bool == 1: - - # Plotting - print('plotting') - - mymap = cm.get_cmap('jet') - sm_amp = plt.cm.ScalarMappable(cmap=mymap, norm=plt.Normalize(Amin * 1e-3, Amax * 1e-3)) - sm_amp._A = [] - - # 1: betah_eff - fig, ax = plt.subplots(figsize=(21, 7)) - ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) - ax.set_ylabel('$\\beta_{h,\ eff}\ (ms^{-1})$', fontsize=28) - ax.set_xlim(Qmin * 1e2, Qmax * 1e2) - for i in range(namps): - ax.plot(Qm * 1e2, betah_eff[i, :] * 1e-3, c=mymap(rescale(amps[i], Amin, Amax))) - cbar = plt.colorbar(sm_amp) - cbar.ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=28) - plt.tight_layout() - - # 2: betah_eff_sub - fig, ax = plt.subplots(figsize=(21, 7)) - ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) - ax.set_ylabel('$\\beta_{h,\ eff-sub}\ (ms^{-1})$', 
fontsize=28) - ax.set_xlim(Qmin * 1e2, Qmax * 1e2) - for i in range(namps): - ax.plot(Qm * 1e2, betah_eff_sub[i, :] * 1e-3, - c=mymap(rescale(amps[i], Amin, Amax))) - cbar = plt.colorbar(sm_amp) - cbar.ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=28) - plt.tight_layout() - - # 3: betah_eff_sub_peaks - fig, ax = plt.subplots(figsize=(21, 7)) - ax.set_xlabel('$A_{drive}\ (kPa)$', fontsize=28) - ax.set_ylabel('$\\beta_{h,\ eff-sub-peaks}\ (ms^{-1})$', fontsize=28) - ax.scatter(amps * 1e-3, betah_eff_sub_peaks * 1e-3, s=30, c='C0', label='data') - # ax.plot(amps * 1e-3, betah_eff_sub_peaks_fit * 1e-3, c='C1', label='fit') - ax.legend(fontsize=28) - plt.tight_layout() - - # 5: betah_eff_sub_norm - fig, ax = plt.subplots(figsize=(21, 7)) - ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) - ax.set_ylabel('$\\beta_{h,\ eff-sub-norm}\ (-)$', fontsize=28) - ax.set_xlim(Qmin * 1e2, Qmax * 1e2) - ax.grid() - for i in range(namps - 1): - ax.plot(Qm * 1e2, betah_eff_sub_norm[i, :], - c=mymap(rescale(amps[i], Amin, Amax))) - for i in range(namps - 1): - ax.plot(Qm * 1e2, betah_eff_sub_norm_fit[i, :], '--', - c=mymap(rescale(amps[i], Amin, Amax))) - cbar = plt.colorbar(sm_amp) - cbar.ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=28) - plt.tight_layout() - - - # 6: parameters - fig, ax = plt.subplots(figsize=(21, 7)) - ax.set_xlabel('$A_{drive}\ (kPa)$', fontsize=28) - ax.set_ylabel('$\\beta_{h,\ eff-sub-norm}\ fit\ params$', fontsize=28) - ax.plot(amps[1:] * 1e-3, params[:, 0], label='a1') - ax.plot(amps[1:] * 1e-3, params[:, 1], label='mu1') - ax.plot(amps[1:] * 1e-3, params[:, 2], label='sigma1') - ax.plot(amps[1:] * 1e-3, params[:, 3], label='a2') - ax.plot(amps[1:] * 1e-3, params[:, 4], label='mu2') - ax.plot(amps[1:] * 1e-3, params[:, 5], label='sigma2') - ax.plot(amps[1:] * 1e-3, params[:, 6], label='a3') - ax.plot(amps[1:] * 1e-3, params[:, 7], label='mu3') - ax.plot(amps[1:] * 1e-3, params[:, 8], label='sigma3') - ax.grid() - ax.legend(fontsize=28) - - - # 7: betah_eff_predict - fig, ax = plt.subplots(figsize=(21, 7)) - ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) - ax.set_ylabel('$\\beta_{h,\ eff}\ prediction\ (ms^{-1})$', fontsize=28) - ax.set_xlim(Qmin * 1e2, Qmax * 1e2) - for i in range(namps): - ax.plot(Qm * 1e2, betah_eff_predict[i, :] * 1e-3, - c=mymap(rescale(amps[i], Amin, Amax))) - cbar = plt.colorbar(sm_amp) - cbar.ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=28) - plt.tight_layout() - - - # 8: betah_eff_predict - betah_eff - fig, ax = plt.subplots(figsize=(21, 7)) - ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) - ax.set_ylabel('$\\beta_{h,\ eff}\ difference\ (ms^{-1})$', fontsize=28) - ax.set_xlim(Qmin * 1e2, Qmax * 1e2) - for i in range(namps): - ax.plot(Qm * 1e2, betah_eff_diff[i, :] * 1e-3, - c=mymap(rescale(amps[i], Amin, Amax))) - cbar = plt.colorbar(sm_amp) - cbar.ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=28) - plt.tight_layout() - - # 9: RMSE & max absolute error - fig, ax = plt.subplots(figsize=(21, 7)) - ax.set_xlabel('$A_{drive} \ (kPa)$', fontsize=28) - ax.set_ylabel('$Error\ (ms^{-1})$', fontsize=28) - ax.plot(amps * 1e-3, betah_eff_rmse * 1e-3, c='C0', - label='$RMSE\ -\ entire\ Q_m\ range$') - ax.plot(amps * 1e-3, betah_eff_rmse_trueQ * 1e-3, c='C1', - label='$RMSE\ -\ realistic\ Q_m\ range$') - ax.plot(amps * 1e-3, betah_eff_maxdiff * 1e-3, '--', c='C0', - label='$MAE\ -\ entire\ Q_m\ range$') - ax.plot(amps * 1e-3, betah_eff_maxdiff_trueQ * 1e-3, '--', c='C1', - label='$MAE\ -\ realistic\ Q_m\ range$') - ax.legend(fontsize=28) - plt.tight_layout() - - 
plt.show() - diff --git a/deprecated/curve fitting/fit_betameff.py b/deprecated/curve fitting/fit_betameff.py deleted file mode 100644 index 8ce86db..0000000 --- a/deprecated/curve fitting/fit_betameff.py +++ /dev/null @@ -1,285 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# @Author: Theo Lemaire -# @Date: 2017-02-06 14:20:03 -# @Email: theo.lemaire@epfl.ch -# @Last Modified by: Theo Lemaire -# @Last Modified time: 2017-02-14 15:48:41 - -''' Detailed fitting strategy of the beta_m_eff profiles ''' - -import os -import ntpath -import re -import pickle -import matplotlib.pyplot as plt -import matplotlib.cm as cm -import numpy as np -from scipy.optimize import curve_fit -from utils import OpenFilesDialog, rescale, rmse, find_nearest - - -def supraGauss(x, x0, a, b): - return 2 / (np.exp(a * np.abs(x - x0)**b) + np.exp(-a * np.abs(x - x0)**b)) - - -def absPow(x, x0, a, b, c): - return a * np.abs(x - x0)**b + c - - -def sigmoid(x, x0, a, b): - return 1 - 1 / (1 + np.abs((x - x0) / a)**b) - - -def hybridPowGauss(x, a, b, c, d): - return supraGauss(x, 0.0, a, b) * absPow(x, 0.0, c, d, 1.0) - - -def hybridPowSigmoid(x, a, b, c, d): - return sigmoid(x, 0.0, a, b) * absPow(x, 0.0, c, d, 0.0) - - -def piecewiseSigPowGauss(x, thr, x0, a, b, c, d, e, f): - y = np.empty(x.size) - y[x < thr] = hybridPowGauss(x[x < thr], c, d, e, f) - y[x >= thr] = sigmoid(x[x >= thr], x0, a, b) - return y - - - -# Select data files (PKL) -lookup_root = '../Output/lookups extended 0.35MHz/' -lookup_absroot = os.path.abspath(lookup_root) -lookup_filepaths = OpenFilesDialog(lookup_absroot, 'pkl') -rgxp = re.compile('lookups_a(\d*.\d*)nm_f(\d*.\d*)kHz_A(\d*.\d*)kPa_dQ(\d*.\d*)nC_cm2.pkl') -plot_bool = 1 - -nQ = 300 -baseline_ind = -1 - -# Check dialog output -if not lookup_filepaths: - print('error: no lookup table selected') -else: - print('importing betam_eff profiles from lookup tables') - nfiles = len(lookup_filepaths) - - # Initialize coefficients matrices - amps = np.empty(nfiles) - betam_eff = np.empty((nfiles, nQ)) - - for i in range(nfiles): - - # Load lookup table - lookup_filename = ntpath.basename(lookup_filepaths[i]) - mo = rgxp.fullmatch(lookup_filename) - if not mo: - print('Error: lookup file does not match regular expression pattern') - else: - # Retrieve stimulus parameters - Fdrive = float(mo.group(2)) * 1e3 - Adrive = float(mo.group(3)) * 1e3 - dQ = float(mo.group(4)) * 1e-2 - amps[i] = Adrive - if Adrive == 0: - baseline_ind = i - - # Retrieve coefficients data - with open(lookup_filepaths[i], 'rb') as fh: - lookup = pickle.load(fh) - Qm = lookup['Q'] - betam_eff[i, :] = lookup['beta_m_eff'] - - if baseline_ind == -1: - print('Error: no baseline profile selected') - else: - - Amin = np.amin(amps) - Amax = np.amax(amps) - Qmin = np.amin(Qm) - Qmax = np.amax(Qm) - namps = nfiles - - i_trueQ_lb, trueQ_lb = find_nearest(Qm, -0.8) - i_trueQ_ub, trueQ_ub = find_nearest(Qm, 0.4) - - # Baseline subtraction - print('subtracting baseline (Adrive = 0) from profiles') - betam_eff_sub = (betam_eff - betam_eff[baseline_ind, :]) - - # Suppressing Qm component - print('dividing by -Qm') - betam_eff_sub_even = - betam_eff_sub / Qm - - # Peaks fitting on even profiles - print('fitting power law to peaks of even profiles') - betam_eff_sub_even_peaks = np.amax(betam_eff_sub_even, axis=1) - pguess_peaks = (1e4, 1.6, 3.5, 0.4) - popt, _ = curve_fit(hybridPowSigmoid, amps, betam_eff_sub_even_peaks, p0=pguess_peaks) - betam_eff_sub_even_peaks_fit = hybridPowSigmoid(amps, *popt) - - # Normalization - 
print('normalizing even profiles') - betam_eff_sub_even_norm = betam_eff_sub_even[1:, :]\ - / betam_eff_sub_even_peaks[1:].reshape(namps - 1, 1) - - # Normalized profiles fitting - print('fitting hybrid gaussian-power law to normalized betameff-sub-even') - betam_eff_sub_even_norm_fit = np.empty((namps - 1, nQ)) - params = np.empty((namps - 1, 8)) - for i in range(namps - 1): - popt, _ = curve_fit(piecewiseSigPowGauss, Qm, betam_eff_sub_even_norm[i, :], - bounds=([-0.5, -0.5, -1., -np.infty, 0., 0., -np.infty, 0.], - [0, 0.5, 0., 0., np.infty, np.infty, 0., np.infty])) - betam_eff_sub_even_norm_fit[i, :] = piecewiseSigPowGauss(Qm, *popt) - params[i, :] = np.asarray(popt) - - # Predict betam_eff profiles - print('predicting betam_eff by reconstructing from fits') - betam_eff_sub_even_predict = np.vstack((np.zeros(nQ), betam_eff_sub_even_norm_fit))\ - * betam_eff_sub_even_peaks_fit.reshape(namps, 1) - betam_eff_sub_predict = - betam_eff_sub_even_predict * Qm - betam_eff_predict = betam_eff_sub_predict + betam_eff[baseline_ind, :] - - # Analyze prediction accuracy, in wide and realistic charge ranges - betam_eff_trueQ = betam_eff[:, i_trueQ_lb:i_trueQ_ub] - betam_eff_predict_trueQ = betam_eff_predict[:, i_trueQ_lb:i_trueQ_ub] - betam_eff_diff = betam_eff_predict - betam_eff - betam_eff_diff_trueQ = betam_eff_diff[:, i_trueQ_lb:i_trueQ_ub] - betam_eff_maxdiff = np.amax(np.abs(betam_eff_diff), axis=1) - betam_eff_maxdiff_trueQ = np.amax(np.abs(betam_eff_diff_trueQ), axis=1) - betam_eff_rmse = np.empty(namps) - betam_eff_rmse_trueQ = np.empty(namps) - for i in range(namps): - betam_eff_rmse[i] = rmse(betam_eff[i, :], betam_eff_predict[i, :]) - betam_eff_rmse_trueQ[i] = rmse(betam_eff_trueQ[i, :], betam_eff_predict_trueQ[i, :]) - - - if plot_bool == 1: - - # Plotting - print('plotting') - - mymap = cm.get_cmap('jet') - sm_amp = plt.cm.ScalarMappable(cmap=mymap, norm=plt.Normalize(Amin * 1e-3, Amax * 1e-3)) - sm_amp._A = [] - - # 1: betam_eff - fig, ax = plt.subplots(figsize=(21, 7)) - ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) - ax.set_ylabel('$\\beta_{m,\ eff}\ (ms^{-1})$', fontsize=28) - ax.set_xlim(Qmin * 1e2, Qmax * 1e2) - for i in range(namps): - ax.plot(Qm * 1e2, betam_eff[i, :] * 1e-3, c=mymap(rescale(amps[i], Amin, Amax))) - cbar = plt.colorbar(sm_amp) - cbar.ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=28) - plt.tight_layout() - - # 2: betam_eff_sub - fig, ax = plt.subplots(figsize=(21, 7)) - ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) - ax.set_ylabel('$\\beta_{m,\ eff-sub}\ (ms^{-1})$', fontsize=28) - ax.set_xlim(Qmin * 1e2, Qmax * 1e2) - for i in range(namps): - ax.plot(Qm * 1e2, betam_eff_sub[i, :] * 1e-3, - c=mymap(rescale(amps[i], Amin, Amax))) - cbar = plt.colorbar(sm_amp) - cbar.ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=28) - plt.tight_layout() - - # 3: betam_eff_sub_even - fig, ax = plt.subplots(figsize=(21, 7)) - ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) - ax.set_ylabel('$\\beta_{m,\ eff-sub-even}\ (ms^{-1}\ cm^2/nC)$', fontsize=28) - ax.set_xlim(Qmin * 1e2, Qmax * 1e2) - for i in range(namps): - ax.plot(Qm * 1e2, betam_eff_sub_even[i, :] * 1e-3, - c=mymap(rescale(amps[i], Amin, Amax))) - cbar = plt.colorbar(sm_amp) - cbar.ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=28) - plt.tight_layout() - - # 4: betam_eff_sub_even_peaks - fig, ax = plt.subplots(figsize=(21, 7)) - ax.set_xlabel('$A_{drive}\ (kPa)$', fontsize=28) - ax.set_ylabel('$\\beta_{m,\ eff-sub-even-peaks}\ (ms^{-1}\ cm^2/nC)$', fontsize=28) - ax.scatter(amps * 1e-3, betam_eff_sub_even_peaks * 1e-3, 
s=30, c='C0', label='data') - ax.plot(amps * 1e-3, betam_eff_sub_even_peaks_fit * 1e-3, c='C1', label='fit') - ax.legend(fontsize=28) - plt.tight_layout() - - # 5: betam_eff_sub_even_norm - fig, ax = plt.subplots(figsize=(21, 7)) - ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) - ax.set_ylabel('$\\beta_{m,\ eff-sub-even-norm}\ (-)$', fontsize=28) - ax.set_xlim(Qmin * 1e2, Qmax * 1e2) - ax.grid() - for i in range(namps - 1): - ax.plot(Qm * 1e2, betam_eff_sub_even_norm[i, :], - c=mymap(rescale(amps[i], Amin, Amax))) - for i in range(namps - 1): - ax.plot(Qm * 1e2, betam_eff_sub_even_norm_fit[i, :], '--', - c=mymap(rescale(amps[i], Amin, Amax))) - cbar = plt.colorbar(sm_amp) - cbar.ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=28) - plt.tight_layout() - - - # 6: piecewise function parameters - fig, ax = plt.subplots(figsize=(21, 7)) - ax.set_xlabel('$A_{drive}\ (kPa)$', fontsize=28) - ax.set_ylabel('$\\beta_{m,\ eff-sub-even-norm}\ fit\ params$', fontsize=28) - ax.plot(amps[1:] * 1e-3, params[:, 0], label='thr') - ax.plot(amps[1:] * 1e-3, params[:, 1], label='x0') - ax.plot(amps[1:] * 1e-3, params[:, 2], label='a') - ax.plot(amps[1:] * 1e-3, params[:, 3], label='b') - ax.plot(amps[1:] * 1e-3, params[:, 4], label='c') - ax.plot(amps[1:] * 1e-3, params[:, 5], label='d') - ax.plot(amps[1:] * 1e-3, params[:, 6], label='e') - ax.plot(amps[1:] * 1e-3, params[:, 7], label='f') - ax.grid() - ax.legend(fontsize=28) - - - # 7: betam_eff_predict - fig, ax = plt.subplots(figsize=(21, 7)) - ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) - ax.set_ylabel('$\\beta_{m,\ eff}\ prediction\ (ms^{-1})$', fontsize=28) - ax.set_xlim(Qmin * 1e2, Qmax * 1e2) - for i in range(namps): - ax.plot(Qm * 1e2, betam_eff_predict[i, :] * 1e-3, linewidth=2, - c=mymap(rescale(amps[i], Amin, Amax))) - cbar = plt.colorbar(sm_amp) - cbar.ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=28) - plt.tight_layout() - - - # 8: betam_eff_predict - betam_eff - # fig, ax = plt.subplots(figsize=(21, 7)) - # ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) - # ax.set_ylabel('$\\beta_{m,\ eff}\ difference\ (ms^{-1})$', fontsize=28) - # ax.set_xlim(Qmin * 1e2, Qmax * 1e2) - # for i in range(namps): - # ax.plot(Qm * 1e2, betam_eff_diff[i, :] * 1e-3, linewidth=2, - # c=mymap(rescale(amps[i], Amin, Amax))) - # cbar = plt.colorbar(sm_amp) - # cbar.ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=28) - # plt.tight_layout() - - # 9: RMSE & max absolute error - fig, ax = plt.subplots(figsize=(21, 7)) - ax.set_xlabel('$A_{drive} \ (kPa)$', fontsize=28) - ax.set_ylabel('$RMSE\ (ms^{-1})$', fontsize=28) - ax.plot(amps * 1e-3, betam_eff_rmse * 1e-3, linewidth=2, c='C0', - label='$RMSE\ -\ entire\ Q_m\ range$') - ax.plot(amps * 1e-3, betam_eff_rmse_trueQ * 1e-3, linewidth=2, c='C1', - label='$RMSE\ -\ realistic\ Q_m\ range$') - ax.plot(amps * 1e-3, betam_eff_maxdiff * 1e-3, '--', linewidth=2, c='C0', - label='$MAE\ -\ entire\ Q_m\ range$') - ax.plot(amps * 1e-3, betam_eff_maxdiff_trueQ * 1e-3, '--', linewidth=2, c='C1', - label='$MAE\ -\ realistic\ Q_m\ range$') - ax.legend(fontsize=28) - plt.tight_layout() - - plt.show() - diff --git a/deprecated/curve fitting/fit_betaneff.py b/deprecated/curve fitting/fit_betaneff.py deleted file mode 100644 index 8df40cd..0000000 --- a/deprecated/curve fitting/fit_betaneff.py +++ /dev/null @@ -1,271 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# @Author: Theo Lemaire -# @Date: 2017-02-07 18:55:49 -# @Email: theo.lemaire@epfl.ch -# @Last Modified by: Theo Lemaire -# @Last Modified time: 2017-02-14 15:48:50 - -''' 
Detailed fitting strategy of the beta_n_eff profiles ''' - -import os -import ntpath -import re -import pickle -import matplotlib.pyplot as plt -import matplotlib.cm as cm -import numpy as np -from scipy.optimize import curve_fit -import scipy.special as sp -from utils import OpenFilesDialog, rescale, rmse, find_nearest - - -def skewed_gaussian(x, mu=0, sigma=1, alpha=0, a=1, c=0): - normpdf = (1 / (sigma * np.sqrt(2 * np.pi))) * np.exp(-(np.power((x - mu), 2) / (2 * np.power(sigma, 2)))) - normcdf = (0.5 * (1 + sp.erf((alpha * ((x - mu) / sigma)) / (np.sqrt(2))))) - return 2 * a * normpdf * normcdf + c - - -def gaussian(x, mu, sigma, a): - return a * np.exp(-((x - mu) / (2 * sigma))**2) - - -def Exponential(x, x0, b, c): - return b * np.exp(c * (x - x0)) - - -def Exp0(x, b, c): - return Exponential(x, 0.0, b, c) - - -def hybridExpGauss(x, mu, sigma, a, b, c): - return gaussian(x, mu, sigma, a) + Exponential(x, 0.0, b, -c) - - -def dualGauss(x, mu1, mu2, sigma1, sigma2, a1, a2): - return gaussian(x, mu1, sigma1, a1) + gaussian(x, mu2, sigma2, a2) - - - - -# Select data files (PKL) -lookup_root = '../Output/lookups extended 0.35MHz/' -lookup_absroot = os.path.abspath(lookup_root) -lookup_filepaths = OpenFilesDialog(lookup_absroot, 'pkl') -rgxp = re.compile('lookups_a(\d*.\d*)nm_f(\d*.\d*)kHz_A(\d*.\d*)kPa_dQ(\d*.\d*)nC_cm2.pkl') -plot_bool = 1 - -nQ = 300 -baseline_ind = -1 - -# Check dialog output -if not lookup_filepaths: - print('error: no lookup table selected') -else: - print('importing betan_eff profiles from lookup tables') - nfiles = len(lookup_filepaths) - - # Initialize coefficients matrices - amps = np.empty(nfiles) - betan_eff = np.empty((nfiles, nQ)) - - for i in range(nfiles): - - # Load lookup table - lookup_filename = ntpath.basename(lookup_filepaths[i]) - mo = rgxp.fullmatch(lookup_filename) - if not mo: - print('Error: lookup file does not match regular expression pattern') - else: - # Retrieve stimulus parameters - Fdrive = float(mo.group(2)) * 1e3 - Adrive = float(mo.group(3)) * 1e3 - dQ = float(mo.group(4)) * 1e-2 - amps[i] = Adrive - if Adrive == 0: - baseline_ind = i - - # Retrieve coefficients data - with open(lookup_filepaths[i], 'rb') as fh: - lookup = pickle.load(fh) - Qm = lookup['Q'] - betan_eff[i, :] = lookup['beta_n_eff'] - - if baseline_ind == -1: - print('Error: no baseline profile selected') - else: - - Amin = np.amin(amps) - Amax = np.amax(amps) - Qmin = np.amin(Qm) - Qmax = np.amax(Qm) - namps = nfiles - - i_trueQ_lb, trueQ_lb = find_nearest(Qm, -0.8) - i_trueQ_ub, trueQ_ub = find_nearest(Qm, 0.4) - - - # Baseline subtraction - print('subtracting baseline (Adrive = 0) from profiles') - betan_eff_sub = (betan_eff - betan_eff[baseline_ind, :]) - - # Peaks fitting on even profiles - print('fitting exponential law to profiles peaks') - betan_eff_sub_peaks = np.amax(betan_eff_sub, axis=1) - popt, _ = curve_fit(Exp0, amps, betan_eff_sub_peaks, p0=(1.8e14, 3e-5)) - betan_eff_sub_peaks_fit = Exp0(amps, *popt) - - # Normalization - print('normalizing subtracted profiles') - betan_eff_sub_norm = betan_eff_sub[1:, :]\ - / betan_eff_sub_peaks[1:].reshape(namps - 1, 1) - - # Normalized profiles fitting - print('fitting hybrid gaussian-exp law to normalized betaneff-sub') - betan_eff_sub_norm_fit = np.empty((namps - 1, nQ)) - params = np.empty((namps - 1, 6)) - for i in range(namps - 1): - print(i) - popt, _ = curve_fit(dualGauss, Qm, betan_eff_sub_norm[i], - bounds=([-np.infty, -np.infty, 0., 0., 0., 0.], - [0., 0., np.infty, np.infty, np.infty, np.infty]), - 
max_nfev=100000) - betan_eff_sub_norm_fit[i, :] = dualGauss(Qm, *popt) - params[i, :] = np.asarray(popt) - - - - # Predict betan_eff profiles - print('predicting betan_eff by reconstructing from fits') - betan_eff_sub_predict = np.vstack((np.zeros(nQ), betan_eff_sub_norm_fit))\ - * betan_eff_sub_peaks_fit.reshape(namps, 1) - betan_eff_predict = betan_eff_sub_predict + betan_eff[baseline_ind, :] - - # Analyze prediction accuracy, in wide and realistic charge ranges - betan_eff_trueQ = betan_eff[:, i_trueQ_lb:i_trueQ_ub] - betan_eff_predict_trueQ = betan_eff_predict[:, i_trueQ_lb:i_trueQ_ub] - betan_eff_diff = betan_eff_predict - betan_eff - betan_eff_diff_trueQ = betan_eff_diff[:, i_trueQ_lb:i_trueQ_ub] - betan_eff_maxdiff = np.amax(np.abs(betan_eff_diff), axis=1) - betan_eff_maxdiff_trueQ = np.amax(np.abs(betan_eff_diff_trueQ), axis=1) - betan_eff_rmse = np.empty(namps) - betan_eff_rmse_trueQ = np.empty(namps) - for i in range(namps): - betan_eff_rmse[i] = rmse(betan_eff[i, :], betan_eff_predict[i, :]) - betan_eff_rmse_trueQ[i] = rmse(betan_eff_trueQ[i, :], betan_eff_predict_trueQ[i, :]) - - - if plot_bool == 1: - - # Plotting - print('plotting') - - mymap = cm.get_cmap('jet') - sm_amp = plt.cm.ScalarMappable(cmap=mymap, norm=plt.Normalize(Amin * 1e-3, Amax * 1e-3)) - sm_amp._A = [] - - # 1: betan_eff - fig, ax = plt.subplots(figsize=(21, 7)) - ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) - ax.set_ylabel('$\\beta_{n,\ eff}\ (ms^{-1})$', fontsize=28) - ax.set_xlim(Qmin * 1e2, Qmax * 1e2) - for i in range(namps): - ax.plot(Qm * 1e2, betan_eff[i, :] * 1e-3, c=mymap(rescale(amps[i], Amin, Amax))) - cbar = plt.colorbar(sm_amp) - cbar.ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=28) - plt.tight_layout() - - # 2: betan_eff_sub - fig, ax = plt.subplots(figsize=(21, 7)) - ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) - ax.set_ylabel('$\\beta_{n,\ eff-sub}\ (ms^{-1})$', fontsize=28) - ax.set_xlim(Qmin * 1e2, Qmax * 1e2) - for i in range(namps): - ax.plot(Qm * 1e2, betan_eff_sub[i, :] * 1e-3, - c=mymap(rescale(amps[i], Amin, Amax))) - cbar = plt.colorbar(sm_amp) - cbar.ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=28) - plt.tight_layout() - - # 3: betan_eff_sub_peaks - fig, ax = plt.subplots(figsize=(21, 7)) - ax.set_xlabel('$A_{drive}\ (kPa)$', fontsize=28) - ax.set_ylabel('$\\beta_{n,\ eff-sub-peaks}\ (ms^{-1})$', fontsize=28) - ax.scatter(amps * 1e-3, betan_eff_sub_peaks * 1e-3, s=30, c='C0', label='data') - ax.plot(amps * 1e-3, betan_eff_sub_peaks_fit * 1e-3, c='C1', label='fit') - ax.legend(fontsize=28) - plt.tight_layout() - - # 5: betan_eff_sub_norm - fig, ax = plt.subplots(figsize=(21, 7)) - ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) - ax.set_ylabel('$\\beta_{n,\ eff-sub-norm}\ (-)$', fontsize=28) - ax.set_xlim(Qmin * 1e2, Qmax * 1e2) - ax.grid() - for i in range(namps - 1): - ax.plot(Qm * 1e2, betan_eff_sub_norm[i, :], - c=mymap(rescale(amps[i], Amin, Amax))) - for i in range(namps - 1): - ax.plot(Qm * 1e2, betan_eff_sub_norm_fit[i, :], '--', - c=mymap(rescale(amps[i], Amin, Amax))) - cbar = plt.colorbar(sm_amp) - cbar.ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=28) - plt.tight_layout() - - - # 6: parameters - fig, ax = plt.subplots(figsize=(21, 7)) - ax.set_xlabel('$A_{drive}\ (kPa)$', fontsize=28) - ax.set_ylabel('$\\beta_{n,\ eff-sub-norm}\ fit\ params$', fontsize=28) - ax.plot(amps[1:] * 1e-3, params[:, 0], label='mu1') - ax.plot(amps[1:] * 1e-3, params[:, 1], label='mu2') - ax.plot(amps[1:] * 1e-3, params[:, 2], label='sigma1') - ax.plot(amps[1:] * 1e-3, params[:, 3], 
label='sigma2') - ax.plot(amps[1:] * 1e-3, params[:, 4], label='a1') - ax.plot(amps[1:] * 1e-3, params[:, 5], label='a2') - ax.grid() - ax.legend(fontsize=28) - - - # 7: betan_eff_predict - fig, ax = plt.subplots(figsize=(21, 7)) - ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) - ax.set_ylabel('$\\beta_{n,\ eff}\ prediction\ (ms^{-1})$', fontsize=28) - ax.set_xlim(Qmin * 1e2, Qmax * 1e2) - for i in range(namps): - ax.plot(Qm * 1e2, betan_eff_predict[i, :] * 1e-3, - c=mymap(rescale(amps[i], Amin, Amax))) - cbar = plt.colorbar(sm_amp) - cbar.ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=28) - plt.tight_layout() - - - # 8: betan_eff_predict - betan_eff - fig, ax = plt.subplots(figsize=(21, 7)) - ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) - ax.set_ylabel('$\\beta_{n,\ eff}\ difference\ (ms^{-1})$', fontsize=28) - ax.set_xlim(Qmin * 1e2, Qmax * 1e2) - for i in range(namps): - ax.plot(Qm * 1e2, betan_eff_diff[i, :] * 1e-3, - c=mymap(rescale(amps[i], Amin, Amax))) - cbar = plt.colorbar(sm_amp) - cbar.ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=28) - plt.tight_layout() - - # 9: RMSE & max absolute error - fig, ax = plt.subplots(figsize=(21, 7)) - ax.set_xlabel('$A_{drive} \ (kPa)$', fontsize=28) - ax.set_ylabel('$Error\ (ms^{-1})$', fontsize=28) - ax.plot(amps * 1e-3, betan_eff_rmse * 1e-3, c='C0', - label='$RMSE\ -\ entire\ Q_m\ range$') - ax.plot(amps * 1e-3, betan_eff_rmse_trueQ * 1e-3, c='C1', - label='$RMSE\ -\ realistic\ Q_m\ range$') - ax.plot(amps * 1e-3, betan_eff_maxdiff * 1e-3, '--', c='C0', - label='$MAE\ -\ entire\ Q_m\ range$') - ax.plot(amps * 1e-3, betan_eff_maxdiff_trueQ * 1e-3, '--', c='C1', - label='$MAE\ -\ realistic\ Q_m\ range$') - ax.legend(fontsize=28) - plt.tight_layout() - - plt.show() - diff --git a/deprecated/curve fitting/fit_eff_coeffs.py b/deprecated/curve fitting/fit_eff_coeffs.py deleted file mode 100644 index b7ab4f4..0000000 --- a/deprecated/curve fitting/fit_eff_coeffs.py +++ /dev/null @@ -1,215 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# @Author: Theo Lemaire -# @Date: 2017-01-15 18:08:06 -# @Email: theo.lemaire@epfl.ch -# @Last Modified by: Theo Lemaire -# @Last Modified time: 2017-01-20 10:18:50 - -''' Plot the profiles of the 9 charge-dependent "effective" HH coefficients, -along with a fitted mathematical expression ''' - - -import os -import ntpath -import pickle -import matplotlib.pyplot as plt -import numpy as np -# import math as math -import scipy.special as sp -from scipy.optimize import curve_fit -from utils import OpenFilesDialog, rsquared - - - - - -def fit_amn(x, a, b, c, d): - # return a * c * (x - c - b) * np.exp((x - b) / c) - x + d - return a * c**2 * sp.spence(1 - (-np.exp(-b / c) * (np.exp(x / c) - np.exp(b / c)))) + d - - -# -------------------------------------------------------------------- - -def gaus(x, a, x0, sigma): - return a * np.exp(- (x - x0)**2 / (2 * sigma**2)) - - -def compexp(x, a, b, c, d, e, f): - return (a * x + b) / (c * np.exp(d * x + e) + f) - - -def expgrowth(x, x0, a): - return np.exp(a * (x - x0)) - - -def expdecay(x, x0, a): - return np.exp(-a * (x - x0)) - - -def sigmoid(x, x0, a, b): - return a / (1 + np.exp(- b * (x - x0))) - - -def dualexp(x, x1, x2, a, b): - return np.exp(a * (x - x1)) + np.exp(- b * (x - x2)) - - -def dualregime(x, x0, a, b): - return a * (x - x0) / (np.exp(- b * (x - x0)) - 1) - - -def skewed_gaussian(x, mu=0, sigma=1, alpha=0, a=1, c=0): - normpdf = (1 / (sigma * np.sqrt(2 * np.pi))) * np.exp(-(np.power((x - mu), 2) / (2 * np.power(sigma, 2)))) - normcdf = (0.5 * (1 + 
sp.erf((alpha * ((x - mu) / sigma)) / (np.sqrt(2))))) - return 2 * a * normpdf * normcdf + c - - -# Select data files (PKL) -lookup_root = '../Output/lookups 0.35MHz linear amplitude/' -lookup_absroot = os.path.abspath(lookup_root) -lookup_filepath = OpenFilesDialog(lookup_absroot, 'pkl') - -# Check dialog output -if not lookup_filepath: - print('error: no lookup table selected') -elif len(lookup_filepath) > 1: - print('error multiple lookup tables selected') -else: - - # Load lookup table - lookup_filename = ntpath.basename(lookup_filepath[0]) - print('loading lookup table') - with open(lookup_filepath[0], 'rb') as fh: - lookup = pickle.load(fh) - - print('finding best fits with analytical expressions') - - # Vm_eff - print('Vm_eff') - z = np.polyfit(lookup['Q'], lookup['V_eff'], 3) - p = np.poly1d(z) - Veff_fit = p(lookup['Q']) - r2 = rsquared(lookup['V_eff'], Veff_fit) - fig, ax = plt.subplots(figsize=(21, 7)) - ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) - ax.set_ylabel('$V_{m,\ eff}\ (mV)$', fontsize=28) - ax.plot(lookup['Q'] * 1e2, lookup['V_eff'], linewidth=2, label='data') - ax.plot(lookup['Q'] * 1e2, Veff_fit, linewidth=2, label='fit') - ax.text(0.45, 0.9, '$R^2 = {:.5f}$'.format(r2), transform=ax.transAxes, fontsize=28) - ax.legend() - - # alpha_m_eff - print('alpha_m_eff') - # z = np.polyfit(lookup['Q'], lookup['alpha_m_eff'], 5) - # p = np.poly1d(z) - # alpha_m_eff_fit = p(lookup['Q']) - popt, _ = curve_fit(fit_amn, lookup['Q'], lookup['alpha_m_eff'], maxfev=100000) - alpha_m_eff_fit = fit_amn(lookup['Q'], *popt) - r2 = rsquared(lookup['alpha_m_eff'], alpha_m_eff_fit) - fig, ax = plt.subplots(figsize=(21, 7)) - ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) - ax.set_ylabel('$\\alpha_{m,\ eff}\ (ms^{-1})$', fontsize=28) - ax.plot(lookup['Q'] * 1e2, lookup['alpha_m_eff'] * 1e-3, linewidth=2, label='data') - ax.plot(lookup['Q'] * 1e2, alpha_m_eff_fit * 1e-3, linewidth=2, label='fit') - ax.text(0.45, 0.9, '$R^2 = {:.5f}$'.format(r2), transform=ax.transAxes, fontsize=28) - ax.legend() - - # beta_m_eff - print('beta_m_eff') - pguess = (-0.7, 0.2, 3, 5000) - beta_m_eff_guess = skewed_gaussian(lookup['Q'], *pguess) - popt, _ = curve_fit(skewed_gaussian, lookup['Q'], lookup['beta_m_eff'], p0=pguess) - beta_m_eff_fit = skewed_gaussian(lookup['Q'], *popt) - r2 = rsquared(lookup['beta_m_eff'], beta_m_eff_fit) - fig, ax = plt.subplots(figsize=(21, 7)) - ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) - ax.set_ylabel('$\\beta_{m,\ eff}\ (ms^{-1})$', fontsize=28) - ax.plot(lookup['Q'] * 1e2, lookup['beta_m_eff'] * 1e-3, linewidth=2, label='data') - ax.plot(lookup['Q'] * 1e2, beta_m_eff_fit * 1e-3, linewidth=2, label='fit') - ax.plot(lookup['Q'] * 1e2, beta_m_eff_guess * 1e-3, linewidth=2, label='guess') - ax.text(0.45, 0.9, '$R^2 = {:.5f}$'.format(r2), transform=ax.transAxes, fontsize=28) - ax.legend() - - # alpha_h_eff - print('alpha_h_eff') - pguess = (-0.7, 0.2, 3, 20000) - alpha_h_eff_guess = skewed_gaussian(lookup['Q'], *pguess) - popt, _ = curve_fit(skewed_gaussian, lookup['Q'], lookup['alpha_h_eff'], p0=pguess) - alpha_h_eff_fit = skewed_gaussian(lookup['Q'], *popt) - r2 = rsquared(lookup['alpha_h_eff'], alpha_h_eff_fit) - fig, ax = plt.subplots(figsize=(21, 7)) - ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) - ax.set_ylabel('$\\alpha_{h,\ eff}\ (ms^{-1})$', fontsize=28) - ax.plot(lookup['Q'] * 1e2, lookup['alpha_h_eff'] * 1e-3, linewidth=2, label='data') - ax.plot(lookup['Q'] * 1e2, alpha_h_eff_fit * 1e-3, linewidth=2, label='fit') - ax.plot(lookup['Q'] * 1e2, alpha_h_eff_guess 
* 1e-3, label='guess') - ax.text(0.45, 0.9, '$R^2 = {:.5f}$'.format(r2), transform=ax.transAxes, fontsize=28) - ax.legend() - - # beta_h_eff - print('beta_h_eff') - popt, _ = curve_fit(sigmoid, lookup['Q'], lookup['beta_h_eff'], p0=(-0.1, 4000, 20)) - beta_h_eff_fit = sigmoid(lookup['Q'], *popt) - r2 = rsquared(lookup['beta_h_eff'], beta_h_eff_fit) - fig, ax = plt.subplots(figsize=(21, 7)) - ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) - ax.set_ylabel('$\\beta_{h,\ eff}\ (ms^{-1})$', fontsize=28) - ax.plot(lookup['Q'] * 1e2, lookup['beta_h_eff'] * 1e-3, linewidth=2, label='data') - ax.plot(lookup['Q'] * 1e2, beta_h_eff_fit * 1e-3, linewidth=2, label='fit') - ax.text(0.45, 0.9, '$R^2 = {:.5f}$'.format(r2), transform=ax.transAxes, fontsize=28) - ax.legend() - - # alpha_n_eff - print('alpha_n_eff') - popt, _ = curve_fit(gaus, lookup['Q'], lookup['alpha_n_eff']) - alpha_n_eff_fit = gaus(lookup['Q'], *popt) - r2 = rsquared(lookup['alpha_n_eff'], alpha_n_eff_fit) - fig, ax = plt.subplots(figsize=(21, 7)) - ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) - ax.set_ylabel('$\\alpha_{n,\ eff}\ (ms^{-1})$', fontsize=28) - ax.plot(lookup['Q'] * 1e2, lookup['alpha_n_eff'] * 1e-3, linewidth=2, label='data') - ax.plot(lookup['Q'] * 1e2, alpha_n_eff_fit * 1e-3, linewidth=2, label='fit') - ax.text(0.45, 0.9, '$R^2 = {:.5f}$'.format(r2), transform=ax.transAxes, fontsize=28) - ax.legend() - - # beta_n_eff - print('beta_n_eff') - popt, _ = curve_fit(expdecay, lookup['Q'], lookup['beta_n_eff']) - beta_n_eff_fit = expdecay(lookup['Q'], *popt) - r2 = rsquared(lookup['beta_n_eff'], beta_n_eff_fit) - fig, ax = plt.subplots(figsize=(21, 7)) - ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) - ax.set_ylabel('$\\beta_{n,\ eff}\ (ms^{-1})$', fontsize=28) - ax.plot(lookup['Q'] * 1e2, lookup['beta_n_eff'] * 1e-3, linewidth=2, label='data') - ax.plot(lookup['Q'] * 1e2, beta_n_eff_fit * 1e-3, linewidth=2, label='fit') - ax.text(0.45, 0.9, '$R^2 = {:.5f}$'.format(r2), transform=ax.transAxes, fontsize=28) - ax.legend() - - # pinf_over_taup_eff - print('pinf_over_taup_eff') - popt, _ = curve_fit(expgrowth, lookup['Q'], lookup['pinf_over_taup_eff']) - pinf_over_taup_eff_fit = expgrowth(lookup['Q'], *popt) - r2 = rsquared(lookup['pinf_over_taup_eff'], pinf_over_taup_eff_fit) - fig, ax = plt.subplots(figsize=(21, 7)) - ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) - ax.set_ylabel('$p_{\\infty} / \\tau_p\ (ms^{-1})$', fontsize=28) - ax.plot(lookup['Q'] * 1e2, lookup['pinf_over_taup_eff'] * 1e-3, linewidth=2, label='data') - ax.plot(lookup['Q'] * 1e2, pinf_over_taup_eff_fit * 1e-3, linewidth=2, label='fit') - ax.text(0.45, 0.9, '$R^2 = {:.5f}$'.format(r2), transform=ax.transAxes, fontsize=28) - ax.legend() - - # inv_taup_eff - print('inv_taup_eff') - popt, _ = curve_fit(dualexp, lookup['Q'], lookup['inv_taup_eff'], p0=(-0.2, -0.04, 15, 15)) - inv_taup_eff_fit = dualexp(lookup['Q'], *popt) - r2 = rsquared(lookup['inv_taup_eff'], inv_taup_eff_fit) - fig, ax = plt.subplots(figsize=(21, 7)) - ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) - ax.set_ylabel('$1 / \\tau_p\ (ms^{-1})$', fontsize=28) - ax.plot(lookup['Q'] * 1e2, lookup['inv_taup_eff'] * 1e-3, linewidth=2, label='data') - ax.plot(lookup['Q'] * 1e2, inv_taup_eff_fit * 1e-3, linewidth=2, label='fit') - ax.text(0.45, 0.9, '$R^2 = {:.5f}$'.format(r2), transform=ax.transAxes, fontsize=28) - ax.legend() - - plt.show() diff --git a/deprecated/kriging/lhs.py b/deprecated/kriging/lhs.py deleted file mode 100644 index 6d6b221..0000000 --- a/deprecated/kriging/lhs.py +++ 
/dev/null @@ -1,21 +0,0 @@ -import numpy as np -from pyDOE import lhs - - -def lh2DWithCorners(n, x1_range, x2_range, crtrn): - ''' This function generates a 2D Latin Hypercube distribution vector, scaled up - to the input domain range, and containing the 4 corners of the domain. - - :param n: number of samples to generate (including the 4 corners) - :param x1_range: range of the 1st input variable - :param x2_range: range of the 2nd input variable - :param crtrn: criterion for Latin Hypercube sampling - :return: 2xn array of generated samples - ''' - - lh = lhs(2, samples=(n - 4), criterion=crtrn) - corners = np.array([[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]]) - lhc = np.vstack((lh, corners)) - lhc[:, 0] = lhc[:, 0] * (x1_range[1] - x1_range[0]) + x1_range[0] - lhc[:, 1] = lhc[:, 1] * (x2_range[1] - x2_range[0]) + x2_range[0] - return lhc diff --git a/deprecated/kriging/test_pykrige_Vmeff.py b/deprecated/kriging/test_pykrige_Vmeff.py deleted file mode 100644 index 77f9751..0000000 --- a/deprecated/kriging/test_pykrige_Vmeff.py +++ /dev/null @@ -1,329 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# @Author: Theo Lemaire -# @Date: 2017-02-15 15:59:37 -# @Email: theo.lemaire@epfl.ch -# @Last Modified by: Theo Lemaire -# @Last Modified time: 2017-04-26 12:08:41 - -''' Fit a kriging model to a discrete 2D map of effective potentials - for various charges and acoustic amplitudes, and use kriging predictor - to generate a new 2D map of effective potentials within the original input range. ''' - -import os -import re -import ntpath -import pickle -import matplotlib.pyplot as plt -import matplotlib.cm as cm -import numpy as np -from scipy.spatial.distance import pdist, squareform -from utils import OpenFilesDialog, rescale, rmse -from pykrige.ok import OrdinaryKriging -import pykrige.kriging_tools as kt - - -class Variable: - ''' dummy class to contain information about the variable ''' - - name = '' - unit = '' - lookup = '' - factor = 1. - max_error = 0. 
- - def __init__(self, var_name, var_unit, var_lookup, var_factor, var_max_error): - self.name = var_name - self.unit = var_unit - self.factor = var_factor - self.lookup = var_lookup - self.max_error = var_max_error - - -# Select data files (PKL) -lookup_root = '../Output/lookups 0.35MHz charge extended/' -lookup_absroot = os.path.abspath(lookup_root) -lookup_filepaths = OpenFilesDialog(lookup_absroot, 'pkl') -rgxp = re.compile('lookups_a(\d*.\d*)nm_f(\d*.\d*)kHz_A(\d*.\d*)kPa_dQ(\d*.\d*)nC_cm2.pkl') - -# Set data variable and Kriging parameters -# varinf = Variable('\\alpha_{m, eff}', 'ms^{-1}', 'alpha_m_eff', 1e-3, 1e-10) -varinf = Variable('V_{m, eff}', 'mV', 'V_eff', 1., 1e-8) -nQ_sparse_target = 30 -namps_sparse_target = 10 - -plot_all = True - -# Check dialog output -if not lookup_filepaths: - print('error: no lookup table selected') -else: - print('importing lookup tables') - nfiles = len(lookup_filepaths) - amps = np.empty(nfiles) - - for i in range(nfiles): - - # Load lookup table - lookup_filename = ntpath.basename(lookup_filepaths[i]) - mo = rgxp.fullmatch(lookup_filename) - if not mo: - print('Error: lookup file does not match regular expression pattern') - else: - # Retrieve stimulus parameters - Fdrive = float(mo.group(2)) * 1e3 - Adrive = float(mo.group(3)) * 1e3 - dQ = float(mo.group(4)) * 1e-2 - amps[i] = Adrive - if Adrive == 0: - baseline_ind = i - - # Retrieve coefficients data - with open(lookup_filepaths[i], 'rb') as fh: - lookup = pickle.load(fh) - if i == 0: - Qm = lookup['Q'] - nQ = np.size(Qm) - var = np.empty((nfiles, nQ)) - var[i, :] = lookup[varinf.lookup] - else: - if np.array_equal(Qm, lookup['Q']): - var[i, :] = lookup[varinf.lookup] - else: - print('Error: charge vector not consistent') - - # Compute data metrics - namps = amps.size - Amin = np.amin(amps) - Amax = np.amax(amps) - Qmin = np.amin(Qm) - Qmax = np.amax(Qm) - varmin = np.amin(var) - varmax = np.amax(var) - print('Initial data:', nQ, 'charges,', namps, 'amplitudes') - - # Resample arrays - print('resampling arrays') - assert nQ_sparse_target <= nQ and namps_sparse_target <= namps - Qm_sampling_factor = int(nQ / nQ_sparse_target) - amps_sampling_factor = int(namps / namps_sparse_target) - Qm_sparse = Qm[::Qm_sampling_factor] - amps_sparse = amps[::amps_sampling_factor] - nQ_sparse = Qm_sparse.size - namps_sparse = amps_sparse.size - var_sparse = var[::amps_sampling_factor, ::Qm_sampling_factor] - Qmin_sparse = np.amin(Qm_sparse) - Qmax_sparse = np.amax(Qm_sparse) - Amin_sparse = np.amin(amps_sparse) - Amax_sparse = np.amax(amps_sparse) - print('Sparse data:', nQ_sparse, 'charges,', namps_sparse, 'amplitudes') - - # Normalize and serialize - print('normalizing and serializing sparse data') - Qm_sparse_norm = rescale(Qm_sparse, Qmin_sparse, Qmax_sparse) - amps_sparse_norm = rescale(amps_sparse, Amin_sparse, Amax_sparse) - Qm_sparse_norm_grid, amps_sparse_norm_grid = np.meshgrid(Qm_sparse_norm, amps_sparse_norm) - Qm_sparse_norm_ser = np.reshape(Qm_sparse_norm_grid, nQ_sparse * namps_sparse) - amps_sparse_norm_ser = np.reshape(amps_sparse_norm_grid, nQ_sparse * namps_sparse) - var_sparse_ser = np.reshape(var_sparse, nQ_sparse * namps_sparse) - - # Compute normalized distance matrix and data semivariogram - # print('computing normalized distance matrix and data semi-variogram') - # norm_dist = squareform(pdist(np.array([amps_sparse_norm_ser, Qm_sparse_norm_ser]).transpose())) - # N = norm_dist.shape[0] - # norm_dist_ub = 1.6 - # assert np.amax(norm_dist) < norm_dist_ub,\ - # 'Error: max normalized 
distance greater than semi-variogram upper bound' - # bw = 0.1 # lag bandwidth - # lags = np.arange(0, 1.6, bw) # lag array - # nlags = lags.size - # sv = np.empty(nlags) - # for k in range(nlags): - # # print('lag = ', lags[k]) - # Z = list() - # for i in range(N): - # for j in range(i + 1, N): - # if norm_dist[i, j] >= lags[k] - bw and norm_dist[i, j] <= lags[k] + bw: - # Z.append((var_sparse_ser[i] - var_sparse_ser[j])**2.0) - # sv[k] = np.sum(Z) / (2.0 * len(Z)) - - - # Fit kriging model - print('fitting kriging model to sparse data') - OK = OrdinaryKriging(amps_sparse_norm_ser, Qm_sparse_norm_ser, var_sparse_ser, - variogram_model='linear') - - # Proof-of-concept: dummy prediction at known values of charge and amplitude - print('re-computing sparse data from kriging predictor') - var_sparse_krig, _ = OK.execute('grid', rescale(amps_sparse, Amin_sparse, Amax_sparse), - rescale(Qm_sparse, Qmin_sparse, Qmax_sparse)) - var_sparse_krig = var_sparse_krig.transpose() - var_sparse_max_abs_error = np.amax(np.abs(var_sparse - var_sparse_krig)) * varinf.factor - assert var_sparse_max_abs_error < varinf.max_error,\ - 'High Kriging error in training set ({:.2e} {})'.format(var_sparse_max_abs_error, - varinf.unit) - - # Predict data at unknown values - print('re-computing original data from kriging predictor') - var_krig, var_krig_ss = OK.execute('grid', rescale(amps, Amin, Amax), rescale(Qm, Qmin, Qmax)) - var_krig = var_krig.transpose() - var_krig_ss = var_krig_ss.transpose() - var_krig_std = np.sqrt(var_krig_ss) - var_krig_std_min = np.amin(var_krig_std) - var_krig_std_max = np.amax(var_krig_std) - varmin = np.amin([varmin, np.amin(var_krig)]) - varmax = np.amin([varmax, np.amax(var_krig)]) - var_levels = np.linspace(varmin, varmax, 20) * varinf.factor - var_abs_diff = np.abs(var - var_krig) - var_abs_diff_max = np.amax(var_abs_diff) - var_diff_levels = np.linspace(0., np.amax(var_abs_diff), 20) * varinf.factor - var_std_levels = np.linspace(0., np.amax(var_krig_std_max), 20) * varinf.factor - - # Compare original and predicted profiles - print('comparing original and predicted profiles') - var_rmse = rmse(var, var_krig) * varinf.factor - var_max_abs_error = np.amax(np.abs(var - var_krig)) * varinf.factor - print('RMSE = {:.2f} {}, MAE = {:.2f} {}'.format(var_rmse, varinf.unit, - var_max_abs_error, varinf.unit)) - - # Plotting - print('plotting') - - mymap = cm.get_cmap('viridis') - sm_amp = plt.cm.ScalarMappable(cmap=mymap, norm=plt.Normalize(Amin * 1e-3, Amax * 1e-3)) - sm_amp._A = [] - sm_var = plt.cm.ScalarMappable(cmap=mymap, norm=plt.Normalize(varmin * varinf.factor, - varmax * varinf.factor)) - sm_var._A = [] - sm_var_diff = plt.cm.ScalarMappable(cmap=mymap, - norm=plt.Normalize(0., var_abs_diff_max * varinf.factor)) - sm_var_diff._A = [] - sm_var_std = plt.cm.ScalarMappable(cmap=mymap, - norm=plt.Normalize(var_krig_std_min * varinf.factor, - var_krig_std_max * varinf.factor)) - sm_var_std._A = [] - - if plot_all: - - - # True function map - fig, ax = plt.subplots(figsize=(10, 6)) - ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=20) - ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=20) - ax.set_title('$' + varinf.name + '(Q_m,\ A_{drive})$ map', fontsize=20) - ax.contourf(Qm * 1e5, amps * 1e-3, var * varinf.factor, levels=var_levels, cmap='viridis') - xgrid, ygrid, = np.meshgrid(Qm * 1e5, amps * 1e-3) - ax.scatter(xgrid, ygrid, c='black', s=5) - fig.subplots_adjust(right=0.85) - cbar_ax = fig.add_axes([0.87, 0.1, 0.02, 0.8]) - fig.add_axes() - fig.colorbar(sm_var, cax=cbar_ax) - 
cbar_ax.set_ylabel('$' + varinf.name + '\ (' + varinf.unit + ')$', fontsize=20) - - # True function profiles - fig, ax = plt.subplots(figsize=(10, 6)) - ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=20) - ax.set_ylabel('$' + varinf.name + '\ (' + varinf.unit + ')$', fontsize=20) - ax.set_title('$' + varinf.name + '(Q_m)$ for different amplitudes', fontsize=20) - for i in range(namps): - ax.plot(Qm * 1e5, var[i, :] * varinf.factor, c=mymap(rescale(amps[i], Amin, Amax))) - fig.subplots_adjust(right=0.85) - cbar_ax = fig.add_axes([0.87, 0.1, 0.02, 0.8]) - fig.add_axes() - fig.colorbar(sm_amp, cax=cbar_ax) - cbar_ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=20) - - # Sparse function profiles - # fig, ax = plt.subplots(figsize=(10, 6)) - # ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=20) - # ax.set_ylabel('$' + varinf.name + '\ (' + varinf.unit + ')$', fontsize=20) - # ax.set_title('sparse $' + varinf.name + '(Q_m)$ for different amplitudes', fontsize=20) - # for i in range(namps_sparse): - # ax.plot(Qm_sparse * 1e5, var_sparse[i, :] * varinf.factor, - # c=mymap(rescale(amps_sparse[i], Amin, Amax))) - # fig.subplots_adjust(right=0.85) - # cbar_ax = fig.add_axes([0.87, 0.1, 0.02, 0.8]) - # fig.add_axes() - # fig.colorbar(sm_amp, cax=cbar_ax) - # cbar_ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=20) - - # 3: sparse var(Qm, Adrive) scattered map - # fig, ax = plt.subplots(figsize=(10, 6)) - # ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=20) - # ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=20) - # ax.set_title('sparse $' + varinf.name + '(Q_m,\ A_{drive})$ scattered map', fontsize=20) - # xgrid, ygrid, = np.meshgrid(Qm_sparse * 1e5, amps_sparse * 1e-3) - # ax.scatter(xgrid, ygrid, c=var_sparse * varinf.factor, cmap='viridis') - # fig.subplots_adjust(right=0.85) - # cbar_ax = fig.add_axes([0.87, 0.1, 0.02, 0.8]) - # fig.add_axes() - # fig.colorbar(sm_var, cax=cbar_ax) - # cbar_ax.set_ylabel('$' + varinf.name + '\ (' + varinf.unit + ')$', fontsize=20) - - # # 4: data semivariogram - # fig, ax = plt.subplots(figsize=(10, 6)) - # ax.set_xlabel('Normalized lag', fontsize=20) - # ax.set_ylabel('Semivariance', fontsize=20) - # ax.set_title('Semivariogram', fontsize=20) - # ax.plot(lags, sv, '.-') - - # Estimate map - fig, ax = plt.subplots(figsize=(10, 6)) - ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=20) - ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=20) - ax.set_title('$' + varinf.name + '(Q_m,\ A_{drive})$ estimate map', fontsize=20) - ax.contourf(Qm * 1e5, amps * 1e-3, var_krig * varinf.factor, levels=var_levels, - cmap='viridis') - xgrid, ygrid, = np.meshgrid(Qm_sparse * 1e5, amps_sparse * 1e-3) - ax.scatter(xgrid, ygrid, c='black', s=5) - fig.subplots_adjust(right=0.85) - cbar_ax = fig.add_axes([0.87, 0.1, 0.02, 0.8]) - fig.add_axes() - fig.colorbar(sm_var, cax=cbar_ax) - cbar_ax.set_ylabel('$' + varinf.name + '\ (' + varinf.unit + ')$', fontsize=20) - - - # 5: Prediction: more dense Vm_krig(Qm) plots for each Adrive - fig, ax = plt.subplots(figsize=(10, 6)) - ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=20) - ax.set_ylabel('$' + varinf.name + '\ (' + varinf.unit + ')$', fontsize=20) - ax.set_title('Kriging: prediction of original $' + varinf.name + '(Q_m)$ profiles', - fontsize=20) - for i in range(namps): - ax.plot(Qm * 1e5, var_krig[i, :] * varinf.factor, c=mymap(rescale(amps[i], Amin, Amax))) - fig.subplots_adjust(right=0.85) - cbar_ax = fig.add_axes([0.87, 0.1, 0.02, 0.8]) - fig.add_axes() - fig.colorbar(sm_amp, cax=cbar_ax) - cbar_ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=20) - - # 6: 
Vm(Qm, Adrive) kriging error contour map - fig, ax = plt.subplots(figsize=(10, 6)) - ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=20) - ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=20) - ax.set_title('Kriging error: $' + varinf.name + '(Q_m,\ A_{drive})$ contour map', fontsize=20) - ax.contourf(Qm * 1e5, amps * 1e-3, var_abs_diff * varinf.factor, levels=var_diff_levels, - cmap='viridis') - xgrid, ygrid, = np.meshgrid(Qm_sparse * 1e5, amps_sparse * 1e-3) - ax.scatter(xgrid, ygrid, c='black', s=5) - fig.subplots_adjust(right=0.85) - cbar_ax = fig.add_axes([0.87, 0.1, 0.02, 0.8]) - fig.add_axes() - fig.colorbar(sm_var_diff, cax=cbar_ax) - cbar_ax.set_ylabel('$' + varinf.name + '\ abs.\ error\ (' + varinf.unit + ')$', fontsize=20) - - # 6: Vm(Qm, Adrive) kriging predicted error contour map - fig, ax = plt.subplots(figsize=(10, 6)) - ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=20) - ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=20) - ax.set_title('Kriging predicted error: $' + varinf.name + '(Q_m,\ A_{drive})$ contour map', fontsize=20) - ax.contourf(Qm * 1e5, amps * 1e-3, var_krig_std * varinf.factor, cmap='viridis') - xgrid, ygrid, = np.meshgrid(Qm_sparse * 1e5, amps_sparse * 1e-3) - ax.scatter(xgrid, ygrid, c='black', s=5) - fig.subplots_adjust(right=0.85) - cbar_ax = fig.add_axes([0.87, 0.1, 0.02, 0.8]) - fig.add_axes() - fig.colorbar(sm_var_std, cax=cbar_ax) - cbar_ax.set_ylabel('$' + varinf.name + '\ abs.\ error\ (' + varinf.unit + ')$', fontsize=20) - - plt.show() diff --git a/deprecated/kriging/test_pykriging1D.py b/deprecated/kriging/test_pykriging1D.py deleted file mode 100644 index 35fe91c..0000000 --- a/deprecated/kriging/test_pykriging1D.py +++ /dev/null @@ -1,116 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# @Author: Theo Lemaire -# @Date: 2017-04-24 11:04:39 -# @Email: theo.lemaire@epfl.ch -# @Last Modified by: Theo Lemaire -# @Last Modified time: 2017-05-26 13:44:14 - -''' Predict a 1D Vmeff profile using the PyKriging module. ''' - -import pickle -import numpy as np -import matplotlib.pyplot as plt -from pyKriging.krige import kriging - - -class Variable: - ''' dummy class to contain information about the variable ''' - - name = '' - unit = '' - lookup = '' - factor = 1. - max_error = 0. 
- - def __init__(self, var_name, var_unit, var_lookup, var_factor, var_max_error): - self.name = var_name - self.unit = var_unit - self.factor = var_factor - self.lookup = var_lookup - self.max_error = var_max_error - - -# Set data variable and Kriging parameters -varinf = Variable('V_{m, eff}', 'mV', 'V_eff', 1., 1e-1) -# varinf = Variable('\\alpha_{m, eff}', 'ms^{-1}', 'alpha_m_eff', 1e-3, 1e1) -# varinf = Variable('\\beta_{m, eff}', 'ms^{-1}', 'beta_m_eff', 1e-3, 5e0) -# varinf = Variable('\\alpha_{h, eff}', 'ms^{-1}', 'alpha_h_eff', 1e-3, 1e1) -# varinf = Variable('\\beta_{h, eff}', 'ms^{-1}', 'beta_h_eff', 1e-3, 1e1) - - -# Define true function by interpolation from specific profile -def f(x): - return np.interp(x, Qm, xvect) - - -# Load coefficient profile -dirpath = 'C:/Users/admin/Google Drive/PhD/NBLS model/Output/lookups 0.35MHz charge extended/' -filepath = dirpath + 'lookups_a32.0nm_f350.0kHz_A100.0kPa_dQ1.0nC_cm2.pkl' -filepath0 = dirpath + 'lookups_a32.0nm_f350.0kHz_A0.0kPa_dQ1.0nC_cm2.pkl' -with open(filepath, 'rb') as fh: - lookup = pickle.load(fh) - Qm = lookup['Q'] - xvect = lookup[varinf.lookup] -with open(filepath0, 'rb') as fh: - lookup = pickle.load(fh) - xvect0 = lookup[varinf.lookup] - -# xvect = xvect - xvect0 - - -print('defining estimation vector') -x = np.atleast_2d(np.linspace(-150., 150., 1000) * 1e-5).T -y = f(x).ravel() - -print('defining prediction vector') -X0 = np.atleast_2d(np.linspace(-150., 150., 10) * 1e-5).T -Y0 = f(X0).ravel() - -print('creating kriging model') -k = kriging(X0, Y0) - -print('training kriging model') -k.train() - -print('predicting') -y_pred0 = np.array([k.predict(xx) for xx in x]) - -X = X0 -Y = Y0 - -numiter = 10 -for i in range(numiter): - print('Infill iteration {0} of {1}....'.format(i + 1, numiter)) - newpoints = k.infill(1, method='error') - for point in newpoints: - newX = k.inversenormX(point) - newY = f(newX)[0] - print('adding point ({:.3f}, {:.3f})'.format(newX[0] * 1e5, newY * varinf.factor)) - X = np.append(X, [newX], axis=0) - Y = np.append(Y, newY) - k.addPoint(newX, newY, norm=True) - k.train() - -y_pred = np.array([k.predict(xx) for xx in x]) - -fig, ax = plt.subplots() -ax.plot(x * 1e5, y * varinf.factor, 'r:', label=u'true function') -ax.plot(X0 * 1e5, Y0 * varinf.factor, 'r.', markersize=10, label=u'Initial observations') -ax.plot(x * 1e5, y_pred0 * varinf.factor, 'b-', label=u'Initial prediction') -ax.set_xlabel('$Q_m\ (nC/cm^2)$') -ax.set_ylabel('$' + varinf.name + '\ (' + varinf.unit + ')$') -ax.legend() - - -fig, ax = plt.subplots() -ax.plot(x * 1e5, y * varinf.factor, 'r:', label=u'true function') -ax.plot(X * 1e5, Y * varinf.factor, 'r.', markersize=10, label=u'Final observations') -ax.plot(x * 1e5, y_pred * varinf.factor, 'b-', label=u'Final prediction') -ax.set_xlabel('$Q_m\ (nC/cm^2)$') -ax.set_ylabel('$' + varinf.name + '\ (' + varinf.unit + ')$') -ax.legend() - - -plt.show() - diff --git a/deprecated/kriging/test_pykriging2D.py b/deprecated/kriging/test_pykriging2D.py deleted file mode 100644 index c98bf1f..0000000 --- a/deprecated/kriging/test_pykriging2D.py +++ /dev/null @@ -1,272 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# @Author: Theo Lemaire -# @Date: 2017-04-24 11:04:39 -# @Email: theo.lemaire@epfl.ch -# @Last Modified by: Theo Lemaire -# @Last Modified time: 2017-05-26 14:30:02 - -''' Predict a 1D Vmeff profile using the PyKriging module. 
''' - -import os, ntpath -import pickle -import re -import numpy as np -from scipy.interpolate import griddata -import matplotlib.pyplot as plt -import matplotlib.cm as cm -from pyKriging.krige import kriging -from utils import OpenFilesDialog, rescale, rmse - - -class Variable: - ''' dummy class to contain information about the variable ''' - - name = '' - unit = '' - lookup = '' - factor = 1. - max_error = 0. - - def __init__(self, var_name, var_unit, var_lookup, var_factor, var_max_error): - self.name = var_name - self.unit = var_unit - self.factor = var_factor - self.lookup = var_lookup - self.max_error = var_max_error - - -# Set data variable and Kriging parameters -varinf = Variable('V_{m, eff}', 'mV', 'V_eff', 1., 1e-1) -# varinf = Variable('\\alpha_{m, eff}', 'ms^{-1}', 'alpha_m_eff', 1e-3, 1e1) -# varinf = Variable('\\beta_{m, eff}', 'ms^{-1}', 'beta_m_eff', 1e-3, 5e0) -# varinf = Variable('\\alpha_{h, eff}', 'ms^{-1}', 'alpha_h_eff', 1e-3, 1e1) -# varinf = Variable('\\beta_{h, eff}', 'ms^{-1}', 'beta_h_eff', 1e-3, 1e1) - - -# Define true function by interpolation from specific profile -def f(x): - return griddata(points, values, x, method='linear', rescale=True) - - -# Select data files (PKL) -lookup_root = '../Output/lookups 0.35MHz charge extended/' -lookup_absroot = os.path.abspath(lookup_root) -lookup_filepaths = OpenFilesDialog(lookup_absroot, 'pkl') -rgxp = re.compile('lookups_a(\d*.\d*)nm_f(\d*.\d*)kHz_A(\d*.\d*)kPa_dQ(\d*.\d*)nC_cm2.pkl') -pltdir = 'C:/Users/admin/Desktop/PyKriging output/' - -# Set data variable and Kriging parameters -varinf = Variable('V_{m, eff}', 'mV', 'V_eff', 1., 1.0) -# varinf = Variable('\\alpha_{m, eff}', 'ms^{-1}', 'alpha_m_eff', 1e-3, 1e4) -# varinf = Variable('\\beta_{m, eff}', 'ms^{-1}', 'beta_m_eff', 1e-3, 5e0) -# varinf = Variable('\\alpha_{h, eff}', 'ms^{-1}', 'alpha_h_eff', 1e-3, 1e1) -# varinf = Variable('\\beta_{h, eff}', 'ms^{-1}', 'beta_h_eff', 1e-3, 1e1) - -# Check dialog output -if not lookup_filepaths: - print('error: no lookup table selected') -else: - print('importing lookup tables') - nfiles = len(lookup_filepaths) - amps = np.empty(nfiles) - - for i in range(nfiles): - - # Load lookup table - lookup_filename = ntpath.basename(lookup_filepaths[i]) - mo = rgxp.fullmatch(lookup_filename) - if not mo: - print('Error: lookup file does not match regular expression pattern') - else: - # Retrieve stimulus parameters - Fdrive = float(mo.group(2)) * 1e3 - Adrive = float(mo.group(3)) * 1e3 - dQ = float(mo.group(4)) * 1e-2 - amps[i] = Adrive - if Adrive == 0: - baseline_ind = i - - # Retrieve coefficients data - with open(lookup_filepaths[i], 'rb') as fh: - lookup = pickle.load(fh) - if i == 0: - Qm = lookup['Q'] - nQ = np.size(Qm) - var = np.empty((nfiles, nQ)) - var[i, :] = lookup[varinf.lookup] - else: - if np.array_equal(Qm, lookup['Q']): - var[i, :] = lookup[varinf.lookup] - else: - print('Error: charge vector not consistent') - - # Compute data metrics - namps = amps.size - Amin = np.amin(amps) - Amax = np.amax(amps) - Qmin = np.amin(Qm) - Qmax = np.amax(Qm) - varmin = np.amin(var) - varmax = np.amax(var) - print('Initial data:', nQ, 'charges,', namps, 'amplitudes') - - # Define points for interpolation function - Q_mesh, A_mesh = np.meshgrid(Qm, amps) - points = np.column_stack([A_mesh.flatten(), Q_mesh.flatten()]) - values = var.flatten() - - # Define algorithmic parameters - n_iter_min = 10 - n_iter_max = 30 - MAE_pred = [] - MAE_true = [] - RMSE_true = [] - - # Define estimation matrix - nAest = 20 - nQest = 100 - print('Initial 
estimation matrix:', nQest, 'charges,', nAest, 'amplitudes') - Aest = np.linspace(Amin, Amax, nAest) - Qest = np.linspace(Qmin, Qmax, nQest) - Qest_mesh, Aest_mesh = np.meshgrid(Qest, Aest) - x = np.column_stack([Aest_mesh.flatten(), Qest_mesh.flatten()]) - ytrue = f(x).ravel().reshape((nAest, nQest)) - - # Define initial observation matrix - nAobs = 5 - nQobs = 20 - print('Initial estimation matrix:', nQobs, 'charges,', nAobs, 'amplitudes') - Aobs = np.linspace(Amin, Amax, nAobs) - Qobs = np.linspace(Qmin, Qmax, nQobs) - Qobs_mesh, Aobs_mesh = np.meshgrid(Qobs, Aobs) - X0 = np.column_stack([Aobs_mesh.flatten(), Qobs_mesh.flatten()]) - Y0 = f(X0).ravel() - - print('creating Kriging model') - k = kriging(X0, Y0) - - print('initial training') - k.train() - - print('predicting') - y0 = np.array([k.predict(xx) for xx in x]) - y0 = y0.reshape((nAest, nQest)) - - X = X0 - Y = Y0 - - n_iter = 10 - for i in range(n_iter): - print('Infill iteration {0} of {1}....'.format(i + 1, n_iter)) - newpoints = k.infill(2, method='error') - for point in newpoints: - newX = k.inversenormX(point) - newY = f(newX)[0] - print('adding point (({:.3f}, {:.3f}), {:.3f})'.format( - newX[0] * 1e-3, newX[1] * 1e5, newY * varinf.factor)) - X = np.append(X, [newX], axis=0) - Y = np.append(Y, newY) - k.addPoint(newX, newY, norm=True) - k.train() - - y = np.array([k.predict(xx) for xx in x]) - y = y.reshape((nAest, nQest)) - - # Plotting - mymap = cm.get_cmap('viridis') - sm_amp = plt.cm.ScalarMappable(cmap=mymap, norm=plt.Normalize(Amin * 1e-3, Amax * 1e-3)) - sm_amp._A = [] - var_levels = np.linspace(varmin, varmax, 20) * varinf.factor - sm_var = plt.cm.ScalarMappable(cmap=mymap, norm=plt.Normalize(varmin * varinf.factor, varmax * varinf.factor)) - sm_var._A = [] - - # True function profiles - fig, ax = plt.subplots(figsize=(10, 6)) - ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=20) - ax.set_ylabel('$' + varinf.name + '\ (' + varinf.unit + ')$', fontsize=20) - ax.set_title('True function profiles', fontsize=20) - for i in range(nAest): - ax.plot(Qest * 1e5, ytrue[i, :] * varinf.factor, c=mymap(rescale(Aest[i], Amin, Amax))) - fig.subplots_adjust(right=0.85) - cbar_ax = fig.add_axes([0.87, 0.1, 0.02, 0.8]) - fig.add_axes() - fig.colorbar(sm_amp, cax=cbar_ax) - cbar_ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=20) - fig.savefig(pltdir + 'fig1.png', format='png') - - # True function map - fig, ax = plt.subplots(figsize=(10, 6)) - ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=20) - ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=20) - ax.set_title('True function map', fontsize=20) - ax.contourf(Qest * 1e5, Aest * 1e-3, ytrue * varinf.factor, levels=var_levels, - cmap='viridis') - fig.subplots_adjust(right=0.85) - cbar_ax = fig.add_axes([0.87, 0.1, 0.02, 0.8]) - fig.add_axes() - fig.colorbar(sm_var, cax=cbar_ax) - cbar_ax.set_ylabel('$' + varinf.name + '\ (' + varinf.unit + ')$', fontsize=20) - fig.savefig(pltdir + 'fig2.png', format='png') - - # Initial estimation profiles - fig, ax = plt.subplots(figsize=(10, 6)) - ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=20) - ax.set_ylabel('$' + varinf.name + '\ (' + varinf.unit + ')$', fontsize=20) - ax.set_title('Initial estimation profiles', fontsize=20) - for i in range(nAest): - ax.plot(Qest * 1e5, y0[i, :] * varinf.factor, c=mymap(rescale(Aest[i], Amin, Amax))) - fig.subplots_adjust(right=0.85) - cbar_ax = fig.add_axes([0.87, 0.1, 0.02, 0.8]) - fig.add_axes() - fig.colorbar(sm_amp, cax=cbar_ax) - cbar_ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=20) - fig.savefig(pltdir + 'fig3.png', 
format='png') - - # Initial estimation map - fig, ax = plt.subplots(figsize=(10, 6)) - ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=20) - ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=20) - ax.set_title('Initial estimation map', fontsize=20) - ax.contourf(Qest * 1e5, Aest * 1e-3, y0 * varinf.factor, levels=var_levels, - cmap='viridis') - ax.scatter(X0[:, 1] * 1e5, X0[:, 0] * 1e-3, c='black') - fig.subplots_adjust(right=0.85) - cbar_ax = fig.add_axes([0.87, 0.1, 0.02, 0.8]) - fig.add_axes() - fig.colorbar(sm_var, cax=cbar_ax) - cbar_ax.set_ylabel('$' + varinf.name + '\ (' + varinf.unit + ')$', fontsize=20) - fig.savefig(pltdir + 'fig4.png', format='png') - - # Final estimation profiles - fig, ax = plt.subplots(figsize=(10, 6)) - ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=20) - ax.set_ylabel('$' + varinf.name + '\ (' + varinf.unit + ')$', fontsize=20) - ax.set_title('Final estimation profiles', fontsize=20) - for i in range(nAest): - ax.plot(Qest * 1e5, y[i, :] * varinf.factor, c=mymap(rescale(Aest[i], Amin, Amax))) - fig.subplots_adjust(right=0.85) - cbar_ax = fig.add_axes([0.87, 0.1, 0.02, 0.8]) - fig.add_axes() - fig.colorbar(sm_amp, cax=cbar_ax) - cbar_ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=20) - fig.savefig(pltdir + 'fig7.png', format='png') - - # Final estimation map - fig, ax = plt.subplots(figsize=(10, 6)) - ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=20) - ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=20) - ax.set_title('Final estimation map', fontsize=20) - ax.contourf(Qest * 1e5, Aest * 1e-3, y * varinf.factor, levels=var_levels, - cmap='viridis') - ax.scatter(X[:, 1] * 1e5, X[:, 0] * 1e-3, c='black') - fig.subplots_adjust(right=0.85) - cbar_ax = fig.add_axes([0.87, 0.1, 0.02, 0.8]) - fig.add_axes() - fig.colorbar(sm_var, cax=cbar_ax) - cbar_ax.set_ylabel('$' + varinf.name + '\ (' + varinf.unit + ')$', fontsize=20) - fig.savefig(pltdir + 'fig8.png', format='png') - - -plt.show() - diff --git a/paper figures/deprecated/figQSS.py b/paper figures/deprecated/figQSS.py index a4f14a0..cc96cb2 100644 --- a/paper figures/deprecated/figQSS.py +++ b/paper figures/deprecated/figQSS.py @@ -1,294 +1,294 @@ # -*- coding: utf-8 -*- # @Author: Theo Lemaire # @Date: 2018-09-28 16:13:34 # @Last Modified by: Theo Lemaire -# @Last Modified time: 2019-03-22 20:05:30 +# @Last Modified time: 2019-06-06 15:14:32 ''' Subpanels of the QSS approximation figure. 
''' import os import logging import numpy as np import matplotlib.pyplot as plt import matplotlib import matplotlib.cm as cm from argparse import ArgumentParser from PySONIC.core import NeuronalBilayerSonophore from PySONIC.utils import logger, selectDirDialog -from PySONIC.neurons import getNeuronsDict +from PySONIC.neurons import getPointNeuron # Plot parameters matplotlib.rcParams['pdf.fonttype'] = 42 matplotlib.rcParams['ps.fonttype'] = 42 matplotlib.rcParams['font.family'] = 'arial' # Figure basename figbase = os.path.splitext(__file__)[0] def plotQSSvars_vs_Adrive(neuron, a, Fdrive, PRF, DC, fs=8, markers=['-', '--', '.-'], title=None): - neuron = getNeuronsDict()[neuron]() + neuron = getPointNeuron(neuron) # Determine spiking threshold Vthr = neuron.VT # mV Qthr = neuron.Cm0 * Vthr * 1e-3 # C/m2 # Get QSS variables for each amplitude at threshold charge nbls = NeuronalBilayerSonophore(a, neuron, Fdrive) Aref, _, Vmeff, QS_states = nbls.quasiSteadyStates(Fdrive, charges=Qthr, DCs=DC) # Compute US-ON and US-OFF ionic currents currents_on = neuron.currents(Vmeff, QS_states) currents_off = neuron.currents(neuron.VT, QS_states) iNet_on = sum(currents_on.values()) iNet_off = sum(currents_off.values()) # Retrieve list of ionic currents names, with iLeak first ckeys = list(currents_on.keys()) ckeys.insert(0, ckeys.pop(ckeys.index('iLeak'))) # Compute quasi-steady ON, OFF and net charge variations, and threshold amplitude dQ_on = -iNet_on * DC / PRF dQ_off = -iNet_off * (1 - DC) / PRF dQ_net = dQ_on + dQ_off Athr = np.interp(0, dQ_net, Aref, left=0., right=np.nan) # Create figure fig, axes = plt.subplots(4, 1, figsize=(4, 6)) axes[-1].set_xlabel('Amplitude (kPa)', fontsize=fs) for ax in axes: for skey in ['top', 'right']: ax.spines[skey].set_visible(False) ax.set_xscale('log') ax.set_xlim(1e1, 1e2) ax.set_xticks([1e1, 1e2]) for item in ax.get_xticklabels() + ax.get_yticklabels(): item.set_fontsize(fs) for item in ax.get_xticklabels(minor=True): item.set_visible(False) figname = '{} neuron thr dynamics {:.1f}nC_cm2 {:.0f}% DC'.format( neuron.name, Qthr * 1e5, DC * 1e2) fig.suptitle(figname, fontsize=fs) # Subplot 1: Vmeff ax = axes[0] ax.set_ylabel('Effective potential (mV)', fontsize=fs) Vbounds = (-120, -40) ax.set_ylim(Vbounds) ax.set_yticks([Vbounds[0], neuron.Vm0, Vbounds[1]]) ax.set_yticklabels(['{:.0f}'.format(Vbounds[0]), '$V_{m0}$', '{:.0f}'.format(Vbounds[1])]) ax.plot(Aref * 1e-3, Vmeff, '--', color='C0', label='ON') ax.plot(Aref * 1e-3, neuron.VT * np.ones(Aref.size), ':', color='C0', label='OFF') ax.axhline(neuron.Vm0, linewidth=0.5, color='k') # Subplot 2: quasi-steady states ax = axes[1] ax.set_ylabel('Quasi-steady states', fontsize=fs) ax.set_yticks([0, 0.5, 0.6]) ax.set_yticklabels(['0', '0.5', '1']) ax.set_ylim([-0.05, 0.65]) d = .01 f = 1.03 xcut = ax.get_xlim()[0] for ycut in [0.54, 0.56]: ax.plot([xcut / f, xcut * f], [ycut - d, ycut + d], color='k', clip_on=False) for label, QS_state in zip(neuron.states, QS_states): if label == 'h': QS_state -= 0.4 ax.plot(Aref * 1e-3, QS_state, label=label) # Subplot 3: currents ax = axes[2] ax.set_ylabel('QSS Currents (mA/m2)', fontsize=fs) Ibounds = (-10, 10) ax.set_ylim(Ibounds) ax.set_yticks([Ibounds[0], 0.0, Ibounds[1]]) for i, key in enumerate(ckeys): c = 'C{}'.format(i) if isinstance(currents_off[key], float): currents_off[key] = np.ones(Aref.size) * currents_off[key] ax.plot(Aref * 1e-3, currents_on[key], '--', label=key, c=c) ax.plot(Aref * 1e-3, currents_off[key], ':', c=c) ax.plot(Aref * 1e-3, iNet_on, '--', color='k', 
label='iNet') ax.plot(Aref * 1e-3, iNet_off, ':', color='k') ax.axhline(0, color='k', linewidth=0.5) # Subplot 4: charge variations and activation threshold ax = axes[3] ax.set_ylabel('$\\rm \Delta Q_{QS}\ (nC/cm^2)$', fontsize=fs) dQbounds = (-0.06, 0.1) ax.set_ylim(dQbounds) ax.set_yticks([dQbounds[0], 0.0, dQbounds[1]]) ax.plot(Aref * 1e-3, dQ_on, '--', color='C0', label='ON') ax.plot(Aref * 1e-3, dQ_off, ':', color='C0', label='OFF') ax.plot(Aref * 1e-3, dQ_net, color='C0', label='Net') ax.plot([Athr * 1e-3] * 2, [ax.get_ylim()[0], 0], linestyle='--', color='k') ax.plot([Athr * 1e-3], [0], 'o', c='k') ax.axhline(0, color='k', linewidth=0.5) fig.tight_layout() fig.subplots_adjust(right=0.8) for ax in axes: ax.legend(loc='center right', fontsize=fs, frameon=False, bbox_to_anchor=(1.3, 0.5)) if title is not None: fig.canvas.set_window_title(title) return fig def plotQSSdQ_vs_Adrive(neuron, a, Fdrive, PRF, DCs, fs=8, title=None): - neuron = getNeuronsDict()[neuron]() + neuron = getPointNeuron(neuron) # Determine spiking threshold Vthr = neuron.VT # mV Qthr = neuron.Cm0 * Vthr * 1e-3 # C/m2 # Get QSS variables for each amplitude and DC at threshold charge nbls = NeuronalBilayerSonophore(a, neuron, Fdrive) Aref, _, Vmeff, QS_states = nbls.quasiSteadyStates(Fdrive, charges=Qthr, DCs=DCs) dQnet = np.empty((DCs.size, Aref.size)) Athr = np.empty(DCs.size) for i, DC in enumerate(DCs): # Compute US-ON and US-OFF net membrane current from QSS variables iNet_on = neuron.iNet(Vmeff, QS_states[:, :, i]) iNet_off = neuron.iNet(Vthr, QS_states[:, :, i]) # Compute the pulse average net current along the amplitude space iNet_avg = iNet_on * DC + iNet_off * (1 - DC) dQnet[i, :] = -iNet_avg / PRF # Find the threshold amplitude that cancels the pulse average net current Athr[i] = np.interp(0, -iNet_avg, Aref, left=0., right=np.nan) # Create figure fig, ax = plt.subplots(figsize=(4, 2)) figname = '{} neuron thr vs DC'.format(neuron.name, Qthr * 1e5) fig.suptitle(figname, fontsize=fs) for key in ['top', 'right']: ax.spines[key].set_visible(False) ax.set_xscale('log') for item in ax.get_xticklabels() + ax.get_yticklabels(): item.set_fontsize(fs) for item in ax.get_xticklabels(minor=True): item.set_visible(False) ax.set_xlabel('Amplitude (kPa)', fontsize=fs) ax.set_ylabel('$\\rm \Delta Q_{QS}\ (nC/cm^2)$', fontsize=fs) ax.set_xlim(1e1, 1e2) ax.axhline(0., linewidth=0.5, color='k') ax.set_ylim(-0.06, 0.12) ax.set_yticks([-0.05, 0.0, 0.10]) ax.set_yticklabels(['-0.05', '0', '0.10']) norm = matplotlib.colors.LogNorm(DCs.min(), DCs.max()) sm = cm.ScalarMappable(norm=norm, cmap='viridis') sm._A = [] for i, DC in enumerate(DCs): ax.plot(Aref * 1e-3, dQnet[i, :], c=sm.to_rgba(DC), label='{:.0f}% DC'.format(DC * 1e2)) ax.plot([Athr[i] * 1e-3] * 2, [ax.get_ylim()[0], 0], linestyle='--', c=sm.to_rgba(DC)) ax.plot([Athr[i] * 1e-3], [0], 'o', c=sm.to_rgba(DC)) fig.tight_layout() fig.subplots_adjust(right=0.8) ax.legend(loc='center right', fontsize=fs, frameon=False, bbox_to_anchor=(1.3, 0.5)) if title is not None: fig.canvas.set_window_title(title) return fig def plotQSSAthr_vs_DC(neurons, a, Fdrive, DCs_dense, DCs_sparse, fs=8, title=None): fig, ax = plt.subplots(figsize=(3, 3)) ax.set_title('Rheobase amplitudes', fontsize=fs) ax.set_xlabel('Duty cycle (%)', fontsize=fs) ax.set_ylabel('$\\rm A_T\ (kPa)$', fontsize=fs) for key in ['top', 'right']: ax.spines[key].set_visible(False) for item in ax.get_xticklabels() + ax.get_yticklabels(): item.set_fontsize(fs) ax.set_xticks([25, 50, 75, 100]) ax.set_yscale('log') 
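# Illustration only (not part of the PySONIC API): the threshold amplitudes computed
# above, and the rheobase amplitudes retrieved below via findRheobaseAmps, both
# correspond to the acoustic amplitude at which the pulse-averaged quasi-steady net
# membrane current changes sign. A minimal standalone sketch of that zero-crossing
# search, assuming the net charge variation increases monotonically with amplitude
# (the helper name below is hypothetical):
import numpy as np  # already imported at the top of this script

def find_threshold_amp(Aref, iNet_on, iNet_off, DC, PRF):
    ''' Interpolate the amplitude cancelling the pulse-averaged quasi-steady charge variation.
        :param Aref: vector of acoustic amplitudes (Pa)
        :param iNet_on: net membrane current during the US-ON phase, per amplitude (mA/m2)
        :param iNet_off: net membrane current during the US-OFF phase, per amplitude (mA/m2)
        :param DC: duty cycle (-)
        :param PRF: pulse repetition frequency (Hz)
        :return: threshold amplitude (Pa), or NaN if the charge variation never cancels
    '''
    dQ_net = -(iNet_on * DC + iNet_off * (1 - DC)) / PRF  # net charge variation per pulse
    return np.interp(0, dQ_net, Aref, left=0., right=np.nan)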
ax.set_ylim([10, 600]) norm = matplotlib.colors.LogNorm(DCs_sparse.min(), DCs_sparse.max()) sm = cm.ScalarMappable(norm=norm, cmap='viridis') sm._A = [] for i, neuron in enumerate(neurons): - neuron = getNeuronsDict()[neuron]() + neuron = getPointNeuron(neuron) nbls = NeuronalBilayerSonophore(a, neuron) Athrs_dense = nbls.findRheobaseAmps(DCs_dense, Fdrive, neuron.VT)[0] * 1e-3 # kPa Athrs_sparse = nbls.findRheobaseAmps(DCs_sparse, Fdrive, neuron.VT)[0] * 1e-3 # kPa ax.plot(DCs_dense * 1e2, Athrs_dense, label='{} neuron'.format(neuron.name)) for DC, Athr in zip(DCs_sparse, Athrs_sparse): ax.plot(DC * 1e2, Athr, 'o', label='{:.0f}% DC'.format(DC * 1e2) if i == len(neurons) - 1 else None, c=sm.to_rgba(DC)) ax.legend(fontsize=fs, frameon=False) fig.tight_layout() if title is not None: fig.canvas.set_window_title(title) return fig def main(): ap = ArgumentParser() # Runtime options ap.add_argument('-v', '--verbose', default=False, action='store_true', help='Increase verbosity') ap.add_argument('-o', '--outdir', type=str, help='Output directory') ap.add_argument('-f', '--figset', type=str, nargs='+', help='Figure set', default='all') ap.add_argument('-s', '--save', default=False, action='store_true', help='Save output figures as pdf') args = ap.parse_args() loglevel = logging.DEBUG if args.verbose is True else logging.INFO logger.setLevel(loglevel) figset = args.figset if figset == 'all': figset = ['a', 'b', 'c', 'e'] logger.info('Generating panels {} of {}'.format(figset, figbase)) # Parameters a = 32e-9 # m Fdrive = 500e3 # Hz PRF = 100.0 # Hz DC = 0.5 DCs_sparse = np.array([5, 15, 50, 75, 95]) / 1e2 DCs_dense = np.arange(1, 101) / 1e2 # Figures figs = [] if 'a' in figset: figs += [ plotQSSvars_vs_Adrive('RS', a, Fdrive, PRF, DC, title=figbase + 'a RS'), plotQSSvars_vs_Adrive('LTS', a, Fdrive, PRF, DC, title=figbase + 'a LTS') ] if 'b' in figset: figs += [ plotQSSdQ_vs_Adrive('RS', a, Fdrive, PRF, DCs_sparse, title=figbase + 'b RS'), plotQSSdQ_vs_Adrive('LTS', a, Fdrive, PRF, DCs_sparse, title=figbase + 'b LTS') ] if 'c' in figset: figs.append(plotQSSAthr_vs_DC(['RS', 'LTS'], a, Fdrive, DCs_dense, DCs_sparse, title=figbase + 'c')) if args.save: outdir = selectDirDialog() if args.outdir is None else args.outdir if outdir == '': logger.error('No input directory chosen') return for fig in figs: figname = '{}.pdf'.format(fig.canvas.get_window_title()) fig.savefig(os.path.join(outdir, figname), transparent=True) else: plt.show() if __name__ == '__main__': main() diff --git a/paper figures/fig2.py b/paper figures/fig2.py index c8bc03a..ce3234d 100644 --- a/paper figures/fig2.py +++ b/paper figures/fig2.py @@ -1,329 +1,329 @@ # -*- coding: utf-8 -*- # @Author: Theo # @Date: 2018-06-06 18:38:04 # @Last Modified by: Theo Lemaire -# @Last Modified time: 2019-05-31 14:25:27 +# @Last Modified time: 2019-06-06 18:26:24 ''' Sub-panels of the model optimization figure. 
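Panels (following the plotting functions defined below): (a) predictive approximation of the average intermolecular pressure, (b) recasting of the charge and potential traces of a full simulation, (c) mechanical simulation and cycle-averaging of membrane variables over its last acoustic period, and (e) charge density trace of an effective ('sonic') simulation under pulsed stimulation.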
''' import os import logging import numpy as np import matplotlib import matplotlib.pyplot as plt from matplotlib.ticker import FormatStrFormatter from matplotlib.patches import Rectangle from argparse import ArgumentParser from PySONIC.utils import logger, rescale, si_format, selectDirDialog -from PySONIC.plt import getStimPulses, cm2inch +from PySONIC.plt import SchemePlot, cm2inch from PySONIC.constants import NPC_FULL from PySONIC.neurons import CorticalRS from PySONIC.core import BilayerSonophore, NeuronalBilayerSonophore # Plot parameters matplotlib.rcParams['pdf.fonttype'] = 42 matplotlib.rcParams['ps.fonttype'] = 42 matplotlib.rcParams['font.family'] = 'arial' # Figure basename figbase = os.path.splitext(__file__)[0] def PmApprox(bls, Z, fs=12, lw=2): fig, ax = plt.subplots(figsize=cm2inch(7, 7)) for key in ['right', 'top']: ax.spines[key].set_visible(False) for key in ['bottom', 'left']: ax.spines[key].set_linewidth(2) ax.spines['bottom'].set_position('zero') ax.set_xlabel('Z (nm)', fontsize=fs) ax.set_ylabel('Pressure (kPa)', fontsize=fs, labelpad=-10) ax.set_xticks([0, bls.a * 1e9]) ax.set_xticklabels(['0', 'a']) ax.tick_params(axis='x', which='major', length=25, pad=5) ax.set_yticks([0]) ax.set_ylim([-10, 50]) for item in ax.get_xticklabels() + ax.get_yticklabels(): item.set_fontsize(fs) ax.plot(Z * 1e9, bls.v_PMavg(Z, bls.v_curvrad(Z), bls.surface(Z)) * 1e-3, c='g', label='$P_m$') ax.plot(Z * 1e9, bls.PMavgpred(Z) * 1e-3, '--', c='r', label='$\~P_m$') ax.axhline(y=0, color='k') ax.legend(fontsize=fs, frameon=False) fig.tight_layout() fig.canvas.set_window_title(figbase + 'a') return fig def recasting(nbls, Fdrive, Adrive, fs=12, lw=2, ps=15): # Run effective simulation data, _ = nbls.simulate(Fdrive, Adrive, 5 / Fdrive, 0., method='full') t, Qm, Vm = [data[key].values for key in ['t', 'Qm', 'Vm']] t *= 1e6 # us Qm *= 1e5 # nC/cm2 Qrange = (Qm.min(), Qm.max()) dQ = Qrange[1] - Qrange[0] # Create figure and axes fig, axes = plt.subplots(1, 2, figsize=cm2inch(17, 5)) for ax in axes: ax.set_xticks([]) ax.set_yticks([]) # Plot Q-trace and V-trace ax = axes[0] for key in ['top', 'right']: ax.spines[key].set_visible(False) for key in ['bottom', 'left']: ax.spines[key].set_position(('axes', -0.03)) ax.spines[key].set_linewidth(2) ax.plot(t, Vm, label='Vm', c='dimgrey', linewidth=lw) ax.plot(t, Qm, label='Qm', c='k', linewidth=lw) ax.add_patch(Rectangle( (t[0], Qrange[0] - 5), t[-1], dQ + 10, fill=False, edgecolor='k', linestyle='--', linewidth=1 )) ax.yaxis.set_tick_params(width=2) ax.yaxis.set_major_formatter(FormatStrFormatter('%.0f')) # ax.set_xlim((t.min(), t.max())) ax.set_xticks([]) ax.set_xlabel('{}s'.format(si_format((t.max()), space=' ')), fontsize=fs) ax.set_ylabel('$\\rm nC/cm^2$ - mV', fontsize=fs, labelpad=-15) ax.set_yticks(ax.get_ylim()) for item in ax.get_yticklabels(): item.set_fontsize(fs) # Plot inset on Q-trace ax = axes[1] for key in ['top', 'right', 'bottom', 'left']: ax.spines[key].set_linewidth(1) ax.spines[key].set_linestyle('--') ax.plot(t, Vm, label='Vm', c='dimgrey', linewidth=lw) ax.plot(t, Qm, label='Qm', c='k', linewidth=lw) ax.set_xlim((t.min(), t.max())) ax.set_xticks([]) ax.set_yticks([]) delta = 0.05 ax.set_ylim(Qrange[0] - delta * dQ, Qrange[1] + delta * dQ) fig.canvas.set_window_title(figbase + 'b') return fig def mechSim(bls, Fdrive, Adrive, Qm, fs=12, lw=2, ps=15): # Run mechanical simulation data, _ = bls.simulate(Fdrive, Adrive, Qm) t, Z, ng = [data[key].values for key in ['t', 'Z', 'ng']] # Create figure fig, ax = 
plt.subplots(figsize=cm2inch(7, 7)) fig.suptitle('Mechanical simulation', fontsize=12) for skey in ['bottom', 'left', 'right', 'top']: ax.spines[skey].set_visible(False) ax.set_xticks([]) ax.set_yticks([]) # Plot variables and labels t_plot = np.insert(t, 0, -1e-6) * 1e6 Pac = Adrive * np.sin(2 * np.pi * Fdrive * t + np.pi) # Pa yvars = {'P_A': Pac * 1e-3, 'Z': Z * 1e9, 'n_g': ng * 1e22} colors = {'P_A': 'k', 'Z': 'C0', 'n_g': 'C5'} dy = 1.2 for i, ykey in enumerate(yvars.keys()): y = yvars[ykey] y_plot = rescale(np.insert(y, 0, y[0])) - dy * i ax.plot(t_plot, y_plot, color=colors[ykey], linewidth=lw) ax.text(t_plot[0] - 0.1, y_plot[0], '$\mathregular{{{}}}$'.format(ykey), fontsize=fs, horizontalalignment='right', verticalalignment='center', color=colors[ykey]) # Acoustic pressure annotations ax.annotate(s='', xy=(1.5, 1.1), xytext=(3.5, 1.1), arrowprops=dict(arrowstyle='<|-|>', color='k')) ax.text(2.5, 1.12, '1/f', fontsize=fs, color='k', horizontalalignment='center', verticalalignment='bottom') ax.annotate(s='', xy=(1.5, -0.1), xytext=(1.5, 1), arrowprops=dict(arrowstyle='<|-|>', color='k')) ax.text(1.55, 0.4, '2A', fontsize=fs, color='k', horizontalalignment='left', verticalalignment='center') # Periodic stabilization patch ax.add_patch(Rectangle((2, -2 * dy - 0.1), 2, 2 * dy, color='dimgrey', alpha=0.3)) ax.text(3, -2 * dy - 0.2, 'limit cycle', fontsize=fs, color='dimgrey', horizontalalignment='center', verticalalignment='top') # Z_last patch ax.add_patch(Rectangle((2, -dy - 0.1), 2, dy, edgecolor='k', facecolor='none', linestyle='--')) # ngeff annotations c = plt.get_cmap('tab20').colors[11] ax.text(t_plot[-1] + 0.1, y_plot[-1], '$\mathregular{n_{g,eff}}$', fontsize=fs, color=c, horizontalalignment='left', verticalalignment='center') ax.scatter([t_plot[-1]], [y_plot[-1]], color=c, s=ps) fig.canvas.set_window_title(figbase + 'c mechsim') return fig def cycleAveraging(bls, neuron, Fdrive, Adrive, Qm, fs=12, lw=2, ps=15): # Run mechanical simulation data, _ = bls.simulate(Fdrive, Adrive, Qm) t, Z, ng = [data[key].values for key in ['t', 'Z', 'ng']] # Compute variables evolution over last acoustic cycle t_last = t[-NPC_FULL:] * 1e6 # us Z_last = Z[-NPC_FULL:] # m Cm = bls.v_Capct(Z_last) * 1e2 # uF/m2 Vm = Qm / Cm * 1e5 # mV yvars = { 'C_m': Cm, # uF/cm2 'V_m': Vm, # mV '\\alpha_m': neuron.alpham(Vm) * 1e3, # ms-1 '\\beta_m': neuron.betam(Vm) * 1e3, # ms-1 'p_\\infty / \\tau_p': neuron.pinf(Vm) / neuron.taup(Vm) * 1e3, # ms-1 '(1-p_\\infty) / \\tau_p': (1 - neuron.pinf(Vm)) / neuron.taup(Vm) * 1e3 # ms-1 } # Determine colors violets = plt.get_cmap('Paired').colors[8:10][::-1] oranges = plt.get_cmap('Paired').colors[6:8][::-1] colors = { 'C_m': ['k', 'dimgrey'], 'V_m': plt.get_cmap('tab20').colors[14:16], '\\alpha_m': violets, '\\beta_m': oranges, 'p_\\infty / \\tau_p': violets, '(1-p_\\infty) / \\tau_p': oranges } # Create figure and axes fig, axes = plt.subplots(6, 1, figsize=cm2inch(4, 15)) fig.suptitle('Cycle-averaging', fontsize=fs) for ax in axes: ax.set_xticks([]) ax.set_yticks([]) for skey in ['bottom', 'left', 'right', 'top']: ax.spines[skey].set_visible(False) # Plot variables for ax, ykey in zip(axes, yvars.keys()): ax.set_xticks([]) ax.set_yticks([]) for skey in ['bottom', 'left', 'right', 'top']: ax.spines[skey].set_visible(False) y = yvars[ykey] ax.plot(t_last, y, color=colors[ykey][0], linewidth=lw) ax.plot([t_last[0], t_last[-1]], [np.mean(y)] * 2, '--', color=colors[ykey][1]) ax.scatter([t_last[-1]], [np.mean(y)], s=ps, color=colors[ykey][1]) ax.text(t_last[0] - 0.1, y[0], 
'$\mathregular{{{}}}$'.format(ykey), fontsize=fs, horizontalalignment='right', verticalalignment='center', color=colors[ykey][0]) fig.canvas.set_window_title(figbase + 'c cycleavg') return fig def Qsolution(nbls, Fdrive, Adrive, tstim, toffset, PRF, DC, fs=12, lw=2, ps=15): # Run effective simulation data, _ = nbls.simulate(Fdrive, Adrive, tstim, toffset, PRF, DC, method='sonic') t, Qm, states = [data[key].values for key in ['t', 'Qm', 'stimstate']] t *= 1e3 # ms Qm *= 1e5 # nC/cm2 - _, tpulse_on, tpulse_off = getStimPulses(t, states) + _, tpulse_on, tpulse_off = SchemePlot.getStimPulses(t, states) # Add small onset t = np.insert(t, 0, -5.0) Qm = np.insert(Qm, 0, Qm[0]) # Create figure and axes fig, ax = plt.subplots(figsize=cm2inch(12, 6)) ax.set_xticks([]) ax.set_yticks([]) for key in ['top', 'right']: ax.spines[key].set_visible(False) for key in ['bottom', 'left']: ax.spines[key].set_position(('axes', -0.03)) ax.spines[key].set_linewidth(2) # Plot Q-trace and stimulation pulses ax.plot(t, Qm, label='Qm', c='k', linewidth=lw) for ton, toff in zip(tpulse_on, tpulse_off): ax.axvspan(ton, toff, edgecolor='none', facecolor='#8A8A8A', alpha=0.2) ax.yaxis.set_tick_params(width=2) ax.yaxis.set_major_formatter(FormatStrFormatter('%.0f')) ax.set_xlim((t.min(), t.max())) ax.set_xticks([]) ax.set_xlabel('{}s'.format(si_format((t.max()) * 1e-3, space=' ')), fontsize=fs) ax.set_ylabel('$\\rm nC/cm^2$', fontsize=fs, labelpad=-15) ax.set_yticks(ax.get_ylim()) for item in ax.get_yticklabels(): item.set_fontsize(fs) ax.legend(fontsize=fs, frameon=False) fig.canvas.set_window_title(figbase + 'e Qtrace') return fig def main(): ap = ArgumentParser() # Runtime options ap.add_argument('-v', '--verbose', default=False, action='store_true', help='Increase verbosity') ap.add_argument('-o', '--outdir', type=str, help='Output directory') ap.add_argument('-f', '--figset', type=str, nargs='+', help='Figure set', default='all') ap.add_argument('-s', '--save', default=False, action='store_true', help='Save output figures as pdf') args = ap.parse_args() loglevel = logging.DEBUG if args.verbose is True else logging.INFO logger.setLevel(loglevel) figset = args.figset if figset == 'all': figset = ['a', 'b', 'c', 'e'] logger.info('Generating panels {} of {}'.format(figset, figbase)) # Parameters neuron = CorticalRS() a = 32e-9 # m Fdrive = 500e3 # Hz Adrive = 100e3 # Pa PRF = 100. 
# Hz DC = 0.5 tstim = 150e-3 # s toffset = 100e-3 # s Qm = -71.9e-5 # C/cm2 bls = BilayerSonophore(a, neuron.Cm0, neuron.Cm0 * neuron.Vm0 * 1e-3) nbls = NeuronalBilayerSonophore(a, neuron) # Figures figs = [] if 'a' in figset: figs.append(PmApprox(bls, np.linspace(-0.4 * bls.Delta_, bls.a, 1000))) if 'b' in figset: figs.append(recasting(nbls, Fdrive, Adrive)) if 'c' in figset: figs += [ mechSim(bls, Fdrive, Adrive, Qm), cycleAveraging(bls, neuron, Fdrive, Adrive, Qm) ] if 'e' in figset: figs.append(Qsolution(nbls, Fdrive, Adrive, tstim, toffset, PRF, DC)) if args.save: outdir = selectDirDialog() if args.outdir is None else args.outdir if outdir == '': logger.error('No input directory chosen') return for fig in figs: figname = '{}.pdf'.format(fig.canvas.get_window_title()) fig.savefig(os.path.join(outdir, figname), transparent=True) else: plt.show() if __name__ == '__main__': main() diff --git a/paper figures/fig4.py b/paper figures/fig4.py index 529d25e..b3f4864 100644 --- a/paper figures/fig4.py +++ b/paper figures/fig4.py @@ -1,84 +1,83 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- # @Author: Theo Lemaire # @Date: 2017-02-15 15:59:37 # @Email: theo.lemaire@epfl.ch # @Last Modified by: Theo Lemaire -# @Last Modified time: 2019-02-27 17:19:42 +# @Last Modified time: 2019-06-06 15:19:18 ''' Sub-panels of the effective variables figure. ''' import os import matplotlib import matplotlib.pyplot as plt from argparse import ArgumentParser import logging from PySONIC.plt import plotEffectiveVariables from PySONIC.utils import logger, selectDirDialog -from PySONIC.neurons import getNeuronsDict +from PySONIC.neurons import getPointNeuron # Plot parameters matplotlib.rcParams['pdf.fonttype'] = 42 matplotlib.rcParams['ps.fonttype'] = 42 matplotlib.rcParams['font.family'] = 'arial' # Figure basename figbase = os.path.splitext(__file__)[0] def main(): ap = ArgumentParser() # Runtime options ap.add_argument('-v', '--verbose', default=False, action='store_true', help='Increase verbosity') ap.add_argument('-o', '--outdir', type=str, help='Output directory') ap.add_argument('-f', '--figset', type=str, nargs='+', help='Figure set', default='all') ap.add_argument('-s', '--save', default=False, action='store_true', help='Save output figures as pdf') args = ap.parse_args() loglevel = logging.DEBUG if args.verbose is True else logging.INFO logger.setLevel(loglevel) figset = args.figset if figset == 'all': figset = ['a', 'b', 'c'] logger.info('Generating panels {} of {}'.format(figset, figbase)) # Parameters - neuron_str = 'RS' - neuron = getNeuronsDict()[neuron_str]() + neuron = getPointNeuron('RS') a = 32e-9 # m Fdrive = 500e3 # Hz Adrive = 50e3 # Pa # Generate figures figs = [] if 'a' in figset: fig = plotEffectiveVariables(neuron, a=a, Fdrive=Fdrive, cmap='Oranges', zscale='log') fig.canvas.set_window_title(figbase + 'a') figs.append(fig) if 'b' in figset: fig = plotEffectiveVariables(neuron, a=a, Adrive=Adrive, cmap='Greens', zscale='log') fig.canvas.set_window_title(figbase + 'b') figs.append(fig) if 'c' in figset: fig = plotEffectiveVariables(neuron, Fdrive=Fdrive, Adrive=Adrive, cmap='Blues', zscale='log') fig.canvas.set_window_title(figbase + 'c') figs.append(fig) if args.save: outdir = selectDirDialog() if args.outdir is None else args.outdir if outdir == '': logger.error('No input directory chosen') return for fig in figs: figname = '{}.pdf'.format(fig.canvas.get_window_title()) fig.savefig(os.path.join(outdir, figname), transparent=True) else: plt.show() if __name__ == '__main__': main() diff 
--git a/paper figures/fig5.py b/paper figures/fig5.py index b9c6680..f77a29a 100644 --- a/paper figures/fig5.py +++ b/paper figures/fig5.py @@ -1,358 +1,355 @@ # -*- coding: utf-8 -*- # @Author: Theo # @Date: 2018-06-06 18:38:04 # @Last Modified by: Theo Lemaire -# @Last Modified time: 2019-06-02 11:53:17 +# @Last Modified time: 2019-06-06 18:18:04 ''' Sub-panels of the NICE and SONIC accuracies comparative figure. ''' import os import logging import numpy as np import matplotlib import matplotlib.pyplot as plt from argparse import ArgumentParser from PySONIC.utils import * from PySONIC.neurons import * -from PySONIC.plt import plotComp, plotSpikingMetrics, cm2inch +from PySONIC.plt import ComparativePlot, plotSpikingMetrics, cm2inch from utils import * # Plot parameters matplotlib.rcParams['pdf.fonttype'] = 42 matplotlib.rcParams['ps.fonttype'] = 42 matplotlib.rcParams['font.family'] = 'arial' # Figure basename figbase = os.path.splitext(__file__)[0] def Qprofiles_vs_amp(neuron, a, Fdrive, CW_Athrs, tstim, toffset, inputdir): ''' Comparison of resulting charge profiles for CW stimuli at sub-threshold, threshold and supra-threshold amplitudes. ''' Athr = CW_Athrs[neuron].loc[Fdrive * 1e-3] # kPa amps = np.array([Athr - 5., Athr, Athr + 20.]) * 1e3 # Pa subdir = os.path.join(inputdir, neuron) sonic_fpaths = getSims(subdir, neuron, a, nbls.simQueue( [Fdrive], amps, [tstim], [toffset], [None], [1.], 'sonic')) full_fpaths = getSims(subdir, neuron, a, nbls.simQueue( [Fdrive], amps, [tstim], [toffset], [None], [1.], 'full')) regimes = ['AT - 5 kPa', 'AT', 'AT + 20 kPa'] - fig = plotComp( - sum([[x, y] for x, y in zip(full_fpaths, sonic_fpaths)], []), - 'Qm', + comp_plot = ComparativePlot(sum([[x, y] for x, y in zip(full_fpaths, sonic_fpaths)], []), 'Qm') + fig = comp_plot.render( labels=sum([['', x] for x in regimes], []), lines=['-', '--'] * len(regimes), colors=plt.get_cmap('Paired').colors[:2 * len(regimes)], - fs=8, patches='one', xticks=[0, 250], yticks=[getNeuronsDict()[neuron].Vm0, 25], + fs=8, + patches='one', + xticks=[0, 250], + yticks=[getPointNeuron(neuron).Vm0, 25], straightlegend=True, figsize=cm2inch(12.5, 5.8) ) fig.axes[0].get_xaxis().set_label_coords(0.5, -0.05) fig.subplots_adjust(bottom=0.2, right=0.95, top=0.95) fig.canvas.set_window_title(figbase + 'a Qprofiles') return fig def spikemetrics_vs_amp(neuron, a, Fdrive, amps, tstim, toffset, inputdir): ''' Comparison of spiking metrics for CW stimuli at various supra-threshold amplitudes. ''' subdir = os.path.join(inputdir, neuron) sonic_fpaths = getSims(subdir, neuron, a, nbls.simQueue( [Fdrive], amps, [tstim], [toffset], [None], [1.], 'sonic')) full_fpaths = getSims(subdir, neuron, a, nbls.simQueue( [Fdrive], amps, [tstim], [toffset], [None], [1.], 'full')) data_fpaths = {'full': full_fpaths, 'sonic': sonic_fpaths} metrics_files = {x: '{}_spikemetrics_vs_amplitude_{}.csv'.format(neuron, x) for x in ['full', 'sonic']} metrics_fpaths = {key: os.path.join(inputdir, value) for key, value in metrics_files.items()} xlabel = 'Amplitude (kPa)' metrics = getSpikingMetrics( subdir, neuron, amps * 1e-3, xlabel, data_fpaths, metrics_fpaths) fig = plotSpikingMetrics(amps * 1e-3, xlabel, {neuron: metrics}, logscale=True) fig.canvas.set_window_title(figbase + 'a spikemetrics') return fig def Qprofiles_vs_freq(neuron, a, freqs, CW_Athrs, tstim, toffset, inputdir): ''' Comparison of resulting charge profiles for supra-threshold CW stimuli at low and high US frequencies. 
''' subdir = os.path.join(inputdir, neuron) sonic_fpaths, full_fpaths = [], [] for Fdrive in freqs: Athr = CW_Athrs[neuron].loc[Fdrive * 1e-3] # kPa Adrive = (Athr + 20.) * 1e3 # Pa sonic_fpaths += getSims(subdir, neuron, a, nbls.simQueue( [Fdrive], [Adrive], [tstim], [toffset], [None], [1.], 'sonic')) full_fpaths += getSims(subdir, neuron, a, nbls.simQueue( [Fdrive], [Adrive], [tstim], [toffset], [None], [1.], 'full')) - fig = plotComp( - sum([[x, y] for x, y in zip(full_fpaths, sonic_fpaths)], []), - 'Qm', + comp_plot = ComparativePlot(sum([[x, y] for x, y in zip(full_fpaths, sonic_fpaths)], []), 'Qm') + fig = comp_plot.render( labels=sum([['', '{}Hz'.format(si_format(f))] for f in freqs], []), lines=['-', '--'] * len(freqs), colors=plt.get_cmap('Paired').colors[6:10], fs=8, - patches='one', xticks=[0, 250], yticks=[getNeuronsDict()[neuron].Vm0, 25], + patches='one', xticks=[0, 250], yticks=[getPointNeuron(neuron).Vm0, 25], straightlegend=True, figsize=cm2inch(12.5, 5.8), inset={'xcoords': [5, 40], 'ycoords': [-35, 45], 'xlims': [57.5, 58.5], 'ylims': [10, 35]} ) fig.axes[0].get_xaxis().set_label_coords(0.5, -0.05) fig.subplots_adjust(bottom=0.2, right=0.95, top=0.95) fig.canvas.set_window_title(figbase + 'b Qprofiles') return fig def spikemetrics_vs_freq(neuron, a, freqs, CW_Athrs, tstim, toffset, inputdir): ''' Comparison of spiking metrics for supra-threshold CW stimuli at various US frequencies. ''' subdir = os.path.join(inputdir, neuron) sonic_fpaths, full_fpaths = [], [] for Fdrive in freqs: Athr = CW_Athrs[neuron].loc[Fdrive * 1e-3] # kPa Adrive = (Athr + 20.) * 1e3 # Pa sonic_fpaths += getSims(subdir, neuron, a, nbls.simQueue( [Fdrive], [Adrive], [tstim], [toffset], [None], [1.], 'sonic')) full_fpaths += getSims(subdir, neuron, a, nbls.simQueue( [Fdrive], [Adrive], [tstim], [toffset], [None], [1.], 'full')) data_fpaths = {'full': full_fpaths, 'sonic': sonic_fpaths} metrics_files = {x: '{}_spikemetrics_vs_frequency_{}.csv'.format(neuron, x) for x in ['full', 'sonic']} metrics_fpaths = {key: os.path.join(inputdir, value) for key, value in metrics_files.items()} xlabel = 'Frequency (kHz)' metrics = getSpikingMetrics( subdir, neuron, freqs * 1e-3, xlabel, data_fpaths, metrics_fpaths) fig = plotSpikingMetrics(freqs * 1e-3, xlabel, {neuron: metrics}, logscale=True) fig.canvas.set_window_title(figbase + 'b spikemetrics') return fig def Qprofiles_vs_radius(neuron, radii, Fdrive, CW_Athrs, tstim, toffset, inputdir): ''' Comparison of resulting charge profiles for supra-threshold CW stimuli for small and large sonophore radii. ''' subdir = os.path.join(inputdir, neuron) sonic_fpaths, full_fpaths = [], [] for a in radii: Athr = CW_Athrs[neuron].loc[a * 1e9] # kPa Adrive = (Athr + 20.) 
* 1e3 # Pa sonic_fpaths += getSims(subdir, neuron, a, nbls.simQueue( [Fdrive], [Adrive], [tstim], [toffset], [None], [1.], 'sonic')) full_fpaths += getSims(subdir, neuron, a, nbls.simQueue( [Fdrive], [Adrive], [tstim], [toffset], [None], [1.], 'full')) tmp = plt.get_cmap('Paired').colors colors = tmp[2:4] + tmp[10:12] - - fig = plotComp( - sum([[x, y] for x, y in zip(full_fpaths, sonic_fpaths)], []), - 'Qm', + comp_plot = ComparativePlot(sum([[x, y] for x, y in zip(full_fpaths, sonic_fpaths)], []), 'Qm') + fig = comp_plot.render( labels=sum([['', '{:.0f} nm'.format(a * 1e9)] for a in radii], []), lines=['-', '--'] * len(radii), colors=colors, fs=8, - patches='one', xticks=[0, 250], yticks=[getNeuronsDict()[neuron].Vm0, 25], + patches='one', xticks=[0, 250], yticks=[getPointNeuron(neuron).Vm0, 25], straightlegend=True, figsize=cm2inch(12.5, 5.8) ) fig.axes[0].get_xaxis().set_label_coords(0.5, -0.05) fig.subplots_adjust(bottom=0.2, right=0.95, top=0.95) fig.canvas.set_window_title(figbase + 'c Qprofiles') return fig def spikemetrics_vs_radius(neuron, radii, Fdrive, CW_Athrs, tstim, toffset, inputdir): ''' Comparison of spiking metrics for supra-threshold CW stimuli with various sonophore diameters. ''' subdir = os.path.join(inputdir, neuron) sonic_fpaths, full_fpaths = [], [] for a in radii: Athr = CW_Athrs[neuron].loc[np.round(a * 1e9, 1)] # kPa Adrive = (Athr + 20.) * 1e3 # Pa sonic_fpaths += getSims(subdir, neuron, a, nbls.simQueue( [Fdrive], [Adrive], [tstim], [toffset], [None], [1.], 'sonic')) full_fpaths += getSims(subdir, neuron, a, nbls.simQueue( [Fdrive], [Adrive], [tstim], [toffset], [None], [1.], 'full')) data_fpaths = {'full': full_fpaths, 'sonic': sonic_fpaths} metrics_files = {x: '{}_spikemetrics_vs_radius_{}.csv'.format(neuron, x) for x in ['full', 'sonic']} metrics_fpaths = {key: os.path.join(inputdir, value) for key, value in metrics_files.items()} xlabel = 'Sonophore radius (nm)' metrics = getSpikingMetrics( subdir, neuron, radii * 1e9, xlabel, data_fpaths, metrics_fpaths) fig = plotSpikingMetrics(radii * 1e9, xlabel, {neuron: metrics}, logscale=True) fig.canvas.set_window_title(figbase + 'c spikemetrics') return fig def Qprofiles_vs_DC(neurons, a, Fdrive, Adrive, tstim, toffset, PRF, DC, inputdir): ''' Comparison of resulting charge profiles for PW stimuli at 5% duty cycle for different neuron types. 
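Full (NICE) and effective (SONIC) solutions are interleaved as pairs, drawn with solid and dashed lines respectively in adjacent shades of the 'Paired' colormap.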
''' sonic_fpaths, full_fpaths = [], [] for neuron in neurons: subdir = os.path.join(inputdir, neuron) sonic_fpaths += getSims(subdir, neuron, a, nbls.simQueue( [Fdrive], [Adrive], [tstim], [toffset], [PRF], [DC], 'sonic')) full_fpaths += getSims(subdir, neuron, a, nbls.simQueue( [Fdrive], [Adrive], [tstim], [toffset], [PRF], [DC], 'full')) colors = list(plt.get_cmap('Paired').colors[:6]) del colors[2:4] - fig = plotComp( - sum([[x, y] for x, y in zip(full_fpaths, sonic_fpaths)], []), - 'Qm', + comp_plot = ComparativePlot(sum([[x, y] for x, y in zip(full_fpaths, sonic_fpaths)], []), 'Qm') + fig = comp_plot.render( labels=sum([['', '{}, {:.0f}% DC'.format(x, DC * 1e2)] for x in neurons], []), lines=['-', '--'] * len(neurons), colors=colors, fs=8, patches='one', - xticks=[0, 250], yticks=[min(getNeuronsDict()[n].Vm0 for n in neurons), 50], + xticks=[0, 250], yticks=[min(getPointNeuron(n).Vm0 for n in neurons), 50], straightlegend=True, figsize=cm2inch(12.5, 5.8) ) fig.axes[0].get_xaxis().set_label_coords(0.5, -0.05) fig.subplots_adjust(bottom=0.2, right=0.95, top=0.95) fig.canvas.set_window_title(figbase + 'd Qprofiles') return fig def spikemetrics_vs_DC(neurons, a, Fdrive, Adrive, tstim, toffset, PRF, DCs, inputdir): ''' Comparison of spiking metrics for PW stimuli at various duty cycle for different neuron types. ''' metrics_dict = {} xlabel = 'Duty cycle (%)' colors = list(plt.get_cmap('Paired').colors[:6]) del colors[2:4] colors_dict = {} for i, neuron in enumerate(neurons): subdir = os.path.join(inputdir, neuron) sonic_fpaths = getSims(subdir, neuron, a, nbls.simQueue( [Fdrive], [Adrive], [tstim], [toffset], [PRF], DCs, 'sonic')) full_fpaths = getSims(subdir, neuron, a, nbls.simQueue( [Fdrive], [Adrive], [tstim], [toffset], [PRF], DCs, 'full')) metrics_files = {x: '{}_spikemetrics_vs_DC_{}.csv'.format(neuron, x) for x in ['full', 'sonic']} metrics_fpaths = {key: os.path.join(inputdir, value) for key, value in metrics_files.items()} sonic_fpaths = sonic_fpaths[1:] + [sonic_fpaths[0]] full_fpaths = full_fpaths[1:] + [full_fpaths[0]] data_fpaths = {'full': full_fpaths, 'sonic': sonic_fpaths} metrics_dict[neuron] = getSpikingMetrics( subdir, neuron, DCs * 1e2, xlabel, data_fpaths, metrics_fpaths) colors_dict[neuron] = {'full': colors[2 * i], 'sonic': colors[2 * i + 1]} fig = plotSpikingMetrics(DCs * 1e2, xlabel, metrics_dict, spikeamp=False, colors=colors_dict) fig.canvas.set_window_title(figbase + 'd spikemetrics') return fig def Qprofiles_vs_PRF(neuron, a, Fdrive, Adrive, tstim, toffset, PRFs, DC, inputdir): ''' Comparison of resulting charge profiles for PW stimuli at 5% duty cycle with different pulse repetition frequencies. 
''' subdir = os.path.join(inputdir, neuron) sonic_fpaths = getSims(subdir, neuron, a, nbls.simQueue( [Fdrive], [Adrive], [tstim], [toffset], PRFs, [DC], 'sonic')) full_fpaths = getSims(subdir, neuron, a, nbls.simQueue( [Fdrive], [Adrive], [tstim], [toffset], PRFs, [DC], 'full')) patches = [False, True] * len(PRFs) patches[-1] = False - fig = plotComp( - sum([[x, y] for x, y in zip(full_fpaths, sonic_fpaths)], []), - 'Qm', + comp_plot = ComparativePlot(sum([[x, y] for x, y in zip(full_fpaths, sonic_fpaths)], []), 'Qm') + fig = comp_plot.render( labels=sum([['', '{}Hz PRF'.format(si_format(PRF, space=' '))] for PRF in PRFs], []), lines=['-', '--'] * len(PRFs), colors=plt.get_cmap('Paired').colors[4:12], fs=8, patches=patches, - xticks=[0, 250], yticks=[getNeuronsDict()[neuron].Vm0, 50], + xticks=[0, 250], yticks=[getPointNeuron(neuron).Vm0, 50], straightlegend=True, figsize=cm2inch(12.5, 5.8) ) fig.axes[0].get_xaxis().set_label_coords(0.5, -0.05) fig.subplots_adjust(bottom=0.2, right=0.95, top=0.95) fig.canvas.set_window_title(figbase + 'e Qprofiles') return fig def spikemetrics_vs_PRF(neuron, a, Fdrive, Adrive, tstim, toffset, PRFs, DC, inputdir): ''' Comparison of spiking metrics for PW stimuli at 5% duty cycle with different pulse repetition frequencies. ''' xlabel = 'PRF (Hz)' subdir = os.path.join(inputdir, neuron) sonic_fpaths = getSims(subdir, neuron, a, nbls.simQueue( [Fdrive], [Adrive], [tstim], [toffset], PRFs, [DC], 'sonic')) full_fpaths = getSims(subdir, neuron, a, nbls.simQueue( [Fdrive], [Adrive], [tstim], [toffset], PRFs, [DC], 'full')) data_fpaths = {'full': full_fpaths, 'sonic': sonic_fpaths} metrics_files = {x: '{}_spikemetrics_vs_PRF_{}.csv'.format(neuron, x) for x in ['full', 'sonic']} metrics_fpaths = {key: os.path.join(inputdir, value) for key, value in metrics_files.items()} metrics = getSpikingMetrics( subdir, neuron, PRFs, xlabel, data_fpaths, metrics_fpaths) fig = plotSpikingMetrics(PRFs, xlabel, {neuron: metrics}, spikeamp=False, logscale=True) fig.canvas.set_window_title(figbase + 'e spikemetrics') return fig def main(): ap = ArgumentParser() # Runtime options ap.add_argument('-v', '--verbose', default=False, action='store_true', help='Increase verbosity') ap.add_argument('-i', '--inputdir', type=str, help='Input directory') ap.add_argument('-f', '--figset', type=str, help='Figure set', default='a') ap.add_argument('-s', '--save', default=False, action='store_true', help='Save output figures as pdf') args = ap.parse_args() loglevel = logging.DEBUG if args.verbose is True else logging.INFO logger.setLevel(loglevel) inputdir = selectDirDialog() if args.inputdir is None else args.inputdir if inputdir == '': logger.error('No input directory chosen') return figset = args.figset logger.info('Generating panel {} of {}'.format(figset, figbase)) # Parameters radii = np.array([16, 22.6, 32, 45.3, 64]) * 1e-9 # m a = 32e-9 # m tstim = 150e-3 # s toffset = 100e-3 # s freqs = np.array([20e3, 100e3, 500e3, 1e6, 2e6, 3e6, 4e6]) # Hz Fdrive = 500e3 # Hz amps = np.array([50, 100, 300, 600]) * 1e3 # Pa Adrive = 100e3 # Pa PRFs_sparse = np.array([1e1, 1e2, 1e3, 1e4]) # Hz PRFs_dense = sum([[x, 2 * x, 5 * x] for x in PRFs_sparse[:-1]], []) + [PRFs_sparse[-1]] # Hz PRF = 100 # Hz DCs = np.array([5, 10, 25, 50, 75, 100]) * 1e-2 DC = 0.05 # Get threshold amplitudes if needed if 'a' in figset or 'b' in figset: CW_Athr_vs_Fdrive = getCWtitrations_vs_Fdrive( ['RS'], a, freqs, tstim, toffset, os.path.join(inputdir, 'CW_Athrs_vs_freqs.csv')) if 'c' in figset: CW_Athr_vs_radius = 
getCWtitrations_vs_radius( ['RS'], radii, Fdrive, tstim, toffset, os.path.join(inputdir, 'CW_Athrs_vs_radius.csv')) # Generate figures figs = [] if figset == 'a': figs.append(Qprofiles_vs_amp('RS', a, Fdrive, CW_Athr_vs_Fdrive, tstim, toffset, inputdir)) figs.append(spikemetrics_vs_amp('RS', a, Fdrive, amps, tstim, toffset, inputdir)) if figset == 'b': figs.append(Qprofiles_vs_freq( 'RS', a, [freqs.min(), freqs.max()], CW_Athr_vs_Fdrive, tstim, toffset, inputdir)) figs.append(spikemetrics_vs_freq('RS', a, freqs, CW_Athr_vs_Fdrive, tstim, toffset, inputdir)) if figset == 'c': figs.append(Qprofiles_vs_radius( 'RS', [radii.min(), radii.max()], Fdrive, CW_Athr_vs_radius, tstim, toffset, inputdir)) figs.append(spikemetrics_vs_radius( 'RS', radii, Fdrive, CW_Athr_vs_radius, tstim, toffset, inputdir)) if figset == 'd': figs.append(Qprofiles_vs_DC( ['RS', 'LTS'], a, Fdrive, Adrive, tstim, toffset, PRF, DC, inputdir)) figs.append(spikemetrics_vs_DC( ['RS', 'LTS'], a, Fdrive, Adrive, tstim, toffset, PRF, DCs, inputdir)) if figset == 'e': figs.append(Qprofiles_vs_PRF( 'LTS', a, Fdrive, Adrive, tstim, toffset, PRFs_sparse, DC, inputdir)) figs.append(spikemetrics_vs_PRF( 'LTS', a, Fdrive, Adrive, tstim, toffset, PRFs_dense, DC, inputdir)) if args.save: for fig in figs: figname = '{}.pdf'.format(fig.canvas.get_window_title()) fig.savefig(os.path.join(inputdir, figname), transparent=True) else: plt.show() if __name__ == '__main__': main() diff --git a/paper figures/fig7.py b/paper figures/fig7.py index 3c125fa..4049d2a 100644 --- a/paper figures/fig7.py +++ b/paper figures/fig7.py @@ -1,152 +1,152 @@ # -*- coding: utf-8 -*- # @Author: Theo Lemaire # @Date: 2018-09-26 09:51:43 # @Last Modified by: Theo Lemaire -# @Last Modified time: 2019-05-31 15:22:32 +# @Last Modified time: 2019-06-06 15:16:34 ''' Sub-panels of (duty-cycle x amplitude) US activation maps and related Q-V traces. 
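Panels (a), (b) and (c) correspond to pulse repetition frequencies of 10, 100 and 1000 Hz respectively; selected map locations (circled) are re-plotted as the Q-V traces shown alongside.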
''' import os import numpy as np import logging import matplotlib import matplotlib.pyplot as plt from argparse import ArgumentParser from PySONIC.core import NeuronalBilayerSonophore from PySONIC.utils import logger, selectDirDialog, si_format from PySONIC.plt import plotActivationMap, plotQVeff -from PySONIC.neurons import getNeuronsDict +from PySONIC.neurons import getPointNeuron # Plot parameters matplotlib.rcParams['pdf.fonttype'] = 42 matplotlib.rcParams['ps.fonttype'] = 42 matplotlib.rcParams['font.family'] = 'arial' # Figure basename figbase = os.path.splitext(__file__)[0] def plot_actmap(inputdir, neuron, a, Fdrive, tstim, amps, PRF, DCs, FRbounds, insets, prefix): mapcode = '{} {}Hz PRF{}Hz 1s'.format(neuron, *si_format([Fdrive, PRF, tstim], space='')) subdir = os.path.join(inputdir, mapcode) fig = plotActivationMap( subdir, neuron, a, Fdrive, tstim, PRF, amps, DCs, FRbounds=FRbounds, thrs=True) fig.canvas.set_window_title('{} map {}'.format(prefix, mapcode)) ax = fig.axes[0] DC_insets, A_insets = zip(*insets) ax.scatter(DC_insets, A_insets, s=80, facecolors='none', edgecolors='k', linestyle='--') return fig def plot_traces(inputdir, neuron, a, Fdrive, Adrive, tstim, PRF, DC, tmax, Vbounds, prefix): mapcode = '{} {}Hz PRF{}Hz 1s'.format(neuron, *si_format([Fdrive, PRF, tstim], space='')) subdir = os.path.join(inputdir, mapcode) - neuronobj = getNeuronsDict()[neuron]() + neuronobj = getPointNeuron(neuron) nbls = NeuronalBilayerSonophore(a, neuronobj) fname = '{}.pkl'.format(nbls.filecode(Fdrive, Adrive, tstim, 0., PRF, DC, 'sonic')) fpath = os.path.join(subdir, fname) fig = plotQVeff(fpath, tmax=tmax, ybounds=Vbounds) figcode = '{} VQ trace {} {:.1f}kPa {:.0f}%DC'.format(prefix, neuron, Adrive * 1e-3, DC * 1e2) fig.canvas.set_window_title(figcode) return fig def panel(inputdir, neurons, a, tstim, PRF, amps, DCs, FRbounds, tmax, Vbounds, insets, prefix): mapfigs = [ plot_actmap(inputdir, n, a, 500e3, tstim, amps, PRF, DCs, FRbounds, insets[n], prefix) for n in neurons ] tracefigs = [] for n in neurons: for inset in insets[n]: DC = inset[0] * 1e-2 Adrive = inset[1] * 1e3 tracefigs.append(plot_traces( inputdir, n, a, 500e3, Adrive, tstim, PRF, DC, tmax, Vbounds, prefix)) return mapfigs + tracefigs def main(): ap = ArgumentParser() # Runtime options ap.add_argument('-v', '--verbose', default=False, action='store_true', help='Increase verbosity') ap.add_argument('-i', '--inputdir', type=str, help='Input directory') ap.add_argument('-f', '--figset', type=str, nargs='+', help='Figure set', default='all') ap.add_argument('-s', '--save', default=False, action='store_true', help='Save output figures as pdf') args = ap.parse_args() loglevel = logging.DEBUG if args.verbose is True else logging.INFO logger.setLevel(loglevel) inputdir = selectDirDialog() if args.inputdir is None else args.inputdir if inputdir == '': logger.error('No input directory chosen') return figset = args.figset if figset == 'all': figset = ['a', 'b', 'c'] logger.info('Generating panel {} of {}'.format(figset, figbase)) # Parameters neurons = ['RS', 'LTS'] a = 32e-9 # m tstim = 1.0 # s amps = np.logspace(np.log10(10), np.log10(600), num=30) * 1e3 # Pa DCs = np.arange(1, 101) * 1e-2 FRbounds = (1e0, 1e3) # Hz tmax = 240 # ms Vbounds = -150, 50 # mV # Generate figures try: figs = [] if 'a' in figset: PRF = 1e1 insets = { 'RS': [(28, 127.0), (37, 168.4)], 'LTS': [(8, 47.3), (30, 146.2)] } figs += panel(inputdir, neurons, a, tstim, PRF, amps, DCs, FRbounds, tmax, Vbounds, insets, figbase + 'a') if 'b' in figset: PRF = 1e2 
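# Each inset tuple is a (duty cycle in %, amplitude in kPa) pair: plot_actmap() circles these
# locations on the activation map, and panel() converts them back to SI units to plot the
# corresponding traces.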
insets = { 'RS': [(51, 452.4), (56, 452.4)], 'LTS': [(13, 193.9), (43, 257.2)] } figs += panel(inputdir, neurons, a, tstim, PRF, amps, DCs, FRbounds, tmax, Vbounds, insets, figbase + 'b') if 'c' in figset: PRF = 1e3 insets = { 'RS': [(40, 110.2), (64, 193.9)], 'LTS': [(10, 47.3), (53, 168.4)] } figs += panel(inputdir, neurons, a, tstim, PRF, amps, DCs, FRbounds, tmax, Vbounds, insets, figbase + 'c') except Exception as e: logger.error(e) quit() if args.save: for fig in figs: figname = '{}.pdf'.format(fig.canvas.get_window_title()) fig.savefig(os.path.join(inputdir, figname), transparent=True) else: plt.show() if __name__ == '__main__': main() diff --git a/paper figures/fig8.py b/paper figures/fig8.py index dc506e4..30148fd 100644 --- a/paper figures/fig8.py +++ b/paper figures/fig8.py @@ -1,150 +1,150 @@ # -*- coding: utf-8 -*- # @Author: Theo Lemaire # @Date: 2018-11-27 17:57:45 # @Last Modified by: Theo Lemaire -# @Last Modified time: 2019-03-15 00:16:21 +# @Last Modified time: 2019-06-06 15:15:59 ''' Sub-panels of threshold curves for various sonophore radii and US frequencies. ''' import os import logging import numpy as np import pandas as pd import matplotlib import matplotlib.pyplot as plt from argparse import ArgumentParser -from PySONIC.neurons import getNeuronsDict +from PySONIC.neurons import getPointNeuron from PySONIC.utils import logger, si_format, selectDirDialog from PySONIC.plt import cm2inch # Plot parameters matplotlib.rcParams['pdf.fonttype'] = 42 matplotlib.rcParams['ps.fonttype'] = 42 matplotlib.rcParams['font.family'] = 'arial' # Figure basename figbase = os.path.splitext(__file__)[0] def getThresholdAmplitudes(root, neuron, a, Fdrive, tstim, PRF): subfolder = '{} {:.0f}nm {}Hz PRF{}Hz {}s'.format( neuron, a * 1e9, *si_format([Fdrive, PRF, tstim], 0, space='') ) fname = 'log_ASTIM.xlsx' fpath = os.path.join(root, subfolder, fname) df = pd.read_excel(fpath, sheet_name='Data') DCs = df['Duty factor'].values Athrs = df['Adrive (kPa)'].values iDCs = np.argsort(DCs) DCs = DCs[iDCs] Athrs = Athrs[iDCs] return DCs, Athrs def plotThresholdAmps(root, neurons, radii, freqs, PRF, tstim, fs=10, colors=None, figsize=None): ''' Plot threshold excitation amplitudes of several neurons determined by titration procedures, as a function of duty cycle, for various combinations of sonophore radius and US frequency. 
:param neurons: list of neuron names :param radii: list of sonophore radii (m) :param freqs: list US frequencies (Hz) :param PRF: pulse repetition frequency used for titration procedures (Hz) :param tstim: stimulus duration used for titration procedures :return: figure handle ''' if figsize is None: figsize = cm2inch(8, 7) linestyles = ['--', ':', '-.'] assert len(freqs) <= len(linestyles), 'too many frequencies' fig, ax = plt.subplots(figsize=figsize) ax.set_xlabel('Duty cycle (%)', fontsize=fs) ax.set_ylabel('Amplitude (kPa)', fontsize=fs) for item in ax.get_xticklabels() + ax.get_yticklabels(): item.set_fontsize(fs) ax.set_yscale('log') ax.set_xlim([0, 100]) ax.set_ylim([10, 600]) linestyles = ['-', '--'] for neuron, ls in zip(neurons, linestyles): - neuron = getNeuronsDict()[neuron]() + neuron = getPointNeuron(neuron) icolor = 0 for i, a in enumerate(radii): for j, Fdrive in enumerate(freqs): if colors is None: color = 'C{}'.format(icolor) else: color = colors[icolor] DCs, Athrs = getThresholdAmplitudes(root, neuron.name, a, Fdrive, tstim, PRF) lbl = '{} neuron, {:.0f} nm, {}Hz, {}Hz PRF'.format( neuron.name, a * 1e9, *si_format([Fdrive, PRF], 0, space=' ')) ax.plot(DCs * 1e2, Athrs, ls, c=color, label=lbl) icolor += 1 ax.legend(fontsize=fs - 5, frameon=False) fig.tight_layout() return fig def main(): ap = ArgumentParser() # Runtime options ap.add_argument('-v', '--verbose', default=False, action='store_true', help='Increase verbosity') ap.add_argument('-i', '--inputdir', type=str, help='Input directory') ap.add_argument('-f', '--figset', type=str, nargs='+', help='Figure set', default='all') ap.add_argument('-s', '--save', default=False, action='store_true', help='Save output figures as pdf') args = ap.parse_args() loglevel = logging.DEBUG if args.verbose is True else logging.INFO logger.setLevel(loglevel) inputdir = selectDirDialog() if args.inputdir is None else args.inputdir if inputdir == '': logger.error('No input directory chosen') return figset = args.figset if figset == 'all': figset = ['a', 'b'] logger.info('Generating panels {} of {}'.format(figset, figbase)) # Parameters neurons = ['RS', 'LTS'] radii = np.array([16, 32, 64]) * 1e-9 # m a = radii[1] freqs = np.array([20, 500, 4000]) * 1e3 # Hz Fdrive = freqs[1] PRFs = np.array([1e1, 1e2, 1e3]) # Hz PRF = PRFs[1] tstim = 1 # s colors = plt.get_cmap('tab20c').colors # Generate figures figs = [] if 'a' in figset: fig = plotThresholdAmps(inputdir, neurons, radii, [Fdrive], PRF, tstim, fs=12, colors=colors[:3][::-1]) fig.canvas.set_window_title(figbase + 'a') figs.append(fig) if 'b' in figset: fig = plotThresholdAmps(inputdir, neurons, [a], freqs, PRF, tstim, fs=12, colors=colors[8:11][::-1]) fig.canvas.set_window_title(figbase + 'b') figs.append(fig) if args.save: for fig in figs: figname = '{}.pdf'.format(fig.canvas.get_window_title()) fig.savefig(os.path.join(inputdir, figname), transparent=True) else: plt.show() if __name__ == '__main__': main() diff --git a/paper figures/fig9.py b/paper figures/fig9.py index 9bbcd8f..0d5c444 100644 --- a/paper figures/fig9.py +++ b/paper figures/fig9.py @@ -1,106 +1,106 @@ # -*- coding: utf-8 -*- # @Author: Theo Lemaire # @Date: 2018-12-09 12:06:01 # @Last Modified by: Theo Lemaire -# @Last Modified time: 2019-05-31 15:23:40 +# @Last Modified time: 2019-06-06 18:22:09 ''' Sub-panels of SONIC model validation on an STN neuron (response to CW sonication). 
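Panel (a) maps firing-rate profiles of the charge response over a range of low stimulation intensities, while panel (b) shows charge density traces for a few selected intensities.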
''' import os import logging import numpy as np import matplotlib import matplotlib.pyplot as plt from argparse import ArgumentParser from PySONIC.core import NeuronalBilayerSonophore from PySONIC.neurons import OtsukaSTN from PySONIC.utils import logger, selectDirDialog, getLowIntensitiesSTN, Intensity2Pressure -from PySONIC.plt import plotFRProfile, plotBatch +from PySONIC.plt import plotFRProfile, SchemePlot # Plot parameters matplotlib.rcParams['pdf.fonttype'] = 42 matplotlib.rcParams['ps.fonttype'] = 42 matplotlib.rcParams['font.family'] = 'arial' # Figure basename figbase = os.path.splitext(__file__)[0] def main(): ap = ArgumentParser() # Runtime options ap.add_argument('-v', '--verbose', default=False, action='store_true', help='Increase verbosity') ap.add_argument('-i', '--inputdir', type=str, help='Input directory') ap.add_argument('-f', '--figset', type=str, nargs='+', help='Figure set', default='all') ap.add_argument('-s', '--save', default=False, action='store_true', help='Save output figures as pdf') args = ap.parse_args() loglevel = logging.DEBUG if args.verbose is True else logging.INFO logger.setLevel(loglevel) inputdir = selectDirDialog() if args.inputdir is None else args.inputdir if inputdir == '': logger.error('No input directory chosen') return figset = args.figset if figset is 'all': figset = ['a', 'b'] logger.info('Generating panels {} of {}'.format(figset, figbase)) # Parameters neuron = OtsukaSTN() a = 32e-9 # m Fdrive = 500e3 # Hz tstim = 1 # s toffset = 0. # s PRF = 1e2 DC = 1. nbls = NeuronalBilayerSonophore(a, neuron) # Range of intensities intensities = getLowIntensitiesSTN() # W/m2 # Levels depicted with individual traces subset_intensities = [112, 114, 123] # W/m2 # convert to amplitudes and get filepaths amplitudes = Intensity2Pressure(intensities) # Pa fnames = ['{}.pkl'.format(nbls.filecode(Fdrive, A, tstim, toffset, PRF, DC, 'sonic')) for A in amplitudes] fpaths = [os.path.join(inputdir, 'STN', fn) for fn in fnames] # Generate figures figs = [] if 'a' in figset: fig = plotFRProfile(fpaths, 'Qm', no_offset=True, no_first=False, zref='A', zscale='lin', cmap='Oranges') fig.canvas.set_window_title(figbase + 'a') figs.append(fig) if 'b' in figset: isubset = [np.argwhere(intensities == x)[0][0] for x in subset_intensities] subset_amplitudes = amplitudes[isubset] titles = ['{:.2f} kPa ({:.0f} W/m2)'.format(A * 1e-3, I) for A, I in zip(subset_amplitudes, subset_intensities)] print(titles) - figtraces = plotBatch([fpaths[i] for i in isubset], pltscheme={'Q_m': ['Qm']}) + figtraces = SchemePlot([fpaths[i] for i in isubset], pltscheme={'Q_m': ['Qm']})() for fig, title in zip(figtraces, titles): fig.axes[0].set_title(title) fig.canvas.set_window_title(figbase + 'b {}'.format(title)) figs.append(fig) if args.save: for fig in figs: s = fig.canvas.get_window_title() s = s.replace('(', '- ').replace('/', '_').replace(')', '') figname = '{}.pdf'.format(s) fig.savefig(os.path.join(inputdir, figname), transparent=True) else: plt.show() if __name__ == '__main__': main() diff --git a/paper figures/utils.py b/paper figures/utils.py index 6bd62db..cc81422 100644 --- a/paper figures/utils.py +++ b/paper figures/utils.py @@ -1,120 +1,120 @@ # -*- coding: utf-8 -*- # @Author: Theo Lemaire # @Date: 2018-10-01 20:45:29 # @Last Modified by: Theo Lemaire -# @Last Modified time: 2019-06-02 13:42:06 +# @Last Modified time: 2019-06-06 15:15:30 import os import numpy as np import pandas as pd from PySONIC.utils import * from PySONIC.core import NeuronalBilayerSonophore from 
PySONIC.neurons import * from PySONIC.postpro import computeSpikingMetrics def getCWtitrations_vs_Fdrive(neurons, a, freqs, tstim, toffset, fpath): fkey = 'Fdrive (kHz)' freqs = np.array(freqs) if os.path.isfile(fpath): df = pd.read_csv(fpath, sep=',', index_col=fkey) else: df = pd.DataFrame(index=freqs * 1e-3) for neuron in neurons: if neuron not in df: - neuronobj = getNeuronsDict()[neuron]() + neuronobj = getPointNeuron(neuron) nbls = NeuronalBilayerSonophore(a, neuronobj) for i, Fdrive in enumerate(freqs): logger.info('Running CW titration for %s neuron @ %sHz', neuron, si_format(Fdrive)) Athr = nbls.titrate(Fdrive, tstim, toffset) # Pa df.loc[Fdrive * 1e-3, neuron] = np.ceil(Athr * 1e-2) / 10 df.sort_index(inplace=True) df.to_csv(fpath, sep=',', index_label=fkey) return df def getCWtitrations_vs_radius(neurons, radii, Fdrive, tstim, toffset, fpath): akey = 'radius (nm)' radii = np.array(radii) if os.path.isfile(fpath): df = pd.read_csv(fpath, sep=',', index_col=akey) else: df = pd.DataFrame(index=radii * 1e9) for neuron in neurons: if neuron not in df: - neuronobj = getNeuronsDict()[neuron]() + neuronobj = getPointNeuron(neuron) for a in radii: nbls = NeuronalBilayerSonophore(a, neuronobj) logger.info( 'Running CW titration for %s neuron @ %sHz (%.2f nm sonophore radius)', neuron, si_format(Fdrive), a * 1e9) Athr = nbls.titrate(Fdrive, tstim, toffset) # Pa df.loc[a * 1e9, neuron] = np.ceil(Athr * 1e-2) / 10 df.sort_index(inplace=True) df.to_csv(fpath, sep=',', index_label=akey) return df def getSims(outdir, neuron, a, queue): fpaths = [] updated_queue = [] - neuronobj = getNeuronsDict()[neuron]() + neuronobj = getPointNeuron(neuron) nbls = NeuronalBilayerSonophore(a, neuronobj) for i, item in enumerate(queue): Fdrive, tstim, toffset, PRF, DC, Adrive, method = item fcode = nbls.filecode(Fdrive, Adrive, tstim, toffset, PRF, DC, method) fpath = os.path.join(outdir, '{}.pkl'.format(fcode)) if not os.path.isfile(fpath): print(fpath, 'does not exist') item.insert(0, outdir) updated_queue.append(item) fpaths.append(fpath) if len(updated_queue) > 0: print(updated_queue) - # neuron = getNeuronsDict()[neuron]() + # neuron = getPointNeuron(neuron) # nbls = NeuronalBilayerSonophore(a, neuron) # batch = Batch(nbls.runAndSave, updated_queue) # batch.run(mpi=True) return fpaths def getSpikingMetrics(outdir, neuron, xvar, xkey, data_fpaths, metrics_fpaths): metrics = {} for stype in data_fpaths.keys(): if os.path.isfile(metrics_fpaths[stype]): logger.info('loading spiking metrics from file: "%s"', metrics_fpaths[stype]) metrics[stype] = pd.read_csv(metrics_fpaths[stype], sep=',') else: logger.warning('computing %s spiking metrics vs. %s for %s neuron', stype, xkey, neuron) metrics[stype] = computeSpikingMetrics(data_fpaths[stype]) metrics[stype][xkey] = pd.Series(xvar, index=metrics[stype].index) metrics[stype].to_csv(metrics_fpaths[stype], sep=',', index=False) return metrics def extractCompTimes(filenames): ''' Extract computation times from a list of simulation files.
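:param filenames: list of paths to simulation output files (.pkl)
:return: array of computation times, one per file, extracted from each file's metadata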
''' tcomps = np.empty(len(filenames)) for i, fn in enumerate(filenames): logger.info('Loading data from "%s"', fn) with open(fn, 'rb') as fh: frame = pickle.load(fh) meta = frame['meta'] tcomps[i] = meta['tcomp'] return tcomps def getCompTimesQuant(outdir, neuron, xvars, xkey, data_fpaths, comptimes_fpath): if os.path.isfile(comptimes_fpath): logger.info('reading computation times from file: "%s"', comptimes_fpath) comptimes = pd.read_csv(comptimes_fpath, sep=',', index_col=xkey) else: logger.warning('extracting computation times for %s neuron', neuron) comptimes = pd.DataFrame(index=xvars) for stype in data_fpaths.keys(): for i, xvar in enumerate(xvars): comptimes.loc[xvar, stype] = extractCompTimes([data_fpaths[stype][i]]) comptimes.to_csv(comptimes_fpath, sep=',', index_label=xkey) return comptimes diff --git a/scripts/generate_mod_file.py b/scripts/generate_mod_file.py index 153d490..0d8f542 100644 --- a/scripts/generate_mod_file.py +++ b/scripts/generate_mod_file.py @@ -1,37 +1,41 @@ # -*- coding: utf-8 -*- # @Author: Theo # @Date: 2019-03-18 18:06:20 # @Last Modified by: Theo Lemaire -# @Last Modified time: 2019-04-30 13:38:11 +# @Last Modified time: 2019-06-06 15:10:17 import os import logging from argparse import ArgumentParser -from PySONIC.neurons import getNeuronsDict +from PySONIC.neurons import getPointNeuron from PySONIC.utils import logger, selectDirDialog from PySONIC.core import NmodlGenerator def main(): ap = ArgumentParser() ap.add_argument('-n', '--neuron', type=str, default='RS', help='Neuron name (string)') ap.add_argument('-o', '--outputdir', type=str, default=None, help='Output directory') logger.setLevel(logging.INFO) args = ap.parse_args() - neuron = getNeuronsDict()[args.neuron]() + try: + neuron = getPointNeuron(args.neuron) + except ValueError as err: + logger.error(err) + return outdir = args.outputdir if args.outputdir is not None else selectDirDialog() if outdir == '': logger.error('No output directory selected') quit() outfile = '{}.mod'.format(args.neuron) outpath = os.path.join(outdir, outfile) gen = NmodlGenerator(neuron) logger.info('generating %s neuron MOD file in "%s"', neuron.name, outdir) gen.print(outpath) if __name__ == '__main__': main() diff --git a/scripts/plot_effective_variables.py b/scripts/plot_effective_variables.py index 14e7705..1da174c 100644 --- a/scripts/plot_effective_variables.py +++ b/scripts/plot_effective_variables.py @@ -1,73 +1,76 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- # @Author: Theo Lemaire # @Date: 2017-02-15 15:59:37 # @Email: theo.lemaire@epfl.ch # @Last Modified by: Theo Lemaire -# @Last Modified time: 2019-03-11 14:35:27 +# @Last Modified time: 2019-06-06 15:09:23 ''' Plot the effective variables as a function of charge density with color code. 
''' import logging import matplotlib.pyplot as plt from argparse import ArgumentParser from PySONIC.plt import plotEffectiveVariables from PySONIC.utils import logger, Intensity2Pressure, getLowIntensitiesSTN -from PySONIC.neurons import getNeuronsDict +from PySONIC.neurons import getPointNeuron # Set logging level logger.setLevel(logging.INFO) def main(): ap = ArgumentParser() # Stimulation parameters ap.add_argument('-n', '--neuron', type=str, default='RS', help='Neuron name (string)') ap.add_argument('-a', '--radius', type=float, default=None, help='Sonophore radius (nm)') ap.add_argument('-f', '--freq', type=float, default=None, help='US frequency (kHz)') ap.add_argument('-A', '--amp', type=float, default=None, help='Acoustic pressure amplitude (kPa)') ap.add_argument('--log', action='store_true', default=False, help='Log color scale') ap.add_argument('-c', '--cmap', type=str, default=None, help='Colormap name') ap.add_argument('--ncol', type=int, default=1, help='Number of columns in figure') ap.add_argument('-v', '--verbose', default=False, action='store_true', help='Increase verbosity') # Parse arguments args = {key: value for key, value in vars(ap.parse_args()).items() if value is not None} neuron_str = args['neuron'] a = args['radius'] * 1e-9 if 'radius' in args else None # m Fdrive = args['freq'] * 1e3 if 'freq' in args else None # Hz Adrive = args['amp'] * 1e3 if 'amp' in args else None # Pa # Range of intensities if neuron_str == 'STN': intensities = getLowIntensitiesSTN() # W/m2 Adrive = Intensity2Pressure(intensities) # Pa zscale = 'log' if args['log'] else 'lin' cmap = args.get('cmap', None) ncol = args['ncol'] loglevel = logging.DEBUG if args['verbose'] is True else logging.INFO logger.setLevel(loglevel) - # Plot effective variables - if neuron_str not in getNeuronsDict(): - logger.error('Unknown neuron type: "%s"', neuron_str) + # Check neuron name validity + try: + neuron = getPointNeuron(neuron_str) + except ValueError as err: + logger.error(err) return - neuron = getNeuronsDict()[neuron_str]() + + # Plot effective variables plotEffectiveVariables(neuron, a=a, Fdrive=Fdrive, Adrive=Adrive, zscale=zscale, cmap=cmap, ncolmax=ncol) plt.show() if __name__ == '__main__': main() diff --git a/scripts/plot_gating_kinetics.py b/scripts/plot_gating_kinetics.py index dd163a1..ac571f7 100644 --- a/scripts/plot_gating_kinetics.py +++ b/scripts/plot_gating_kinetics.py @@ -1,131 +1,134 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- # @Author: Theo Lemaire # @Date: 2016-10-11 20:35:38 # @Email: theo.lemaire@epfl.ch # @Last Modified by: Theo Lemaire -# @Last Modified time: 2019-03-14 23:37:42 +# @Last Modified time: 2019-06-06 15:07:26 ''' Plot the voltage-dependent steady-states and time constants of activation and inactivation gates of the different ionic currents involved in the neuron's membrane dynamics. ''' import numpy as np import matplotlib.pyplot as plt from argparse import ArgumentParser from PySONIC.utils import logger -from PySONIC.neurons import getNeuronsDict +from PySONIC.neurons import getPointNeuron # Default parameters defaults = dict( neuron='RS' ) def plotGatingKinetics(neuron, fs=15): ''' Plot the voltage-dependent steady-states and time constants of activation and inactivation gates of the different ionic currents involved in a specific neuron's membrane. 
 
         :param neuron: specific channel mechanism object
         :param fs: labels and title font size
     '''
 
     # Input membrane potential vector
     Vm = np.linspace(-100, 50, 300)
 
     xinf_dict = {}
     taux_dict = {}
 
     logger.info('Computing %s neuron gating kinetics', neuron.name)
     names = neuron.states
     for xname in names:
         Vm_state = True
 
         # Names of functions of interest
         xinf_func_str = xname.lower() + 'inf'
         taux_func_str = 'tau' + xname.lower()
         alphax_func_str = 'alpha' + xname.lower()
         betax_func_str = 'beta' + xname.lower()
         # derx_func_str = 'der' + xname.upper()
 
         # 1st choice: use xinf and taux function
         if hasattr(neuron, xinf_func_str) and hasattr(neuron, taux_func_str):
             xinf_func = getattr(neuron, xinf_func_str)
             taux_func = getattr(neuron, taux_func_str)
             xinf = np.array([xinf_func(v) for v in Vm])
             if isinstance(taux_func, float):
                 taux = taux_func * np.ones(len(Vm))
             else:
                 taux = np.array([taux_func(v) for v in Vm])
 
         # 2nd choice: use alphax and betax functions
         elif hasattr(neuron, alphax_func_str) and hasattr(neuron, betax_func_str):
             alphax_func = getattr(neuron, alphax_func_str)
             betax_func = getattr(neuron, betax_func_str)
             alphax = np.array([alphax_func(v) for v in Vm])
             if isinstance(betax_func, float):
                 betax = betax_func * np.ones(len(Vm))
             else:
                 betax = np.array([betax_func(v) for v in Vm])
             taux = 1.0 / (alphax + betax)
             xinf = taux * alphax
 
         # # 3rd choice: use derX choice
         # elif hasattr(neuron, derx_func_str):
         #     derx_func = getattr(neuron, derx_func_str)
         #     xinf = brentq(lambda x: derx_func(neuron.Vm, x), 0, 1)
 
         else:
             Vm_state = False
 
         if not Vm_state:
             logger.error('no function to compute %s-state gating kinetics', xname)
         else:
             xinf_dict[xname] = xinf
             taux_dict[xname] = taux
 
     fig, axes = plt.subplots(2)
     fig.suptitle('{} neuron: gating dynamics'.format(neuron.name))
 
     ax = axes[0]
     ax.get_xaxis().set_ticklabels([])
     ax.set_ylabel('$X_{\infty}$', fontsize=fs)
     for xname in names:
         if xname in xinf_dict:
             ax.plot(Vm, xinf_dict[xname], lw=2, label='$' + xname + '_{\infty}$')
     ax.legend(fontsize=fs, loc=7)
 
     ax = axes[1]
     ax.set_xlabel('$V_m\ (mV)$', fontsize=fs)
     ax.set_ylabel('$\\tau_X\ (ms)$', fontsize=fs)
     for xname in names:
         if xname in taux_dict:
             ax.plot(Vm, taux_dict[xname] * 1e3, lw=2, label='$\\tau_{' + xname + '}$')
     ax.legend(fontsize=fs, loc=7)
 
     return fig
 
 
 def main():
     ap = ArgumentParser()
 
     # Stimulation parameters
     ap.add_argument('-n', '--neuron', type=str, default=defaults['neuron'],
                     help='Neuron name (string)')
 
     # Parse arguments
     args = ap.parse_args()
     neuron_str = args.neuron
 
-    # Plot gating kinetics variables
-    if neuron_str not in getNeuronsDict():
-        logger.error('Unknown neuron type: "%s"', neuron_str)
+    # Check neuron name validity
+    try:
+        neuron = getPointNeuron(neuron_str)
+    except ValueError as err:
+        logger.error(err)
         return
-    neuron = getNeuronsDict()[neuron_str]()
+
+    # Plot gating kinetics variables
     plotGatingKinetics(neuron)
     plt.show()
 
 
 if __name__ == '__main__':
     main()
diff --git a/scripts/plot_rheobase_amps.py b/scripts/plot_rheobase_amps.py
index 0708f71..8d84c4b 100644
--- a/scripts/plot_rheobase_amps.py
+++ b/scripts/plot_rheobase_amps.py
@@ -1,69 +1,69 @@
 # -*- coding: utf-8 -*-
 # @Author: Theo
 # @Date: 2018-04-30 21:06:10
 # @Last Modified by: Theo Lemaire
-# @Last Modified time: 2018-11-21 16:27:13
+# @Last Modified time: 2019-06-06 15:08:17
 
 ''' Plot duty-cycle dependent rheobase acoustic amplitudes of various neurons
     for a specific US frequency and PRF. '''
 
 import logging
 import numpy as np
 import matplotlib.pyplot as plt
 from argparse import ArgumentParser
 
 from PySONIC.utils import logger
-# from PySONIC.core import NeuronalBilayerSonophore
-from PySONIC.neurons import getNeuronsDict
+from PySONIC.neurons import getPointNeuron
 from PySONIC.plt import plotAstimRheobaseAmps, plotEstimRheobaseAmps
 
 # Set logging level
 logger.setLevel(logging.INFO)
 
 # Default parameters
 defaults = dict(
     neuron='RS',
     radii=[32.0],
     freqs=[500.0]
 )
 
 
 def main():
     ap = ArgumentParser()
 
     # Stimulation parameters
     ap.add_argument('-n', '--neuron', type=str, default=defaults['neuron'],
                     help='Neuron name (string)')
     ap.add_argument('-a', '--radii', type=float, nargs='+', default=defaults['radii'],
                     help='Sonophore radius (nm)')
     ap.add_argument('-f', '--freqs', type=float, nargs='+', default=defaults['freqs'],
                     help='US frequency (kHz)')
     ap.add_argument('-m', '--mode', type=str, default='US',
                     help='Stimulation modality (US or elec)')
 
     # Parse arguments
     args = {key: value for key, value in vars(ap.parse_args()).items() if value is not None}
     mode = args['mode']
 
     # Get neurons objects from names
     neuron_str = args.get('neuron', defaults['neuron'])
-    if neuron_str not in getNeuronsDict():
-        logger.error('Invalid neuron type: "%s"', neuron_str)
+    try:
+        neuron = getPointNeuron(neuron_str)
+    except ValueError as err:
+        logger.error(err)
         return
-    neuron = getNeuronsDict()[neuron_str]()
 
     if mode == 'US':
         radii = np.array(args['radii']) * 1e-9  # m
         freqs = np.array(args['freqs']) * 1e3  # Hz
         plotAstimRheobaseAmps(neuron, radii, freqs)
     elif mode == 'elec':
         plotEstimRheobaseAmps(neuron)
     else:
         logger.error('Invalid stimulation type: "%s"', mode)
         return
 
     plt.show()
 
 
 if __name__ == '__main__':
     main()
diff --git a/scripts/plot_tprofiles.py b/scripts/plot_timeseries.py
similarity index 82%
rename from scripts/plot_tprofiles.py
rename to scripts/plot_timeseries.py
index a771adc..6610010 100644
--- a/scripts/plot_tprofiles.py
+++ b/scripts/plot_timeseries.py
@@ -1,65 +1,71 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 # @Author: Theo Lemaire
 # @Date: 2017-02-13 12:41:26
 # @Email: theo.lemaire@epfl.ch
 # @Last Modified by: Theo Lemaire
-# @Last Modified time: 2019-03-15 01:11:57
+# @Last Modified time: 2019-06-06 18:13:12
 
 ''' Plot temporal profiles of specific simulation output variables. '''
 
 import logging
 from argparse import ArgumentParser
 import matplotlib.pyplot as plt
 
 from PySONIC.utils import logger, OpenFilesDialog, selectDirDialog
-from PySONIC.plt import plotComp, plotBatch
+from PySONIC.plt import ComparativePlot, SchemePlot
 
 # Set logging level
 logger.setLevel(logging.INFO)
 
 
 def main():
     ap = ArgumentParser()
 
     # Runtime options
     ap.add_argument('-v', '--verbose', default=False, action='store_true', help='Increase verbosity')
     ap.add_argument('--hide', default=False, action='store_true', help='Hide output')
     ap.add_argument('-o', '--outputdir', type=str, default=None, help='Output directory')
     ap.add_argument('-c', '--compare', default=False, action='store_true', help='Comparative graph')
     ap.add_argument('-s', '--save', default=False, action='store_true', help='Save output')
     ap.add_argument('-p', '--plot', type=str, nargs='+', default=None, help='Variables to plot')
     ap.add_argument('-f', '--frequency', type=int, default=1, help='Sampling frequency for plot')
 
     # Parse arguments
     args = {key: value for key, value in vars(ap.parse_args()).items() if value is not None}
     logger.setLevel(logging.DEBUG if args['verbose'] else logging.INFO)
 
     # Select data files
     pkl_filepaths, _ = OpenFilesDialog('pkl')
     if not pkl_filepaths:
         logger.error('No input file')
         return
 
     # Plot appropriate graph
     if args['compare']:
         if 'plot' not in args:
             logger.error('Plot variable must be specified')
             quit()
         try:
-            plotComp(pkl_filepaths, varname=args['plot'][0])
+            comp_plot = ComparativePlot(pkl_filepaths, args['plot'][0])
+            comp_plot.render()
         except KeyError as e:
             logger.error(e)
             quit()
     else:
         pltscheme = {key: [key] for key in args['plot']} if 'plot' in args else None
         if 'outputdir' not in args:
             args['outputdir'] = selectDirDialog() if args['save'] else None
-        plotBatch(pkl_filepaths, title=True, pltscheme=pltscheme, directory=args['outputdir'],
-                  plt_save=args['save'], ask_before_save=not args['save'])
+        scheme_plot = SchemePlot(pkl_filepaths, pltscheme=pltscheme)
+        scheme_plot.render(
+            title=True,
+            save=args['save'],
+            ask_before_save=not args['save'],
+            directory=args['outputdir']
+        )
 
     if not args['hide']:
         plt.show()
 
 
 if __name__ == '__main__':
     main()
diff --git a/scripts/run_astim.py b/scripts/run_astim.py
index d6505a8..652e267 100644
--- a/scripts/run_astim.py
+++ b/scripts/run_astim.py
@@ -1,55 +1,54 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 # @Author: Theo Lemaire
 # @Date: 2017-02-13 18:16:09
 # @Email: theo.lemaire@epfl.ch
 # @Last Modified by: Theo Lemaire
-# @Last Modified time: 2019-06-03 15:50:34
+# @Last Modified time: 2019-06-06 18:20:17
 
 ''' Run A-STIM simulations of a specific point-neuron. '''
 
 import matplotlib.pyplot as plt
 
 from PySONIC.core import NeuronalBilayerSonophore, Batch
 from PySONIC.utils import logger
-from PySONIC.plt import plotBatch
+from PySONIC.plt import SchemePlot
 from PySONIC.parsers import AStimParser
 
 
 def main():
     # Parse command line arguments
     parser = AStimParser()
     parser.addOutputDir()
     args = parser.parse()
     logger.setLevel(args['loglevel'])
     args['outputdir'] = parser.parseOutputDir(args)
 
     # Run A-STIM batch
     logger.info("Starting A-STIM simulation batch")
     pkl_filepaths = []
     for a in args['radius']:
         for neuron in args['neuron']:
             nbls = NeuronalBilayerSonophore(a, neuron)
             queue = nbls.simQueue(
                 args['freq'], args['amp'], args['tstim'], args['toffset'],
                 args['PRF'], args['DC'], args['method'][0]
             )
             for item in queue:
                 item.insert(0, args['outputdir'])
             batch = Batch(nbls.runAndSave, queue)
             pkl_filepaths += batch(mpi=args['mpi'], loglevel=args['loglevel'])
 
     # Plot resulting profiles
     if args['plot'] is not None:
-        pltscheme = parser.parsePltScheme(args)
-        plotBatch(pkl_filepaths, pltscheme=pltscheme)
+        SchemePlot(pkl_filepaths, pltscheme=parser.parsePltScheme(args))()
         plt.show()
 
 
 if __name__ == '__main__':
     main()
diff --git a/scripts/run_estim.py b/scripts/run_estim.py
index 89410e3..bc4cbbd 100644
--- a/scripts/run_estim.py
+++ b/scripts/run_estim.py
@@ -1,49 +1,48 @@
 # -*- coding: utf-8 -*-
 # @Author: Theo Lemaire
 # @Date: 2017-08-24 11:55:07
 # @Last Modified by: Theo Lemaire
-# @Last Modified time: 2019-06-03 15:51:04
+# @Last Modified time: 2019-06-06 18:20:45
 
 ''' Run E-STIM simulations of a specific point-neuron. '''
 
 import matplotlib.pyplot as plt
 
 from PySONIC.core import Batch
 from PySONIC.utils import logger
-from PySONIC.plt import plotBatch
+from PySONIC.plt import SchemePlot
 from PySONIC.parsers import EStimParser
 
 
 def main():
     # Parse command line arguments
     parser = EStimParser()
     parser.addOutputDir()
     args = parser.parse()
     logger.setLevel(args['loglevel'])
     args['outputdir'] = parser.parseOutputDir(args)
 
     # Run E-STIM batch
     logger.info("Starting E-STIM simulation batch")
     pkl_filepaths = []
     for neuron in args['neuron']:
         queue = neuron.simQueue(
             args['amp'], args['tstim'], args['toffset'], args['PRF'], args['DC'],
         )
         for item in queue:
             item.insert(0, args['outputdir'])
         batch = Batch(neuron.runAndSave, queue)
         pkl_filepaths += batch(mpi=args['mpi'], loglevel=args['loglevel'])
 
     # Plot resulting profiles
     if args['plot'] is not None:
-        pltscheme = parser.parsePltScheme(args)
-        plotBatch(pkl_filepaths, pltscheme=pltscheme)
+        SchemePlot(pkl_filepaths, pltscheme=parser.parsePltScheme(args))()
         plt.show()
 
 
 if __name__ == '__main__':
     main()
diff --git a/scripts/run_lookups.py b/scripts/run_lookups.py
index b888630..8ca5812 100644
--- a/scripts/run_lookups.py
+++ b/scripts/run_lookups.py
@@ -1,217 +1,218 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 # @Author: Theo Lemaire
 # @Date: 2017-06-02 17:50:10
 # @Email: theo.lemaire@epfl.ch
 # @Last Modified by: Theo Lemaire
-# @Last Modified time: 2019-06-02 13:45:43
+# @Last Modified time: 2019-06-06 15:05:34
 
 ''' Create lookup table for a specific neuron. '''
 
 import os
 import itertools
 import pickle
 import logging
 import numpy as np
 from argparse import ArgumentParser
 
 from PySONIC.utils import logger, getNeuronLookupsFile
-from PySONIC.neurons import getNeuronsDict
+from PySONIC.neurons import getPointNeuron
 from PySONIC.core import NeuronalBilayerSonophore, createQueue, Batch
 
 
 # Default parameters
 defaults = dict(
     neuron='RS',
     radius=np.array([16.0, 32.0, 64.0]),  # nm
     freq=np.array([20., 100., 500., 1e3, 2e3, 3e3, 4e3]),  # kHz
     amp=np.insert(np.logspace(np.log10(0.1), np.log10(600), num=50), 0, 0.0),  # kPa
 )
 
 
 def computeAStimLookups(neuron, aref, fref, Aref, Qref, fsref=None,
                         mpi=False, loglevel=logging.INFO):
     ''' Run simulations of the mechanical system for multiple combinations of imposed sonophore
         radii, US frequencies, acoustic amplitudes, charge densities and (spatially-averaged)
         sonophore membrane coverage fractions, compute effective coefficients and store them
         in a dictionary of n-dimensional arrays.
 
         :param neuron: neuron object
         :param aref: array of sonophore radii (m)
         :param fref: array of acoustic drive frequencies (Hz)
         :param Aref: array of acoustic drive amplitudes (Pa)
         :param Qref: array of membrane charge densities (C/m2)
         :param fsref: array of sonophore membrane coverage fractions (-)
         :param mpi: boolean stating whether or not to use multiprocessing
         :param loglevel: logging level
         :return: lookups dictionary
     '''
 
     descs = {
         'a': 'sonophore radii',
         'f': 'US frequencies',
         'A': 'US amplitudes',
         'fs': 'sonophore membrane coverage fractions'
     }
 
     # Populate inputs dictionary
     inputs = {
         'a': aref,  # nm
         'f': fref,  # Hz
         'A': Aref,  # Pa
         'Q': Qref  # C/m2
     }
 
     # Add fs to inputs if provided, otherwise add default value (1)
     err_fs = 'cannot span {} for more than 1 {}'
     if fsref is not None:
         for x in ['a', 'f']:
             assert inputs[x].size == 1, err_fs.format(descs['fs'], descs[x])
         inputs['fs'] = fsref
     else:
         inputs['fs'] = np.array([1.])
 
     # Check validity of input parameters
     for key, values in inputs.items():
         if not (isinstance(values, list) or isinstance(values, np.ndarray)):
             raise TypeError(
                 'Invalid {} (must be provided as list or numpy array)'.format(descs[key]))
         if not all(isinstance(x, float) for x in values):
             raise TypeError('Invalid {} (must all be float typed)'.format(descs[key]))
         if len(values) == 0:
             raise ValueError('Empty {} array'.format(key))
         if key in ('a', 'f') and min(values) <= 0:
             raise ValueError('Invalid {} (must all be strictly positive)'.format(descs[key]))
         if key in ('A', 'fs') and min(values) < 0:
             raise ValueError('Invalid {} (must all be positive or null)'.format(descs[key]))
 
     # Get dimensions of inputs that have more than one value
     dims = np.array([x.size for x in inputs.values()])
     dims = dims[dims > 1]
     ncombs = dims.prod()
 
     # Create simulation queue per radius
     queue = createQueue(fref, Aref, Qref)
     for i in range(len(queue)):
         queue[i].append(inputs['fs'])
 
     # Run simulations and populate outputs (list of lists)
     logger.info('Starting simulation batch for %s neuron', neuron.name)
     outputs = []
     for a in aref:
         nbls = NeuronalBilayerSonophore(a, neuron)
         batch = Batch(nbls.computeEffVars, queue)
         outputs += batch(mpi=mpi, loglevel=loglevel)
 
     # Split comp times and effvars from outputs
     tcomps, effvars = [list(x) for x in zip(*outputs)]
     effvars = list(itertools.chain.from_iterable(effvars))
 
     # Reshape effvars into nD arrays and add them to lookups dictionary
     logger.info('Reshaping output into lookup tables')
     varkeys = list(effvars[0].keys())
     nout = len(effvars)
     assert nout == ncombs, 'number of outputs does not match number of combinations'
     lookups = {}
     for key in varkeys:
         effvar = [effvars[i][key] for i in range(nout)]
         lookups[key] = np.array(effvar).reshape(dims)
 
     # Reshape comp times into nD array (minus fs dimension)
     if fsref is not None:
         dims = dims[:-1]
     tcomps = np.array(tcomps).reshape(dims)
 
     # Store inputs, lookup data and comp times in dictionary
     df = {
         'input': inputs,
         'lookup': lookups,
         'tcomp': tcomps
     }
 
     return df
 
 
 def main():
     ap = ArgumentParser()
 
     # Runtime options
     ap.add_argument('--mpi', default=False, action='store_true', help='Use multiprocessing')
     ap.add_argument('-v', '--verbose', default=False, action='store_true', help='Increase verbosity')
     ap.add_argument('-t', '--test', default=False, action='store_true', help='Test configuration')
 
     # Stimulation parameters
     ap.add_argument('-n', '--neuron', type=str, default=defaults['neuron'],
                     help='Neuron name (string)')
     ap.add_argument('-a', '--radius', nargs='+', type=float, help='Sonophore radius (nm)')
     ap.add_argument('-f', '--freq', nargs='+', type=float, help='US frequency (kHz)')
     ap.add_argument('-A', '--amp', nargs='+', type=float, help='Acoustic pressure amplitude (kPa)')
     ap.add_argument('-Q', '--charge', nargs='+', type=float,
                     help='Membrane charge density (nC/cm2)')
     ap.add_argument('--spanFs', default=False, action='store_true',
                     help='Span sonophore coverage fraction')
 
     # Parse arguments
     args = {key: value for key, value in vars(ap.parse_args()).items() if value is not None}
     loglevel = logging.DEBUG if args['verbose'] is True else logging.INFO
     logger.setLevel(loglevel)
     mpi = args['mpi']
     neuron_str = args['neuron']
     radii = np.array(args.get('radius', defaults['radius'])) * 1e-9  # m
     freqs = np.array(args.get('freq', defaults['freq'])) * 1e3  # Hz
     amps = np.array(args.get('amp', defaults['amp'])) * 1e3  # Pa
 
     # Check neuron name validity
-    if neuron_str not in getNeuronsDict():
-        logger.error('Unknown neuron type: "%s"', neuron_str)
+    try:
+        neuron = getPointNeuron(neuron_str)
+    except ValueError as err:
+        logger.error(err)
         return
-    neuron = getNeuronsDict()[neuron_str]()
 
     # Determine charge vector
     if 'charge' in args:
         charges = np.array(args['charge']) * 1e-5  # C/m2
     else:
         charges = np.arange(neuron.Qbounds()[0], neuron.Qbounds()[1] + 1e-5, 1e-5)  # C/m2
 
     # Determine fs vector
     fs = None
     if args['spanFs']:
         fs = np.linspace(0, 100, 101) * 1e-2  # (-)
 
     # Determine output filename
     lookup_path = {
         True: getNeuronLookupsFile(neuron.name),
         False: getNeuronLookupsFile(neuron.name, a=radii[0], Fdrive=freqs[0], fs=True)
     }[fs is None]
 
     # Combine inputs into single list
     inputs = [radii, freqs, amps, charges, fs]
 
     # Adapt inputs and output filename if test case
     if args['test']:
         for i, x in enumerate(inputs):
             if x is not None and x.size > 1:
                 inputs[i] = np.array([x.min(), x.max()])
         lookup_path = '{}_test{}'.format(*os.path.splitext(lookup_path))
 
     # Check if lookup file already exists
     if os.path.isfile(lookup_path):
         logger.warning('"%s" file already exists and will be overwritten. ' +
                        'Continue? (y/n)', lookup_path)
         user_str = input()
         if user_str not in ['y', 'Y']:
             logger.error('%s Lookup creation canceled', neuron.name)
             return
 
     # Compute lookups
     df = computeAStimLookups(neuron, *inputs, mpi=mpi, loglevel=loglevel)
 
     # Save dictionary in lookup file
     logger.info('Saving %s neuron lookup table in file: "%s"', neuron.name, lookup_path)
     with open(lookup_path, 'wb') as fh:
         pickle.dump(df, fh)
 
 
 if __name__ == '__main__':
     main()
diff --git a/scripts/run_mech.py b/scripts/run_mech.py
index 9f08c59..2d16650 100644
--- a/scripts/run_mech.py
+++ b/scripts/run_mech.py
@@ -1,49 +1,48 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 # @Author: Theo Lemaire
 # @Date: 2016-11-21 10:46:56
 # @Email: theo.lemaire@epfl.ch
 # @Last Modified by: Theo Lemaire
-# @Last Modified time: 2019-06-03 15:51:13
+# @Last Modified time: 2019-06-06 18:21:13
 
 ''' Run simulations of the NICE mechanical model. '''
 
 import matplotlib.pyplot as plt
 
 from PySONIC.core import BilayerSonophore, Batch
 from PySONIC.utils import logger
-from PySONIC.plt import plotBatch
+from PySONIC.plt import SchemePlot
 from PySONIC.parsers import MechSimParser
 
 
 def main():
     # Parse command line arguments
     parser = MechSimParser()
     parser.addOutputDir()
     args = parser.parse()
     logger.setLevel(args['loglevel'])
     args['outputdir'] = parser.parseOutputDir(args)
 
     # Run MECH batch
     logger.info("Starting mechanical simulation batch")
     pkl_filepaths = []
     for a in args['radius']:
         for d in args['embedding']:
             for Cm0 in args['Cm0']:
                 for Qm0 in args['Qm0']:
                     bls = BilayerSonophore(a, Cm0, Qm0, embedding_depth=d)
                     queue = bls.simQueue(args['freq'], args['amp'], args['charge'])
                     for item in queue:
                         item.insert(0, args['outputdir'])
                     batch = Batch(bls.runAndSave, queue)
                     pkl_filepaths += batch(mpi=args['mpi'], loglevel=args['loglevel'])
 
     # Plot resulting profiles
     if args['plot'] is not None:
-        pltscheme = parser.parsePltScheme(args)
-        plotBatch(pkl_filepaths, pltscheme=pltscheme)
+        SchemePlot(pkl_filepaths, pltscheme=parser.parsePltScheme(args))()
         plt.show()
 
 
 if __name__ == '__main__':
     main()
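
Usage sketch (not part of the patch): a minimal example of the refactored entry points that this changeset applies across the scripts above, namely getPointNeuron for name-based neuron retrieval and SchemePlot / ComparativePlot for rendering. It relies only on call patterns visible in the modified scripts; the neuron name, plotted variable and pickle file paths are hypothetical placeholders.

# Minimal sketch based on the API usage shown in the patch above.
# 'RS', 'Qm' and the file paths below are illustrative assumptions, not fixed values.
import logging
import matplotlib.pyplot as plt

from PySONIC.utils import logger
from PySONIC.neurons import getPointNeuron
from PySONIC.plt import SchemePlot, ComparativePlot

logger.setLevel(logging.INFO)

try:
    neuron = getPointNeuron('RS')  # raises ValueError for unknown neuron names
except ValueError as err:
    logger.error(err)
    raise

# Hypothetical simulation output files produced by one of the run_*.py scripts
pkl_filepaths = ['outputs/sim1.pkl', 'outputs/sim2.pkl']

# One figure per file and per plot-scheme group, as in the run_*.py scripts
SchemePlot(pkl_filepaths, pltscheme={'Qm': ['Qm']})()

# Single-variable comparison across files, as in plot_timeseries.py
ComparativePlot(pkl_filepaths, 'Qm').render()

plt.show()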