diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..2ca70ab --- /dev/null +++ b/.gitignore @@ -0,0 +1,110 @@ +# Sphinx tools and doc +_build/ +_static/ +_templates/ +Makefile +*.bat +conf.py + +# Sublime Workspace files +*.sublime-workspace + +# PKL files +*.pkl + +# Excel files +*.xlsx + +# PNG images +*.png + +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +env/ +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +*.egg-info/ +.installed.cfg +*.egg + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*,cover +.hypothesis/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# pyenv +.python-version + +# celery beat schedule file +celerybeat-schedule + +# dotenv +.env + +# virtualenv +.venv/ +venv/ +ENV/ + +# Spyder project settings +.spyderproject + +# Rope project settings +.ropeproject \ No newline at end of file diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..c2b6e9a --- /dev/null +++ b/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2017 TNE lab, EPFL + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
\ No newline at end of file diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100644 index 0000000..ba44dc1 --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1,3 @@ +include README.md +include /lookups/*/*.pkl +include /templates/*.xlsx \ No newline at end of file diff --git a/PointNICE.sublime-project b/PointNICE.sublime-project new file mode 100644 index 0000000..0cec71a --- /dev/null +++ b/PointNICE.sublime-project @@ -0,0 +1,37 @@ +{ + "build_systems": + [ + { + "file_regex": "^[ ]*File \"(...*?)\", line ([0-9]*)", + "name": "Anaconda Python Builder", + "selector": "source.python", + "shell_cmd": "\"python\" -u \"$file\"" + } + ], + "folders": + [ + { + "file_exclude_patterns": + [ + "*.sublime-workspace", + "MANIFEST.in", + "LICENSE", + "conf.py", + "index.rst", + "*.gitignore", + "__init__.py" + ], + "folder_exclude_patterns": + [ + "deprecated", + "docs", + "lookups", + "graphs", + "templates", + "*.egg-info" + ], + "path": "." + } + ], + "translate_tabs_to_spaces": true +} diff --git a/PointNICE/__init__.py b/PointNICE/__init__.py new file mode 100644 index 0000000..6e2df29 --- /dev/null +++ b/PointNICE/__init__.py @@ -0,0 +1,16 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# @Author: Theo Lemaire +# @Date: 2017-06-06 13:36:00 +# @Email: theo.lemaire@epfl.ch +# @Last Modified by: Theo Lemaire +# @Last Modified time: 2017-08-21 21:11:37 + +''' Import the core classes, generic utilities and algorithmic constants. ''' + +from .bls import BilayerSonophore +from .solvers import * +from .channels import * +from .utils import * +from .constants import * +from .pltvars import * \ No newline at end of file diff --git a/PointNICE/bls.py b/PointNICE/bls.py new file mode 100644 index 0000000..e87ba32 --- /dev/null +++ b/PointNICE/bls.py @@ -0,0 +1,587 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# @Author: Theo Lemaire +# @Date: 2016-09-29 16:16:19 +# @Email: theo.lemaire@epfl.ch +# @Last Modified by: Theo Lemaire +# @Last Modified time: 2017-08-21 17:39:48 + +import logging +import warnings +import numpy as np +import scipy.integrate as integrate +from scipy.optimize import brentq, curve_fit +from .utils import * +from .constants import * + + +# Get package logger +logger = logging.getLogger('PointNICE') + + +class BilayerSonophore: + """ This class contains the geometric and mechanical parameters of the + Bilayer Sonophore Model, as well as all the core functions needed to + compute the dynamics (kinetics and kinematics) of the bilayer membrane + cavitation, and run dynamic BLS simulations. """ + + def __init__(self, geom, params, Fdrive, Cm0, Qm0): + """ Constructor of the class. 
+ :param geom: BLS geometric constants dictionary + :param params: BLS biomechanical and biophysical parameters dictionary + :param Fdrive: frequency of acoustic perturbation (Hz) + :param Cm0: membrane resting capacitance (F/m2) + :param Qm0: membrane resting charge density (C/m2) + """ + + logger.info('BLS initialization at %.2f kHz, %.2f nC/cm2', Fdrive * 1e-3, Qm0 * 1e5) + + # Assign biomechanical and biophysical parameters as direct class attributes + for key, value in params["biomech"].items(): + setattr(self, key, value) + for key, value in params["biophys"].items(): + setattr(self, key, value) + + # Extract resting constants and geometry + self.Cm0 = Cm0 + self.Qm0 = Qm0 + self.a = geom['a'] + self.d = geom['d'] + self.S0 = np.pi * self.a**2 + + # Derive frequency-dependent tissue elastic modulus + G_tissue = self.alpha * Fdrive # G'' (Pa) + self.kA_tissue = 2 * G_tissue * self.d # kA of the tissue layer (N/m) + + # Find Delta that cancels out Pm + Pec at Z = 0, i.e. the initial gap + # between the two leaflets on a charged membrane at equilibrium (m) + if self.Qm0 == 0.0: + self.Delta = self.Delta_ + else: + (D_eq, Pnet_eq) = self.findDeltaEq(self.Qm0) + assert Pnet_eq < PNET_EQ_MAX, 'High Pnet at Z = 0 with Delta = %.2f nm' % (D_eq * 1e9) + self.Delta = D_eq + + (LJ_approx, std_err, _) = self.LJfitPMavg() + assert std_err < PMAVG_STD_ERR_MAX, 'High error in PmAvg nonlinear fit:'\ + ' std_err = %.2f Pa' % std_err + self.LJ_approx = LJ_approx + + # Compute initial volume and gas content + self.V0 = np.pi * self.Delta * self.a**2 + self.ng0 = self.gasPa2mol(self.P0, self.V0) + + + def curvrad(self, Z): + """ Return the (signed) instantaneous curvature radius of the leaflet. + + :param Z: leaflet apex outward deflection value (m) + :return: leaflet curvature radius (m) + """ + if Z == 0.0: + return np.inf + else: + return (self.a**2 + Z**2) / (2 * Z) + + + def surface(self, Z): + """ Return the surface area of the stretched leaflet (spherical cap). + + :param Z: leaflet apex outward deflection value (m) + :return: surface of the stretched leaflet (m^2) + """ + return np.pi * (self.a**2 + Z**2) + + + def volume(self, Z): + """ Return the total volume of the inter-leaflet space (cylinder +/- + spherical cap). + + :param Z: leaflet apex outward deflection value (m) + :return: inner volume of the bilayer sonophore structure (m^3) + """ + return np.pi * self.a**2 * self.Delta\ + * (1 + (Z / (3 * self.Delta) * (3 + Z**2 / self.a**2))) + + + def arealstrain(self, Z): + """ Compute the areal strain of the stretched leaflet. + epsilon = (S - S0)/S0 = (Z/a)^2 + + :param Z: leaflet apex outward deflection value (m) + :return: areal strain (dimensionless) + """ + return (Z / self.a)**2 + + + def Capct(self, Z): + """ Compute the membrane capacitance per unit area, + under the assumption of parallel-plate capacitor + with average inter-layer distance. + + :param Z: leaflet apex outward deflection value (m) + :return: capacitance per unit area (F/m2) + """ + if Z == 0.0: + return self.Cm0 + else: + return ((self.Cm0 * self.Delta / self.a**2) * + (Z + (self.a**2 - Z**2 - Z * self.Delta) / (2 * Z) * + np.log((2 * Z + self.Delta) / self.Delta))) + + + def derCapct(self, Z, U): + """ Compute the derivative of the membrane capacitance per unit area + with respect to time, under the assumption of parallel-plate capacitor. 
+ + :param Z: leaflet apex outward deflection value (m) + :param U: leaflet apex outward deflection velocity (m/s) + :return: derivative of capacitance per unit area (F/m2.s) + """ + dCmdZ = ((self.Cm0 * self.Delta / self.a**2) * + ((Z**2 + self.a**2) / (Z * (2 * Z + self.Delta)) - + ((Z**2 + self.a**2) * + np.log((2 * Z + self.Delta) / self.Delta)) / (2 * Z**2))) + return dCmdZ * U + + + def localdef(self, r, Z, R): + """ Compute the (signed) local transverse leaflet deviation at a distance + r from the center of the dome. + + :param r: in-plane distance from center of the sonophore (m) + :param Z: leaflet apex outward deflection value (m) + :param R: leaflet curvature radius (m) + :return: local transverse leaflet deviation (m) + """ + if np.abs(Z) == 0.0: + return 0.0 + else: + return np.sign(Z) * (np.sqrt(R**2 - r**2) - np.abs(R) + np.abs(Z)) + + + def Pacoustic(self, t, Adrive, Fdrive, phi=np.pi): + """ Compute the acoustic pressure at a specific time, given + the amplitude, frequency and phase of the acoustic stimulus. + + :param t: time of interest + :param Adrive: acoustic drive amplitude (Pa) + :param Fdrive: acoustic drive frequency (Hz) + :param phi: acoustic drive phase (rad) + """ + return Adrive * np.sin(2 * np.pi * Fdrive * t - phi) + + + def PMlocal(self, r, Z, R): + """ Compute the local intermolecular pressure. + + :param r: in-plane distance from center of the sonophore (m) + :param Z: leaflet apex outward deflection value (m) + :param R: leaflet curvature radius (m) + :return: local intermolecular pressure (Pa) + """ + z = self.localdef(r, Z, R) + relgap = (2 * z + self.Delta) / self.Delta_ + return self.pDelta * ((1 / relgap)**self.m - (1 / relgap)**self.n) + + + def PMavg(self, Z, R, S): + """ Compute the average intermolecular pressure felt across the leaflet + by quadratic integration. + + :param Z: leaflet apex outward deflection value (m) + :param R: leaflet curvature radius (m) + :param S: surface of the stretched leaflet (m^2) + :return: averaged intermolecular resultant pressure across the leaflet (Pa) + + .. warning:: quadratic integration is computationally expensive. + """ + # Intermolecular force over an infinitely thin ring of radius r + fMring = lambda r, Z, R: 2 * np.pi * r * self.PMlocal(r, Z, R) + + # Integrate from 0 to a + fTotal, _ = integrate.quad(fMring, 0, self.a, args=(Z, R)) + return fTotal / S + + + def LJfitPMavg(self): + """ Determine optimal parameters of a Lennard-Jones expression + approximating the average intermolecular pressure. + + These parameters are obtained by a nonlinear fit of the + Lennard-Jones function for a range of deflection values + between predetermined Zmin and Zmax. 
+ + :return: 3-tuple with optimized LJ parameters for PmAvg prediction (Map) and + the standard and max errors of the prediction in the fitting range (in Pascals) + """ + + # Determine lower bound of deflection range: when Pm = Pmmax + PMmax = LJFIT_PM_MAX # Pa + Zminlb = -0.49 * self.Delta + Zminub = 0.0 + f = lambda Z, Pmmax: self.PMavg(Z, self.curvrad(Z), self.surface(Z)) - PMmax + Zmin = brentq(f, Zminlb, Zminub, args=(PMmax), xtol=1e-16) + + # Create vectors for geometric variables + Zmax = 2 * self.a + Z = np.arange(Zmin, Zmax, 1e-11) + Pmavg = np.array([self.PMavg(ZZ, self.curvrad(ZZ), self.surface(ZZ)) for ZZ in Z]) + + # Compute optimal nonlinear fit of custom LJ function with initial guess + x0_guess = 2e-9 + C_guess = 1e4 + nrep_guess = 5.0 + nattr_guess = 3.0 + pguess = (x0_guess, C_guess, nrep_guess, nattr_guess) + popt, _ = curve_fit(lambda x, x0, C, nrep, nattr: + LennardJones(x, self.Delta, x0, C, nrep, nattr), + Z, Pmavg, p0=pguess, maxfev=10000) + (x0_opt, C_opt, nrep_opt, nattr_opt) = popt + Pmavg_fit = LennardJones(Z, self.Delta, x0_opt, C_opt, nrep_opt, nattr_opt) + + # Compute prediction error + residuals = Pmavg - Pmavg_fit + ss_res = np.sum(residuals**2) + N = residuals.size + std_err = np.sqrt(ss_res / N) + max_err = max(np.abs(residuals)) + + logger.debug('LJ approx: x0 = %.2f nm, C = %.2f kPa, m = %.2f, n = %.2f', + x0_opt * 1e9, C_opt * 1e-3, nrep_opt, nattr_opt) + + LJ_approx = {"x0": x0_opt, "C": C_opt, "nrep": nrep_opt, "nattr": nattr_opt} + return (LJ_approx, std_err, max_err) + + + def PMavgpred(self, Z): + """ Return the predicted intermolecular pressure based on a specific Lennard-Jones + function fitted on the deflection physiological range. + + :param Z: leaflet apex outward deflection value (m) + :return: predicted average intermolecular pressure (Pa) + """ + return LennardJones(Z, self.Delta, self.LJ_approx['x0'], self.LJ_approx['C'], + self.LJ_approx['nrep'], self.LJ_approx['nattr']) + + + def Pelec(self, Z, Qm): + """ Compute the electric equivalent pressure term. + + :param Z: leaflet apex outward deflection value (m) + :param Qm: membrane charge density (C/m2) + :return: electric equivalent pressure (Pa) + """ + relS = self.S0 / self.surface(Z) + abs_perm = self.epsilon0 * self.epsilonR # F/m + return -relS * Qm**2 / (2 * abs_perm) # Pa + + + def findDeltaEq(self, Qm): + """ Compute the Delta that cancels out the (Pm + Pec) equation at Z = 0 + for a given membrane charge density, using the Brent method to refine + the pressure root iteratively. + + :param Qm: membrane charge density (C/m2) + :return: equilibrium value (m) and associated pressure (Pa) + """ + + f = lambda Delta: (self.pDelta * + ((self.Delta_ / Delta)**self.m + - (self.Delta_ / Delta)**self.n) + + self.Pelec(0.0, Qm)) + + Delta_lb = 0.1 * self.Delta_ + Delta_ub = 2.0 * self.Delta_ + + Delta_eq = brentq(f, Delta_lb, Delta_ub, xtol=1e-16) + logger.debug('∆eq = %.2f nm', Delta_eq * 1e9) + return (Delta_eq, f(Delta_eq)) + + + def gasflux(self, Z, P): + """ Compute the gas molar flux through the BLS boundary layer for + an unsteady system. + + :param Z: leaflet apex outward deflection value (m) + :param P: internal gas pressure in the inter-leaflet space (Pa) + :return: gas molar flux (mol/s) + """ + dC = self.C0 - P / self.kH + return 2 * self.surface(Z) * self.Dgl * dC / self.xi + + + def gasmol2Pa(self, ng, V): + """ Compute the gas pressure in the inter-leaflet space for an + unsteady system, from the value of gas molar content. 
+ + :param ng: internal molar content (mol) + :param V: inner volume of the bilayer sonophore structure (m^3) + :return: internal gas pressure (Pa) + """ + return ng * self.Rg * self.T / V + + + def gasPa2mol(self, P, V): + """ Compute the gas molar content in the inter-leaflet space for + an unsteady system, from the value of internal gas pressure. + + :param P: internal gas pressure in the inter-leaflet space (Pa) + :param V: inner volume of the bilayer sonophore structure (m^3) + :return: internal gas molar content (mol) + """ + return P * V / (self.Rg * self.T) + + + def PtotQS(self, Z, ng, Qm, Pac, Pm_comp_method): + """ Compute the balance pressure of the quasi-steady system, upon application + of an external perturbation on a charged membrane: + Ptot = Pm + Pg + Pec - P0 - Pac. + + :param Z: leaflet apex outward deflection value (m) + :param ng: internal molar content (mol) + :param Qm: membrane charge density (C/m2) + :param Pac: external acoustic perturbation (Pa) + :param Pm_comp_method: type of method used to compute average intermolecular pressure + :return: total balance pressure (Pa) + + """ + if Pm_comp_method is PmCompMethod.direct: + Pm = self.PMavg(Z, self.curvrad(Z), self.surface(Z)) + elif Pm_comp_method is PmCompMethod.predict: + Pm = self.PMavgpred(Z) + return Pm + self.gasmol2Pa(ng, self.volume(Z)) - self.P0 - Pac + self.Pelec(Z, Qm) + + + def balancedefQS(self, ng, Qm, Pac=0.0, Pm_comp_method=PmCompMethod.predict): + """ Compute the leaflet deflection upon application of an external + perturbation to a quasi-steady system with a charged membrane. + + This function uses the Brent method (progressive approximation of + function root) to solve the following transcendental equation for Z: + Pm + Pg + Pec - P0 - Pac = 0. + + :param ng: internal molar content (mol) + :param Qm: membrane charge density (C/m2) + :param Pac: external acoustic perturbation (Pa) + :param Pm_comp_method: type of method used to compute average intermolecular pressure + :return: leaflet deflection (Z) canceling out the balance equation + """ + lb = -0.49 * self.Delta + ub = self.a + Plb = self.PtotQS(lb, ng, Qm, Pac, Pm_comp_method) + Pub = self.PtotQS(ub, ng, Qm, Pac, Pm_comp_method) + assert (Plb > 0 > Pub), '[%d, %d] is not a sign changing interval for PtotQS' % (lb, ub) + return brentq(self.PtotQS, lb, ub, args=(ng, Qm, Pac, Pm_comp_method), xtol=1e-16) + + + def TEleaflet(self, Z): + """ Compute the circumferential elastic tension felt across the + entire leaflet upon stretching. + + :param Z: leaflet apex outward deflection value (m) + :return: circumferential elastic tension (N/m) + """ + return self.kA * self.arealstrain(Z) + + + def TEtissue(self, Z): + """ Compute the circumferential elastic tension felt across the + embedding viscoelastic tissue layer upon stretching. + + :param Z: leaflet apex outward deflection value (m) + :return: circumferential elastic tension (N/m) + """ + return self.kA_tissue * self.arealstrain(Z) + + + def TEtot(self, Z): + """ Compute the total circumferential elastic tension (leaflet + and embedding tissue) felt upon stretching. + + :param Z: leaflet apex outward deflection value (m) + :return: circumferential elastic tension (N/m) + """ + return self.TEleaflet(Z) + self.TEtissue(Z) + + + def PEtot(self, Z, R): + """ Compute the total elastic tension pressure (leaflet + embedding + tissue) felt upon stretching. 
+ + :param Z: leaflet apex outward deflection value (m) + :param R: leaflet curvature radius (m) + :return: elastic tension pressure (Pa) + """ + return - self.TEtot(Z) / R + + + def PVleaflet(self, U, R): + """ Compute the viscous stress felt across the entire leaflet + upon stretching. + + :param U: leaflet apex outward deflection velocity (m/s) + :param R: leaflet curvature radius (m) + :return: leaflet viscous stress (Pa) + """ + return - 12 * U * self.delta0 * self.muS / R**2 + + + def PVfluid(self, U, R): + """ Compute the viscous stress felt across the entire fluid + upon stretching. + + :param U: leaflet apex outward deflection velocity (m/s) + :param R: leaflet curvature radius (m) + :return: fluid viscous stress (Pa) + """ + return - 4 * U * self.muL / np.abs(R) + + + def accP(self, Pres, R): + """ Compute the pressure-driven acceleration of the leaflet in the + unsteady system, upon application of an external perturbation. + + :param Pres: net resultant pressure (Pa) + :param R: leaflet curvature radius (m) + :return: pressure-driven acceleration (m/s^2) + """ + return Pres / (self.rhoL * np.abs(R)) + + + def accNL(self, U, R): + """ Compute the non-linear term of the leaflet acceleration in the + unsteady system, upon application of an external perturbation. + + :param U: leaflet apex outward deflection velocity (m/s) + :param R: leaflet curvature radius (m) + :return: nonlinear acceleration (m/s^2) + + .. note:: A simplified version of nonlinear acceleration (neglecting + dR/dH) is used here. + + """ + # return - (3/2 - 2*R/H) * U**2 / R + return -(3 * U**2) / (2 * R) + + + def eqMech(self, t, y, Adrive, Fdrive, Qm, phi): + """ Compute the derivatives of the 3-ODE mechanical system variables, + with an imposed constant charge density. + + :param t: specific instant in time (s) + :param y: vector of HH system variables at time t + :param Adrive: acoustic drive amplitude (Pa) + :param Fdrive: acoustic drive frequency (Hz) + :param Qm: membrane charge density (F/m2) + :param phi: acoustic drive phase (rad) + :return: vector of mechanical system derivatives at time t + """ + + # Split input vector explicitly + (U, Z, ng) = y + + # Compute curvature radius + R = self.curvrad(Z) + + # Compute total pressure + Pg = self.gasmol2Pa(ng, self.volume(Z)) + Ptot = (self.PMavgpred(Z) + Pg - self.P0 - self.Pacoustic(t, Adrive, Fdrive, phi) + + self.PEtot(Z, R) + self.PVleaflet(U, R) + self.PVfluid(U, R) + + self.Pelec(Z, Qm)) + + # Compute derivatives + dUdt = self.accP(Ptot, R) + self.accNL(U, R) + dZdt = U + dngdt = self.gasflux(Z, Pg) + + # Return derivatives vector + return [dUdt, dZdt, dngdt] + + + def runMech(self, Fdrive, Adrive, Qm, phi=np.pi): + """ Compute short solutions of the mechanical system for specific + US stimulation parameters and with an imposed membrane charge density. 
+ + :param Fdrive: acoustic drive frequency (Hz) + :param Adrive: acoustic drive amplitude (Pa) + :param phi: acoustic drive phase (rad) + :param Qm: imposed membrane charge density (C/m2) + :return: 3-tuple with the time profile, the solution matrix and a state vector + """ + + # Raise warnings as error + warnings.filterwarnings('error') + + # Initialize mechanical system solvers + solver = integrate.ode(self.eqMech) + solver.set_f_params(Adrive, Fdrive, Qm, phi) + solver.set_integrator('lsoda', nsteps=SOLVER_NSTEPS) + + # Determine mechanical system time step + Tdrive = 1 / Fdrive + dt_mech = Tdrive / NPC_FULL + t_mech_cycle = np.linspace(0, Tdrive - dt_mech, NPC_FULL) + + # Initialize system variables + t0 = 0.0 + Z0 = 0.0 + U0 = 0.0 + ng0 = self.ng0 + + # Solve quasi-steady equation to compute first deflection value + Pac1 = self.Pacoustic(t0 + dt_mech, Adrive, Fdrive, phi) + Z1 = self.balancedefQS(ng0, Qm, Pac1, PmCompMethod.predict) + U1 = (Z1 - Z0) / dt_mech + + # Construct arrays to hold system variables + states = np.array([-1, -1]) + t = np.array([t0, t0 + dt_mech]) + y = np.array([[U0, U1], [Z0, Z1], [ng0, ng0]]) + + # Integrate mechanical system for a few acoustic cycles until stabilization + sim_error = False + periodic_conv = False + j = 0 + ng_last = None + Z_last = None + while not sim_error and not periodic_conv: + t_mech = t_mech_cycle + t[-1] + dt_mech + y_mech = np.empty((3, NPC_FULL)) + y0_mech = y[:, -1] + solver.set_initial_value(y0_mech, t[-1]) + k = 0 + try: # try to integrate and catch errors/warnings + while solver.successful() and k <= NPC_FULL - 1: + solver.integrate(t_mech[k]) + y_mech[:, k] = solver.y + assert (y_mech[1, k] > -0.5 * self.Delta), 'Deflection out of range' + k += 1 + except (Warning, AssertionError) as inst: + sim_error = True + logger.error('Mech. 
system integration error at step %u', k, extra={inst}) + + # Compare Z and ng signals over the last 2 acoustic periods + if j > 0: + Z_rmse = rmse(Z_last, y_mech[1, :]) + ng_rmse = rmse(ng_last, y_mech[2, :]) + logger.debug('step %u: Z_rmse = %.2e m, ng_rmse = %.2e mol', j, Z_rmse, ng_rmse) + if Z_rmse < Z_ERR_MAX and ng_rmse < NG_ERR_MAX: + periodic_conv = True + + # Update last vectors for next comparison + Z_last = y_mech[1, :] + ng_last = y_mech[2, :] + + # Concatenate time and solutions to global vectors + states = np.concatenate([states, np.ones(NPC_FULL)], axis=0) + t = np.concatenate([t, t_mech], axis=0) + y = np.concatenate([y, y_mech], axis=1) + + # Increment loop index + j += 1 + + logger.debug('Periodic convergence after %u cycles', j) + + # return output variables + return (t, y[1:, :], states) diff --git a/PointNICE/channels/__init__.py b/PointNICE/channels/__init__.py new file mode 100644 index 0000000..49ba6c5 --- /dev/null +++ b/PointNICE/channels/__init__.py @@ -0,0 +1,13 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# @Author: Theo Lemaire +# @Date: 2017-06-06 13:36:00 +# @Email: theo.lemaire@epfl.ch +# @Last Modified by: Theo Lemaire +# @Last Modified time: 2017-08-17 11:38:01 + + +from .base import BaseMech +from .cortical import CorticalRS, CorticalFS, CorticalLTS +from .thalamic import ThalamicRE, ThalamoCortical +from .leech import LeechTouch diff --git a/PointNICE/channels/base.py b/PointNICE/channels/base.py new file mode 100644 index 0000000..1839889 --- /dev/null +++ b/PointNICE/channels/base.py @@ -0,0 +1,126 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# @Author: Theo Lemaire +# @Date: 2017-08-03 11:53:04 +# @Email: theo.lemaire@epfl.ch +# @Last Modified by: Theo Lemaire +# @Last Modified time: 2017-08-03 14:41:42 + +''' Module standard API for all neuron mechanisms. + + Each mechanism class can use different methods to define the membrane dynamics of a + specific neuron type. However, they must contain some mandatory attributes and methods + in order to be properly imported in other PointNICE modules and used in NICE simulations. +''' + +import abc + + +class BaseMech(metaclass=abc.ABCMeta): + ''' Abstract class defining the common API (i.e. mandatory attributes and methods) of all + subclasses implementing the channels mechanisms of specific neurons. + + The mandatory attributes are: + - **name**: a string defining the name of the mechanism. + - **Cm0**: a float defining the membrane resting capacitance (in F/m2) + - **Vm0**: a float defining the membrane resting potential (in mV) + - **states_names**: a list of strings defining the names of the different state + probabilities governing the channels behaviour (i.e. the differential HH variables). + - **states0**: a 1D array of floats (NOT integers !!!) defining the initial values of + the different state probabilities. + - **coeff_names**: a list of strings defining the names of the different coefficients + to be used in effective simulations. + + The mandatory methods are: + - **currNet**: compute the net ionic current density (in mA/m2) across the membrane, + given a specific membrane potential (in mV) and channel states. + - **steadyStates**: compute the channels steady-state values for a specific membrane + potential value (in mV). + - **derStates**: compute the derivatives of channel states, given a specific membrane + potential (in mV) and channel states. This method must return a list of derivatives + ordered identically as in the states0 attribute. 
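A minimal usage sketch for the BilayerSonophore class defined in bls.py above; the geometry values and the load_params() helper are hypothetical placeholders, since the actual biomechanical and biophysical parameter dictionaries are defined elsewhere in the package.

from PointNICE import BilayerSonophore

geom = {'a': 32e-9, 'd': 0.0}     # assumed in-plane radius and embedding depth (m)
params = load_params()            # hypothetical helper returning {'biomech': {...}, 'biophys': {...}}
bls = BilayerSonophore(geom, params, Fdrive=500e3, Cm0=1e-2, Qm0=-71.9e-5)

# mechanical-only simulation with an imposed membrane charge density
t, (Z, ng), states = bls.runMech(Fdrive=500e3, Adrive=100e3, Qm=-71.9e-5)
print('peak deflection: %.2f nm' % (Z.max() * 1e9))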
+ - **getEffRates**: get the effective rate constants of ion channels to be used in + effective simulations. This method must return an array of effective rates ordered + identically as in the coeff_names attribute. + - **derStatesEff**: compute the effective derivatives of channel states, based on + 2-dimensional linear interpolators of "effective" coefficients. This method must + return a list of derivatives ordered identically as in the states0 attribute. + ''' + + @property + @abc.abstractmethod + def name(self): + return 'Should never reach here' + + @property + @abc.abstractmethod + def Cm0(self): + return 'Should never reach here' + + @property + @abc.abstractmethod + def Vm0(self): + return 'Should never reach here' + + @property + @abc.abstractmethod + def states_names(self): + return 'Should never reach here' + + @property + @abc.abstractmethod + def states0(self): + return 'Should never reach here' + + @property + @abc.abstractmethod + def coeff_names(self): + return 'Should never reach here' + + + @abc.abstractmethod + def currNet(self, Vm, states): + ''' Compute the net ionic current per unit area. + + :param Vm: membrane potential (mV) + :states: state probabilities of the ion channels + :return: current per unit area (mA/m2) + ''' + + @abc.abstractmethod + def steadyStates(self, Vm): + ''' Compute the channels steady-state values for a specific membrane potential value. + + :param Vm: membrane potential (mV) + :return: array of steady-states + ''' + + @abc.abstractmethod + def derStates(self, Vm, states): + ''' Compute the derivatives of channel states. + + :param Vm: membrane potential (mV) + :states: state probabilities of the ion channels + :return: current per unit area (mA/m2) + ''' + + @abc.abstractmethod + def getEffRates(self, Vm): + ''' Get the effective rate constants of ion channels, averaged along an acoustic cycle, + for future use in effective simulations. + + :param Vm: array of membrane potential values for an acoustic cycle (mV) + :return: an array of rate average constants (s-1) + ''' + + @abc.abstractmethod + def derStatesEff(self, Adrive, Qm, states, interpolators): + ''' Compute the effective derivatives of channel states, based on + 2-dimensional linear interpolation of "effective" coefficients + that summarize the system's behaviour over an acoustic cycle. + + :param Vm_eff: effective membrane potential (mV) + :states: state probabilities of the ion channels + :param interpolators: dictionary of 2-dimensional linear interpolators + of "effective" rates over the 2D amplitude x charge input domain. + ''' diff --git a/PointNICE/channels/cortical.py b/PointNICE/channels/cortical.py new file mode 100644 index 0000000..3125f19 --- /dev/null +++ b/PointNICE/channels/cortical.py @@ -0,0 +1,572 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# @Author: Theo Lemaire +# @Date: 2017-07-31 15:19:51 +# @Email: theo.lemaire@epfl.ch +# @Last Modified by: Theo Lemaire +# @Last Modified time: 2017-08-22 14:49:00 + +''' Channels mechanisms for thalamic neurons. ''' + +import numpy as np +from .base import BaseMech + + +class Cortical(BaseMech): + ''' Class defining the generic membrane channel dynamics of a cortical neuron + with 4 different current types: + - Inward Sodium current + - Outward, delayed-rectifier Potassium current + - Outward, slow non.inactivating Potassium current + - Non-specific leakage current + This generic class cannot be used directly as it does not contain any specific parameters. 
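A minimal sketch of a concrete BaseMech subclass, only to illustrate the API contract described above; the 'MyNeuron' mechanism, its single gating variable and its rate functions are entirely made up.

import numpy as np
from PointNICE.channels import BaseMech

class MyNeuron(BaseMech):
    ''' Hypothetical single-gate mechanism illustrating the mandatory BaseMech API. '''
    name = 'MyNeuron'
    Cm0 = 1e-2                    # resting capacitance (F/m2)
    Vm0 = -70.0                   # resting potential (mV)
    states_names = ['x']
    states0 = np.array([0.0])
    coeff_names = ['alphax', 'betax']

    def alphax(self, Vm):
        return 1e3 / (1 + np.exp(-(Vm + 40) / 10))    # made-up opening rate (s-1)

    def betax(self, Vm):
        return 1e3 / (1 + np.exp((Vm + 40) / 10))     # made-up closing rate (s-1)

    def currNet(self, Vm, states):
        x, = states
        return 10.0 * x * (Vm - self.Vm0)             # made-up conductance-based current (mA/m2)

    def steadyStates(self, Vm):
        return np.array([self.alphax(Vm) / (self.alphax(Vm) + self.betax(Vm))])

    def derStates(self, Vm, states):
        x, = states
        return [self.alphax(Vm) * (1 - x) - self.betax(Vm) * x]

    def getEffRates(self, Vm):
        return np.array([np.mean(self.alphax(Vm)), np.mean(self.betax(Vm))])

    def derStatesEff(self, Adrive, Qm, states, interpolators):
        x, = states
        ax, bx = [interpolators[rn](Adrive, Qm) for rn in self.coeff_names]
        return [ax * (1 - x) - bx * x]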
+ + Reference: + *Pospischil, M., Toledo-Rodriguez, M., Monier, C., Piwkowska, Z., Bal, T., Frégnac, + Y., Markram, H., and Destexhe, A. (2008). Minimal Hodgkin-Huxley type models for + different classes of cortical and thalamic neurons. Biol Cybern 99, 427–441.* + ''' + + # Generic biophysical parameters of cortical cells + Cm0 = 1e-2 # Cell membrane resting capacitance (F/m2) + Vm0 = 0.0 # Dummy value for membrane potential (mV) + VNa = 50.0 # Sodium Nernst potential (mV) + VK = -90.0 # Potassium Nernst potential (mV) + + # Names and initial states of the channels state probabilities + states_names = ['m', 'h', 'n', 'p'] + states0 = np.array([]) + + # Names of the different coefficients to be averaged in a lookup table. + coeff_names = ['alpham', 'betam', 'alphah', 'betah', 'alphan', 'betan', 'alphap', 'betap'] + + def __init__(self): + ''' Constructor of the class ''' + pass + + + def alpham(self, Vm): + ''' Compute the alpha rate for the open-probability of Sodium channels. + + :param Vm: membrane potential (mV) + :return: rate constant (s-1) + ''' + + Vdiff = Vm - self.VT + alpha = (-0.32 * (Vdiff - 13) / (np.exp(- (Vdiff - 13) / 4) - 1)) # ms-1 + return alpha * 1e3 # s-1 + + + def betam(self, Vm): + ''' Compute the beta rate for the open-probability of Sodium channels. + + :param Vm: membrane potential (mV) + :return: rate constant (s-1) + ''' + + Vdiff = Vm - self.VT + beta = (0.28 * (Vdiff - 40) / (np.exp((Vdiff - 40) / 5) - 1)) # ms-1 + return beta * 1e3 # s-1 + + + def alphah(self, Vm): + ''' Compute the alpha rate for the inactivation-probability of Sodium channels. + + :param Vm: membrane potential (mV) + :return: rate constant (s-1) + ''' + + Vdiff = Vm - self.VT + alpha = (0.128 * np.exp(-(Vdiff - 17) / 18)) # ms-1 + return alpha * 1e3 # s-1 + + + def betah(self, Vm): + ''' Compute the beta rate for the inactivation-probability of Sodium channels. + + :param Vm: membrane potential (mV) + :return: rate constant (s-1) + ''' + + Vdiff = Vm - self.VT + beta = (4 / (1 + np.exp(-(Vdiff - 40) / 5))) # ms-1 + return beta * 1e3 # s-1 + + + def alphan(self, Vm): + ''' Compute the alpha rate for the open-probability of delayed-rectifier Potassium channels. + + :param Vm: membrane potential (mV) + :return: rate constant (s-1) + ''' + + Vdiff = Vm - self.VT + alpha = (-0.032 * (Vdiff - 15) / (np.exp(-(Vdiff - 15) / 5) - 1)) # ms-1 + return alpha * 1e3 # s-1 + + + def betan(self, Vm): + ''' Compute the beta rate for the open-probability of delayed-rectifier Potassium channels. + + :param Vm: membrane potential (mV) + :return: rate constant (s-1) + ''' + + Vdiff = Vm - self.VT + beta = (0.5 * np.exp(-(Vdiff - 10) / 40)) # ms-1 + return beta * 1e3 # s-1 + + + def pinf(self, Vm): + ''' Compute the asymptotic value of the open-probability of + slow non-inactivating Potassium channels. + + :param Vm: membrane potential (mV) + :return: asymptotic probability (-) + ''' + + return 1.0 / (1 + np.exp(-(Vm + 35) / 10)) # prob + + + def taup(self, Vm): + ''' Compute the decay time constant for adaptation of + slow non-inactivating Potassium channels. + + :param Vm: membrane potential (mV) + :return: decayed time constant (s) + ''' + + return self.TauMax / (3.3 * np.exp((Vm + 35) / 20) + np.exp(-(Vm + 35) / 20)) # s + + + def derM(self, Vm, m): + ''' Compute the evolution of the open-probability of Sodium channels. + + :param Vm: membrane potential (mV) + :param m: open-probability of Sodium channels (prob) + :return: derivative of open-probability w.r.t. 
time (prob/s) + ''' + + return self.alpham(Vm) * (1 - m) - self.betam(Vm) * m + + + def derH(self, Vm, h): + ''' Compute the evolution of the inactivation-probability of Sodium channels. + + :param Vm: membrane potential (mV) + :param h: inactivation-probability of Sodium channels (prob) + :return: derivative of open-probability w.r.t. time (prob/s) + ''' + + return self.alphah(Vm) * (1 - h) - self.betah(Vm) * h + + + def derN(self, Vm, n): + ''' Compute the evolution of the open-probability of delayed-rectifier Potassium channels. + + :param Vm: membrane potential (mV) + :param n: open-probability of delayed-rectifier Potassium channels (prob) + :return: derivative of open-probability w.r.t. time (prob/s) + ''' + + return self.alphan(Vm) * (1 - n) - self.betan(Vm) * n + + + def derP(self, Vm, p): + ''' Compute the evolution of the open-probability of + slow non-inactivating Potassium channels. + + :param Vm: membrane potential (mV) + :param p: open-probability of slow non-inactivating Potassium channels (prob) + :return: derivative of open-probability w.r.t. time (prob/s) + ''' + + return (self.pinf(Vm) - p) / self.taup(Vm) + + + def currNa(self, m, h, Vm): + ''' Compute the inward Sodium current per unit area. + + :param m: open-probability of Sodium channels + :param h: inactivation-probability of Sodium channels + :param Vm: membrane potential (mV) + :return: current per unit area (mA/m2) + ''' + + GNa = self.GNaMax * m**3 * h + return GNa * (Vm - self.VNa) + + + def currK(self, n, Vm): + ''' Compute the outward, delayed-rectifier Potassium current per unit area. + + :param n: open-probability of delayed-rectifier Potassium channels + :param Vm: membrane potential (mV) + :return: current per unit area (mA/m2) + ''' + + GK = self.GKMax * n**4 + return GK * (Vm - self.VK) + + + def currM(self, p, Vm): + ''' Compute the outward, slow non-inactivating Potassium current per unit area. + + :param p: open-probability of the slow non-inactivating Potassium channels + :param Vm: membrane potential (mV) + :return: current per unit area (mA/m2) + ''' + + GM = self.GMMax * p + return GM * (Vm - self.VK) + + + def currL(self, Vm): + ''' Compute the non-specific leakage current per unit area. + + :param Vm: membrane potential (mV) + :return: current per unit area (mA/m2) + ''' + + return self.GL * (Vm - self.VL) + + + def currNet(self, Vm, states): + ''' Concrete implementation of the abstract API method. ''' + + m, h, n, p = states + return (self.currNa(m, h, Vm) + self.currK(n, Vm) + + self.currM(p, Vm) + self.currL(Vm)) # mA/m2 + + + def steadyStates(self, Vm): + ''' Concrete implementation of the abstract API method. ''' + + # Solve the equation dx/dt = 0 at Vm for each x-state + meq = self.alpham(Vm) / (self.alpham(Vm) + self.betam(Vm)) + heq = self.alphah(Vm) / (self.alphah(Vm) + self.betah(Vm)) + neq = self.alphan(Vm) / (self.alphan(Vm) + self.betan(Vm)) + peq = self.pinf(Vm) + return np.array([meq, heq, neq, peq]) + + + def derStates(self, Vm, states): + ''' Concrete implementation of the abstract API method. ''' + + m, h, n, p = states + dmdt = self.derM(Vm, m) + dhdt = self.derH(Vm, h) + dndt = self.derN(Vm, n) + dpdt = self.derP(Vm, p) + return [dmdt, dhdt, dndt, dpdt] + + + def getEffRates(self, Vm): + ''' Concrete implementation of the abstract API method. 
''' + + # Compute average cycle value for rate constants + am_avg = np.mean(self.alpham(Vm)) + bm_avg = np.mean(self.betam(Vm)) + ah_avg = np.mean(self.alphah(Vm)) + bh_avg = np.mean(self.betah(Vm)) + an_avg = np.mean(self.alphan(Vm)) + bn_avg = np.mean(self.betan(Vm)) + Tp = self.taup(Vm) + pinf = self.pinf(Vm) + ap_avg = np.mean(pinf / Tp) + bp_avg = np.mean(1 / Tp) - ap_avg + + # Return array of coefficients + return np.array([am_avg, bm_avg, ah_avg, bh_avg, an_avg, bn_avg, ap_avg, bp_avg]) + + + def derStatesEff(self, Adrive, Qm, states, interpolators): + ''' Concrete implementation of the abstract API method. ''' + + rates = np.array([interpolators[rn](Adrive, Qm) for rn in self.coeff_names]) + + m, h, n, p = states + dmdt = rates[0] * (1 - m) - rates[1] * m + dhdt = rates[2] * (1 - h) - rates[3] * h + dndt = rates[4] * (1 - n) - rates[5] * n + dpdt = rates[6] * (1 - p) - rates[7] * p + + return [dmdt, dhdt, dndt, dpdt] + + +class CorticalRS(Cortical): + ''' Specific membrane channel dynamics of a cortical regular spiking, excitatory + pyramidal neuron. + + Reference: + *Pospischil, M., Toledo-Rodriguez, M., Monier, C., Piwkowska, Z., Bal, T., Frégnac, + Y., Markram, H., and Destexhe, A. (2008). Minimal Hodgkin-Huxley type models for + different classes of cortical and thalamic neurons. Biol Cybern 99, 427–441.* + ''' + + # Name of channel mechanism + name = 'RS' + + # Cell-specific biophysical parameters + Vm0 = -71.9 # Cell membrane resting potential (mV) + GNaMax = 560.0 # Max. conductance of Sodium current (S/m^2) + GKMax = 60.0 # Max. conductance of delayed Potassium current (S/m^2) + GMMax = 0.75 # Max. conductance of slow non-inactivating Potassium current (S/m^2) + GL = 0.205 # Conductance of non-specific leakage current (S/m^2) + VL = -70.3 # Non-specific leakage Nernst potential (mV) + VT = -56.2 # Spike threshold adjustment parameter (mV) + TauMax = 0.608 # Max. adaptation decay of slow non-inactivating Potassium current (s) + + + def __init__(self): + ''' Constructor of the class. ''' + + # Instantiate parent class + super().__init__() + + # Define initial channel probabilities (solving dx/dt = 0 at resting potential) + self.states0 = self.steadyStates(self.Vm0) + + +class CorticalFS(Cortical): + ''' Specific membrane channel dynamics of a cortical fast-spiking, inhibitory neuron. + + Reference: + *Pospischil, M., Toledo-Rodriguez, M., Monier, C., Piwkowska, Z., Bal, T., Frégnac, + Y., Markram, H., and Destexhe, A. (2008). Minimal Hodgkin-Huxley type models for + different classes of cortical and thalamic neurons. Biol Cybern 99, 427–441.* + ''' + + # Name of channel mechanism + name = 'FS' + + # Cell-specific biophysical parameters + Vm0 = -71.4 # Cell membrane resting potential (mV) + GNaMax = 580.0 # Max. conductance of Sodium current (S/m^2) + GKMax = 39.0 # Max. conductance of delayed Potassium current (S/m^2) + GMMax = 0.787 # Max. conductance of slow non-inactivating Potassium current (S/m^2) + GL = 0.38 # Conductance of non-specific leakage current (S/m^2) + VL = -70.4 # Non-specific leakage Nernst potential (mV) + VT = -57.9 # Spike threshold adjustment parameter (mV) + TauMax = 0.502 # Max. adaptation decay of slow non-inactivating Potassium current (s) + + + def __init__(self): + ''' Constructor of the class. 
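A quick sanity-check sketch for the CorticalRS class just defined: at the resting potential the gating variables sit at their steady states and the net membrane current should be close to zero.

from PointNICE.channels import CorticalRS

rs = CorticalRS()
print(rs.states0)                       # [m, h, n, p] steady states at Vm0 = -71.9 mV
print(rs.currNet(rs.Vm0, rs.states0))   # net current density (mA/m2), expected near zero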
''' + + # Instantiate parent class + super().__init__() + + # Define initial channel probabilities (solving dx/dt = 0 at resting potential) + self.states0 = self.steadyStates(self.Vm0) + + +class CorticalLTS(Cortical): + ''' Specific membrane channel dynamics of a cortical low-threshold spiking, inhibitory + neuron with an additional inward Calcium current due to the presence of a T-type channel. + + References: + *Pospischil, M., Toledo-Rodriguez, M., Monier, C., Piwkowska, Z., Bal, T., Frégnac, + Y., Markram, H., and Destexhe, A. (2008). Minimal Hodgkin-Huxley type models for + different classes of cortical and thalamic neurons. Biol Cybern 99, 427–441.* + + *Huguenard, J.R., and McCormick, D.A. (1992). Simulation of the currents involved in + rhythmic oscillations in thalamic relay neurons. J. Neurophysiol. 68, 1373–1383.* + + ''' + + # Name of channel mechanism + name = 'LTS' + + # Cell-specific biophysical parameters + Vm0 = -54.0 # Cell membrane resting potential (mV) + GNaMax = 500.0 # Max. conductance of Sodium current (S/m^2) + GKMax = 40.0 # Max. conductance of delayed Potassium current (S/m^2) + GMMax = 0.28 # Max. conductance of slow non-inactivating Potassium current (S/m^2) + GTMax = 4.0 # Max. conductance of low-threshold Calcium current (S/m^2) + GL = 0.19 # Conductance of non-specific leakage current (S/m^2) + VCa = 120.0 # # Calcium Nernst potential (mV) + VL = -50.0 # Non-specific leakage Nernst potential (mV) + VT = -50.0 # Spike threshold adjustment parameter (mV) + TauMax = 4.0 # Max. adaptation decay of slow non-inactivating Potassium current (s) + Vx = -7.0 # Voltage-dependence uniform shift factor at 36°C (mV) + + + def __init__(self): + ''' Constructor of the class. ''' + + # Instantiate parent class + super().__init__() + + # Add names of cell-specific Calcium channel probabilities + self.states_names += ['s', 'u'] + + # Define initial channel probabilities (solving dx/dt = 0 at resting potential) + self.states0 = self.steadyStates(self.Vm0) + + # Define the names of the different coefficients to be averaged in a lookup table. + self.coeff_names += ['alphas', 'betas', 'alphau', 'betau'] + + + + def sinf(self, Vm): + ''' Compute the asymptotic value of the open-probability of the S-type, + activation gate of Calcium channels. + + :param Vm: membrane potential (mV) + :return: asymptotic probability (-) + ''' + + return 1.0 / (1.0 + np.exp(-(Vm + self.Vx + 57.0) / 6.2)) # prob + + + def taus(self, Vm): + ''' Compute the decay time constant for adaptation of S-type, + activation gate of Calcium channels. + + :param Vm: membrane potential (mV) + :return: decayed time constant (s) + ''' + tmp = np.exp(-(Vm + self.Vx + 132.0) / 16.7) + np.exp((Vm + self.Vx + 16.8) / 18.2) + return 1.0 / 3.7 * (0.612 + 1.0 / tmp) * 1e-3 # s + + + def uinf(self, Vm): + ''' Compute the asymptotic value of the open-probability of the U-type, + inactivation gate of Calcium channels. + + :param Vm: membrane potential (mV) + :return: asymptotic probability (-) + ''' + + return 1.0 / (1.0 + np.exp((Vm + self.Vx + 81.0) / 4.0)) # prob + + + def tauu(self, Vm): + ''' Compute the decay time constant for adaptation of U-type, + inactivation gate of Calcium channels. 
+ + :param Vm: membrane potential (mV) + :return: decayed time constant (s) + ''' + + if Vm + self.Vx < -80.0: + return 1.0 / 3.7 * np.exp((Vm + self.Vx + 467.0) / 66.6) * 1e-3 # s + else: + return 1.0 / 3.7 * (np.exp(-(Vm + self.Vx + 22) / 10.5) + 28.0) * 1e-3 # s + + + def derS(self, Vm, s): + ''' Compute the evolution of the open-probability of the S-type, + activation gate of Calcium channels. + + :param Vm: membrane potential (mV) + :param s: open-probability of S-type Calcium activation gates (prob) + :return: derivative of open-probability w.r.t. time (prob/s) + ''' + + return (self.sinf(Vm) - s) / self.taus(Vm) + + + def derU(self, Vm, u): + ''' Compute the evolution of the open-probability of the U-type, + inactivation gate of Calcium channels. + + :param Vm: membrane potential (mV) + :param u: open-probability of U-type Calcium inactivation gates (prob) + :return: derivative of open-probability w.r.t. time (prob/s) + ''' + + return (self.uinf(Vm) - u) / self.tauu(Vm) + + + def currCa(self, s, u, Vm): + ''' Compute the inward, low-threshold Calcium current per unit area. + + :param s: open-probability of the S-type activation gate of Calcium channels + :param u: open-probability of the U-type inactivation gate of Calcium channels + :param Vm: membrane potential (mV) + :return: current per unit area (mA/m2) + ''' + + GT = self.GTMax * s**2 * u + return GT * (Vm - self.VCa) + + + def currNet(self, Vm, states): + ''' Concrete implementation of the abstract API method. ''' + + m, h, n, p, s, u = states + return (self.currNa(m, h, Vm) + self.currK(n, Vm) + self.currM(p, Vm) + + self.currCa(s, u, Vm) + self.currL(Vm)) # mA/m2 + + + def steadyStates(self, Vm): + ''' Concrete implementation of the abstract API method. ''' + + # Call parent method to compute Sodium and Potassium channels gates steady-states + NaK_eqstates = super().steadyStates(Vm) + + # Compute Calcium channel gates steady-states + seq = self.sinf(Vm) + ueq = self.uinf(Vm) + Ca_eqstates = np.array([seq, ueq]) + + # Merge all steady-states and return + return np.concatenate((NaK_eqstates, Ca_eqstates)) + + + def derStates(self, Vm, states): + ''' Concrete implementation of the abstract API method. ''' + + # Unpack input states + *NaK_states, s, u = states + + # Call parent method to compute Sodium and Potassium channels states derivatives + NaK_derstates = super().derStates(Vm, NaK_states) + + # Compute Calcium channels states derivatives + dsdt = self.derS(Vm, s) + dudt = self.derU(Vm, u) + + # Merge all states derivatives and return + return NaK_derstates + [dsdt, dudt] + + + def getEffRates(self, Vm): + ''' Concrete implementation of the abstract API method. ''' + + # Call parent method to compute Sodium and Potassium effective rate constants + NaK_rates = super().getEffRates(Vm) + + # Compute Calcium effective rate constants + Ts = self.taus(Vm) + sinf = self.sinf(Vm) + as_avg = np.mean(sinf / Ts) + bs_avg = np.mean(1 / Ts) - as_avg + Tu = np.array([self.tauu(v) for v in Vm]) + uinf = self.uinf(Vm) + au_avg = np.mean(uinf / Tu) + bu_avg = np.mean(1 / Tu) - au_avg + Ca_rates = np.array([as_avg, bs_avg, au_avg, bu_avg]) + + # Merge all rates and return + return np.concatenate((NaK_rates, Ca_rates)) + + + def derStatesEff(self, Adrive, Qm, states, interpolators): + ''' Concrete implementation of the abstract API method. 
''' + + # Unpack input states + *NaK_states, s, u = states + + # Call parent method to compute Sodium and Potassium channels states derivatives + NaK_dstates = super().derStatesEff(Adrive, Qm, NaK_states, interpolators) + + # Compute Calcium channels states derivatives + Ca_rates = np.array([interpolators[rn](Adrive, Qm) for rn in self.coeff_names[8:]]) + dsdt = Ca_rates[0] * (1 - s) - Ca_rates[1] * s + dudt = Ca_rates[2] * (1 - u) - Ca_rates[3] * u + + # Merge all states derivatives and return + return NaK_dstates + [dsdt, dudt] diff --git a/PointNICE/channels/leech.py b/PointNICE/channels/leech.py new file mode 100644 index 0000000..dcdf78e --- /dev/null +++ b/PointNICE/channels/leech.py @@ -0,0 +1,377 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# @Author: Theo Lemaire +# @Date: 2017-07-31 15:20:54 +# @Email: theo.lemaire@epfl.ch +# @Last Modified by: Theo Lemaire +# @Last Modified time: 2017-08-22 10:45:35 + +''' Channels mechanisms for leech ganglion neurons. ''' + +from functools import partialmethod +import numpy as np +from .base import BaseMech + + +class LeechTouch(BaseMech): + ''' Class defining the membrane channel dynamics of a leech touch sensory neuron. + with 4 different current types: + - Inward Sodium current + - Outward Potassium current + - Inward Calcium current + - Non-specific leakage current + - Calcium-dependent, outward Potassium current + - Outward, Sodium pumping current + + Reference: + *Cataldo, E., Brunelli, M., Byrne, J.H., Av-Ron, E., Cai, Y., and Baxter, D.A. (2005). + Computational model of touch sensory cells (T Cells) of the leech: role of the + afterhyperpolarization (AHP) in activity-dependent conduction failure. + J Comput Neurosci 18, 5–24.* + ''' + + # Name of channel mechanism + name = 'LeechT' + + # Cell-specific biophysical parameters + Cm0 = 1e-2 # Cell membrane resting capacitance (F/m2) + Vm0 = -53.0 # Cell membrane resting potential (mV) + VNa = 45.0 # Sodium Nernst potential (mV) + VK = -62.0 # Potassium Nernst potential (mV) + VCa = 60.0 # Calcium Nernst potential (mV) + VKCa = -62.0 # Calcium-dependent, Potassium current Nernst potential (mV) + VL = -48.0 # Non-specific leakage Nernst potential (mV) + VPumpNa = -300.0 # Sodium pump current reversal potential (mV) + GNaMax = 3500.0 # Max. conductance of Sodium current (S/m^2) + GKMax = 900.0 # Max. conductance of Potassium current (S/m^2) + GCaMax = 20.0 # Max. conductance of Calcium current (S/m^2) + GKCaMax = 236.0 # Max. conductance of Calcium-dependent Potassium current (S/m^2) + GL = 1.0 # Conductance of non-specific leakage current (S/m^2) + GPumpNa = 20.0 # Max. 
conductance of Sodium pump current (S/m^2) + + taum = 0.1e-3 # Sodium activation time constant (s) + taus = 0.6e-3 # Calcium activation time constant (s) + + + surface = 6434.0 # surface of cell assumed as a single soma (um2) + curr_factor = 1e6 # 1/nA to 1/mA + + tau_Na_removal = 16.0 # Time constant for the removal of Sodium ions from the pool (s) + tau_Ca_removal = 1.25 # Time constant for the removal of Calcium ions from the pool (s) + + tau_PumpNa_act = 0.1 # Time constant for the PumpNa current activation from Sodium ions (s) + tau_KCa_act = 0.01 # Time constant for the KCa current activation from Calcium ions (s) + + # Names of the channels state probabilities + states_names = ['m', 'h', 'n', 's', 'C_Na', 'A_Na', 'C_Ca', 'A_Ca'] + + # Names of the channels effective coefficients + coeff_names = ['alpham', 'betam', 'alphah', 'betah', 'alphan', 'betan', 'alphas', 'betas'] + + # initial channel probabilities + states0 = np.array([]) + + def __init__(self): + ''' Constructor of the class. ''' + + self.K_Na = 0.016 * self.surface / self.curr_factor + self.K_Ca = 0.1 * self.surface / self.curr_factor + + # Define initial channel probabilities (solving dx/dt = 0 at resting potential) + self.states0 = self.steadyStates(self.Vm0) + + + # ----------------- Generic ----------------- + + def _xinf(self, Vm, halfmax, slope, power): + ''' Generic function computing the steady-state activation/inactivation of a + particular ion channel at a given voltage. + + :param Vm: membrane potential (mV) + :param halfmax: half-(in)activation voltage (mV) + :param slope: slope parameter of (in)activation function (mV) + :param power: power exponent multiplying the exponential expression (integer) + :return: steady-state (in)activation (-) + ''' + + return 1 / (1 + np.exp((Vm - halfmax) / slope))**power + + + def _taux(self, Vm, halfmax, slope, tauMax, tauMin): + ''' Generic function computing the voltage-dependent, activation/inactivation time constant + of a particular ion channel at a given voltage. + + :param Vm: membrane potential (mV) + :param halfmax: voltage at which (in)activation time constant is half-maximal (mV) + :param slope: slope parameter of (in)activation time constant function (mV) + :return: steady-state (in)activation (-) + ''' + + return (tauMax - tauMin) / (1 + np.exp((Vm - halfmax) / slope)) + tauMin + + + def _derC_ion(self, Cion, Iion, Kion, tau): + ''' Generic function computing the time derivative of the concentration + of a specific ion in its intracellular pool. + + :param Cion: ion concentration in the pool (arbitrary unit) + :param Iion: ionic current (mA/m2) + :param Kion: scaling factor for current contribution to pool (arb. unit / nA???) + :param tau: time constant for removal of ions from the pool (s) + :return: variation of ionic concentration in the pool (arbitrary unit /s) + ''' + + return (Kion * (-Iion) - Cion) / tau + + + + def _derA_ion(self, Aion, Cion, tau): + ''' Generic function computing the time derivative of the concentration and time + dependent activation function, for a specific pool-dependent ionic current. 
+ + :param Aion: concentration and time dependent activation function (arbitrary unit) + :param Cion: ion concentration in the pool (arbitrary unit) + :param tau: time constant for activation function variation (s) + :return: variation of activation function (arbitrary unit / s) + ''' + return (Cion - Aion) / tau + + + # ------------------ Na ------------------- + + + minf = partialmethod(_xinf, halfmax=-35.0, slope=-5.0, power=1) + hinf = partialmethod(_xinf, halfmax=-50.0, slope=9.0, power=2) + tauh = partialmethod(_taux, halfmax=-36.0, slope=3.5, tauMax=14.0e-3, tauMin=0.2e-3) + + + def derM(self, Vm, m): + ''' Instantaneous derivative of Sodium activation. ''' + return (self.minf(Vm) - m) / self.taum # s-1 + + + def derH(self, Vm, h): + ''' Instantaneous derivative of Sodium inactivation. ''' + return (self.hinf(Vm) - h) / self.tauh(Vm) # s-1 + + + # ------------------ K ------------------- + + ninf = partialmethod(_xinf, halfmax=-22.0, slope=-9.0, power=1) + taun = partialmethod(_taux, halfmax=-10.0, slope=10.0, tauMax=6.0e-3, tauMin=1.0e-3) + + + def derN(self, Vm, n): + ''' Instantaneous derivative of Potassium activation. ''' + return (self.ninf(Vm) - n) / self.taun(Vm) # s-1 + + + # ------------------ Ca ------------------- + + sinf = partialmethod(_xinf, halfmax=-10.0, slope=-2.8, power=1) + + + def derS(self, Vm, s): + ''' Instantaneous derivative of Calcium activation. ''' + return (self.sinf(Vm) - s) / self.taus # s-1 + + + # ------------------ Pools ------------------- + + + def derC_Na(self, C_Na, I_Na): + ''' Derivative of Sodium concentration in intracellular pool. ''' + return self._derC_ion(C_Na, I_Na, self.K_Na, self.tau_Na_removal) + + + def derA_Na(self, A_Na, C_Na): + ''' Derivative of Sodium pool-dependent activation function for iPumpNa. ''' + return self._derA_ion(A_Na, C_Na, self.tau_PumpNa_act) + + + def derC_Ca(self, C_Ca, I_Ca): + ''' Derivative of Calcium concentration in intracellular pool. ''' + return self._derC_ion(C_Ca, I_Ca, self.K_Ca, self.tau_Ca_removal) + + + def derA_Ca(self, A_Ca, C_Ca): + ''' Derivative of Calcium pool-dependent activation function for iKCa. ''' + return self._derA_ion(A_Ca, C_Ca, self.tau_KCa_act) + + + # ------------------ Currents ------------------- + + def currNa(self, m, h, Vm): + ''' Sodium inward current. ''' + return self.GNaMax * m**3 * h * (Vm - self.VNa) + + + def currK(self, n, Vm): + ''' Potassium outward current. ''' + return self.GKMax * n**2 * (Vm - self.VK) + + + def currCa(self, s, Vm): + ''' Calcium inward current. ''' + return self.GCaMax * s * (Vm - self.VCa) + + + def currKCa(self, A_Ca, Vm): + ''' Calcium-activated Potassium outward current. ''' + return self.GKCaMax * A_Ca * (Vm - self.VKCa) + + + def currPumpNa(self, A_Na, Vm): + ''' Outward current mimicking the activity of the NaK-ATPase pump. ''' + return self.GPumpNa * A_Na * (Vm - self.VPumpNa) + + + def currL(self, Vm): + ''' Leakage current. ''' + return self.GL * (Vm - self.VL) + + + def currNet(self, Vm, states): + ''' Compute the net ionic current per unit area. + + :param Vm: membrane potential (mV) + :states: state probabilities of the ion channels + :return: current per unit area (mA/m2) + ''' + + m, h, n, s, _, A_Na, _, A_Ca = states + return (self.currNa(m, h, Vm) + self.currK(n, Vm) + self.currCa(s, Vm) + + self.currL(Vm) + self.currPumpNa(A_Na, Vm) + self.currKCa(A_Ca, Vm)) # mA/m2 + + + def steadyStates(self, Vm): + ''' Compute the Sodium, Potassium and Calcium channel gates steady-states + for a specific membrane potential value. 
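A small consistency check (illustrative only) for the generic pool equations above: _derC_ion describes first-order relaxation of the pool content towards Kion * (-Iion), so that value is the pool's steady state, which is exactly how the Sodium and Calcium pools are initialized in steadyStates() below.

from PointNICE.channels import LeechTouch

lt = LeechTouch()
INa_rest = lt.currNa(lt.minf(lt.Vm0), lt.hinf(lt.Vm0), lt.Vm0)   # resting Sodium current (mA/m2)
CNa_rest = lt.K_Na * (-INa_rest)                                 # corresponding pool steady state
assert abs(lt._derC_ion(CNa_rest, INa_rest, lt.K_Na, lt.tau_Na_removal)) < 1e-12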
+ + :param Vm: membrane potential (mV) + :return: array of steady-states + ''' + + # Standard gating dynamics: Solve the equation dx/dt = 0 at Vm for each x-state + meq = self.minf(Vm) + heq = self.hinf(Vm) + neq = self.ninf(Vm) + seq = self.sinf(Vm) + + # PumpNa pool concentration and activation steady-state + INa_eq = self.currNa(meq, heq, Vm) + CNa_eq = self.K_Na * (-INa_eq) + ANa_eq = CNa_eq + # print('initial Na current: {:.2f} mA/m2'.format(INa_eq)) + # print('initial Na concentration in pool: {:.2f} arb. unit'.format(CNa_eq)) + + # KCa current pool concentration and activation steady-state + ICa_eq = self.currCa(seq, Vm) + CCa_eq = self.K_Ca * (-ICa_eq) + ACa_eq = CCa_eq + # print('initial Ca current: {:.2f} mA/m2'.format(ICa_eq)) + # print('initial Ca concentration in pool: {:.2f} arb. unit'.format(CCa_eq)) + + return np.array([meq, heq, neq, seq, CNa_eq, ANa_eq, CCa_eq, ACa_eq]) + + + def derStates(self, Vm, states): + ''' Compute the derivatives of channel states. + + :param Vm: membrane potential (mV) + :states: state probabilities of the ion channels + :return: current per unit area (mA/m2) + ''' + + # Unpack states + m, h, n, s, C_Na, A_Na, C_Ca, A_Ca = states + + # Standard gating states derivatives + dmdt = self.derM(Vm, m) + dhdt = self.derH(Vm, h) + dndt = self.derN(Vm, n) + dsdt = self.derS(Vm, s) + + # PumpNa current pool concentration and activation state + I_Na = self.currNa(m, h, Vm) + dCNa_dt = self.derC_Na(C_Na, I_Na) + dANa_dt = self.derA_Na(A_Na, C_Na) + + # KCa current pool concentration and activation state + I_Ca = self.currCa(s, Vm) + dCCa_dt = self.derC_Ca(C_Ca, I_Ca) + dACa_dt = self.derA_Ca(A_Ca, C_Ca) + + # Pack derivatives and return + return [dmdt, dhdt, dndt, dsdt, dCNa_dt, dANa_dt, dCCa_dt, dACa_dt] + + + def getEffRates(self, Vm): + ''' Get the effective rate constants of ion channels, averaged along an acoustic cycle, + for future use in effective simulations. + + :param Vm: array of membrane potential values for an acoustic cycle (mV) + :return: an array of rate average constants (s-1) + ''' + + # Compute average cycle value for rate constants + Tm = self.taum + minf = self.minf(Vm) + am_avg = np.mean(minf / Tm) + bm_avg = np.mean(1 / Tm) - am_avg + + Th = self.tauh(Vm) + hinf = self.hinf(Vm) + ah_avg = np.mean(hinf / Th) + bh_avg = np.mean(1 / Th) - ah_avg + + Tn = self.taun(Vm) + ninf = self.ninf(Vm) + an_avg = np.mean(ninf / Tn) + bn_avg = np.mean(1 / Tn) - an_avg + + Ts = self.taus + sinf = self.sinf(Vm) + as_avg = np.mean(sinf / Ts) + bs_avg = np.mean(1 / Ts) - as_avg + + # Return array of coefficients + return np.array([am_avg, bm_avg, ah_avg, bh_avg, an_avg, bn_avg, as_avg, bs_avg]) + + + + def derStatesEff(self, Adrive, Qm, states, interpolators): + ''' Compute the effective derivatives of channel states, based on + 2-dimensional linear interpolation of "effective" coefficients + that summarize the system's behaviour over an acoustic cycle. + + :param Vm_eff: effective membrane potential (mV) + :states: state probabilities of the ion channels + :param interpolators: dictionary of 2-dimensional linear interpolators + of "effective" rates over the 2D amplitude x charge input domain. 
+        '''
+
+        rates = np.array([interpolators[rn](Adrive, Qm) for rn in self.coeff_names])
+        Vmeff = interpolators['V'](Adrive, Qm)
+
+        # Unpack states
+        m, h, n, s, C_Na, A_Na, C_Ca, A_Ca = states
+
+        # Standard gating states derivatives
+        dmdt = rates[0] * (1 - m) - rates[1] * m
+        dhdt = rates[2] * (1 - h) - rates[3] * h
+        dndt = rates[4] * (1 - n) - rates[5] * n
+        dsdt = rates[6] * (1 - s) - rates[7] * s
+
+        # PumpNa current pool concentration and activation state
+        I_Na = self.currNa(m, h, Vmeff)
+        dCNa_dt = self.derC_Na(C_Na, I_Na)
+        dANa_dt = self.derA_Na(A_Na, C_Na)
+
+        # KCa current pool concentration and activation state
+        I_Ca_eff = self.currCa(s, Vmeff)
+        dCCa_dt = self.derC_Ca(C_Ca, I_Ca_eff)
+        dACa_dt = self.derA_Ca(A_Ca, C_Ca)
+
+        # Pack derivatives and return
+        return [dmdt, dhdt, dndt, dsdt, dCNa_dt, dANa_dt, dCCa_dt, dACa_dt]
diff --git a/PointNICE/channels/thalamic.py b/PointNICE/channels/thalamic.py
new file mode 100644
index 0000000..14fd16e
--- /dev/null
+++ b/PointNICE/channels/thalamic.py
@@ -0,0 +1,771 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# @Author: Theo Lemaire
+# @Date: 2017-07-31 15:20:54
+# @Email: theo.lemaire@epfl.ch
+# @Last Modified by: Theo Lemaire
+# @Last Modified time: 2017-08-22 14:48:08
+
+''' Channels mechanisms for thalamic neurons. '''
+
+import numpy as np
+from .base import BaseMech
+
+
+class Thalamic(BaseMech):
+    ''' Class defining the generic membrane channel dynamics of a thalamic neuron
+        with 4 different current types:
+            - Inward Sodium current
+            - Outward Potassium current
+            - Inward Calcium current
+            - Non-specific leakage current
+        This generic class cannot be used directly as it does not contain any specific parameters.
+
+        Reference:
+        *Plaksin, M., Kimmel, E., and Shoham, S. (2016). Cell-Type-Selective Effects of
+        Intramembrane Cavitation as a Unifying Theoretical Framework for Ultrasonic
+        Neuromodulation. eNeuro 3.*
+    '''
+
+    # Generic biophysical parameters of thalamic cells
+    Cm0 = 1e-2  # Cell membrane resting capacitance (F/m2)
+    Vm0 = 0.0  # Dummy value for membrane potential (mV)
+    VNa = 50.0  # Sodium Nernst potential (mV)
+    VK = -90.0  # Potassium Nernst potential (mV)
+    VCa = 120.0  # Calcium Nernst potential (mV)
+
+    # Names and initial states of the channels state probabilities
+    states_names = ['m', 'h', 'n', 's', 'u']
+    states0 = np.array([])
+
+    # Names of the different coefficients to be averaged in a lookup table.
+    coeff_names = ['alpham', 'betam', 'alphah', 'betah', 'alphan', 'betan', 'alphas', 'betas',
+                   'alphau', 'betau']
+
+
+    def __init__(self):
+        ''' Constructor of the class '''
+        pass
+
+
+    def alpham(self, Vm):
+        ''' Compute the alpha rate for the open-probability of Sodium channels.
+
+            :param Vm: membrane potential (mV)
+            :return: rate constant (s-1)
+        '''
+
+        Vdiff = Vm - self.VT
+        alpha = (-0.32 * (Vdiff - 13) / (np.exp(- (Vdiff - 13) / 4) - 1))  # ms-1
+        return alpha * 1e3  # s-1
+
+
+    def betam(self, Vm):
+        ''' Compute the beta rate for the open-probability of Sodium channels.
+
+            :param Vm: membrane potential (mV)
+            :return: rate constant (s-1)
+        '''
+
+        Vdiff = Vm - self.VT
+        beta = (0.28 * (Vdiff - 40) / (np.exp((Vdiff - 40) / 5) - 1))  # ms-1
+        return beta * 1e3  # s-1
+
+
+    def alphah(self, Vm):
+        ''' Compute the alpha rate for the inactivation-probability of Sodium channels.
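+
+            The rate is evaluated on the threshold-shifted potential Vdiff = Vm - VT,
+            as alpha_h = 0.128 * exp(-(Vdiff - 17) / 18) ms-1, then converted to s-1.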
+ + :param Vm: membrane potential (mV) + :return: rate constant (s-1) + ''' + + Vdiff = Vm - self.VT + alpha = (0.128 * np.exp(-(Vdiff - 17) / 18)) # ms-1 + return alpha * 1e3 # s-1 + + + def betah(self, Vm): + ''' Compute the beta rate for the inactivation-probability of Sodium channels. + + :param Vm: membrane potential (mV) + :return: rate constant (s-1) + ''' + + Vdiff = Vm - self.VT + beta = (4 / (1 + np.exp(-(Vdiff - 40) / 5))) # ms-1 + return beta * 1e3 # s-1 + + + def alphan(self, Vm): + ''' Compute the alpha rate for the open-probability of delayed-rectifier Potassium channels. + + :param Vm: membrane potential (mV) + :return: rate constant (s-1) + ''' + + Vdiff = Vm - self.VT + alpha = (-0.032 * (Vdiff - 15) / (np.exp(-(Vdiff - 15) / 5) - 1)) # ms-1 + return alpha * 1e3 # s-1 + + + def betan(self, Vm): + ''' Compute the beta rate for the open-probability of delayed-rectifier Potassium channels. + + :param Vm: membrane potential (mV) + :return: rate constant (s-1) + ''' + + Vdiff = Vm - self.VT + beta = (0.5 * np.exp(-(Vdiff - 10) / 40)) # ms-1 + return beta * 1e3 # s-1 + + + def derM(self, Vm, m): + ''' Compute the evolution of the open-probability of Sodium channels. + + :param Vm: membrane potential (mV) + :param m: open-probability of Sodium channels (prob) + :return: derivative of open-probability w.r.t. time (prob/s) + ''' + + return self.alpham(Vm) * (1 - m) - self.betam(Vm) * m + + + def derH(self, Vm, h): + ''' Compute the evolution of the inactivation-probability of Sodium channels. + + :param Vm: membrane potential (mV) + :param h: inactivation-probability of Sodium channels (prob) + :return: derivative of open-probability w.r.t. time (prob/s) + ''' + + return self.alphah(Vm) * (1 - h) - self.betah(Vm) * h + + + def derN(self, Vm, n): + ''' Compute the evolution of the open-probability of delayed-rectifier Potassium channels. + + :param Vm: membrane potential (mV) + :param n: open-probability of delayed-rectifier Potassium channels (prob) + :return: derivative of open-probability w.r.t. time (prob/s) + ''' + + return self.alphan(Vm) * (1 - n) - self.betan(Vm) * n + + + def derS(self, Vm, s): + ''' Compute the evolution of the open-probability of the S-type, + activation gate of Calcium channels. + + :param Vm: membrane potential (mV) + :param s: open-probability of S-type Calcium activation gates (prob) + :return: derivative of open-probability w.r.t. time (prob/s) + ''' + + return (self.sinf(Vm) - s) / self.taus(Vm) + + + def derU(self, Vm, u): + ''' Compute the evolution of the open-probability of the U-type, + inactivation gate of Calcium channels. + + :param Vm: membrane potential (mV) + :param u: open-probability of U-type Calcium inactivation gates (prob) + :return: derivative of open-probability w.r.t. time (prob/s) + ''' + + return (self.uinf(Vm) - u) / self.tauu(Vm) + + + def currNa(self, m, h, Vm): + ''' Compute the inward Sodium current per unit area. + + :param m: open-probability of Sodium channels + :param h: inactivation-probability of Sodium channels + :param Vm: membrane potential (mV) + :return: current per unit area (mA/m2) + ''' + + GNa = self.GNaMax * m**3 * h + return GNa * (Vm - self.VNa) + + + def currK(self, n, Vm): + ''' Compute the outward delayed-rectifier Potassium current per unit area. 
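+
+            The current follows the delayed-rectifier formulation
+            iK = GKMax * n^4 * (Vm - VK).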
+ + :param n: open-probability of delayed-rectifier Potassium channels + :param Vm: membrane potential (mV) + :return: current per unit area (mA/m2) + ''' + + GK = self.GKMax * n**4 + return GK * (Vm - self.VK) + + + def currCa(self, s, u, Vm): + ''' Compute the inward Calcium current per unit area. + + :param s: open-probability of the S-type activation gate of Calcium channels + :param u: open-probability of the U-type inactivation gate of Calcium channels + :param Vm: membrane potential (mV) + :return: current per unit area (mA/m2) + ''' + + GT = self.GTMax * s**2 * u + return GT * (Vm - self.VCa) + + + def currL(self, Vm): + ''' Compute the non-specific leakage current per unit area. + + :param Vm: membrane potential (mV) + :return: current per unit area (mA/m2) + ''' + + return self.GL * (Vm - self.VL) + + + def currNet(self, Vm, states): + ''' Concrete implementation of the abstract API method. ''' + + m, h, n, s, u = states + return (self.currNa(m, h, Vm) + self.currK(n, Vm) + + self.currCa(s, u, Vm) + self.currL(Vm)) # mA/m2 + + + def steadyStates(self, Vm): + ''' Concrete implementation of the abstract API method. ''' + + # Solve the equation dx/dt = 0 at Vm for each x-state + meq = self.alpham(Vm) / (self.alpham(Vm) + self.betam(Vm)) + heq = self.alphah(Vm) / (self.alphah(Vm) + self.betah(Vm)) + neq = self.alphan(Vm) / (self.alphan(Vm) + self.betan(Vm)) + seq = self.sinf(Vm) + ueq = self.uinf(Vm) + return np.array([meq, heq, neq, seq, ueq]) + + + def derStates(self, Vm, states): + ''' Concrete implementation of the abstract API method. ''' + + m, h, n, s, u = states + dmdt = self.derM(Vm, m) + dhdt = self.derH(Vm, h) + dndt = self.derN(Vm, n) + dsdt = self.derS(Vm, s) + dudt = self.derU(Vm, u) + return [dmdt, dhdt, dndt, dsdt, dudt] + + + def getEffRates(self, Vm): + ''' Concrete implementation of the abstract API method. ''' + + # Compute average cycle value for rate constants + am_avg = np.mean(self.alpham(Vm)) + bm_avg = np.mean(self.betam(Vm)) + ah_avg = np.mean(self.alphah(Vm)) + bh_avg = np.mean(self.betah(Vm)) + an_avg = np.mean(self.alphan(Vm)) + bn_avg = np.mean(self.betan(Vm)) + Ts = self.taus(Vm) + sinf = self.sinf(Vm) + as_avg = np.mean(sinf / Ts) + bs_avg = np.mean(1 / Ts) - as_avg + Tu = np.array([self.tauu(v) for v in Vm]) + uinf = self.uinf(Vm) + au_avg = np.mean(uinf / Tu) + bu_avg = np.mean(1 / Tu) - au_avg + + # Return array of coefficients + return np.array([am_avg, bm_avg, ah_avg, bh_avg, an_avg, bn_avg, + as_avg, bs_avg, au_avg, bu_avg]) + + + def derStatesEff(self, Adrive, Qm, states, interpolators): + ''' Concrete implementation of the abstract API method. ''' + + rates = np.array([interpolators[rn](Adrive, Qm) for rn in self.coeff_names]) + + m, h, n, s, u = states + dmdt = rates[0] * (1 - m) - rates[1] * m + dhdt = rates[2] * (1 - h) - rates[3] * h + dndt = rates[4] * (1 - n) - rates[5] * n + dsdt = rates[6] * (1 - s) - rates[7] * s + dudt = rates[8] * (1 - u) - rates[9] * u + + return [dmdt, dhdt, dndt, dsdt, dudt] + + + +class ThalamicRE(Thalamic): + ''' Specific membrane channel dynamics of a thalamic reticular neuron. + + References: + *Destexhe, A., Contreras, D., Steriade, M., Sejnowski, T.J., and Huguenard, J.R. (1996). + In vivo, in vitro, and computational analysis of dendritic calcium currents in thalamic + reticular neurons. J. Neurosci. 16, 169–185.* + + *Huguenard, J.R., and Prince, D.A. (1992). A novel T-type current underlies prolonged + Ca(2+)-dependent burst firing in GABAergic neurons of rat thalamic reticular nucleus. + J. Neurosci. 
12, 3804–3817.* + + ''' + + # Name of channel mechanism + name = 'RE' + + # Cell-specific biophysical parameters + Vm0 = -89.5 # Cell membrane resting potential (mV) + GNaMax = 2000.0 # Max. conductance of Sodium current (S/m^2) + GKMax = 200.0 # Max. conductance of Potassium current (S/m^2) + GTMax = 30.0 # Max. conductance of low-threshold Calcium current (S/m^2) + GL = 0.5 # Conductance of non-specific leakage current (S/m^2) + VL = -90.0 # Non-specific leakage Nernst potential (mV) + VT = -67.0 # Spike threshold adjustment parameter (mV) + + def __init__(self): + ''' Constructor of the class. ''' + + # Instantiate parent class + super().__init__() + + # Define initial channel probabilities (solving dx/dt = 0 at resting potential) + self.states0 = self.steadyStates(self.Vm0) + + + def sinf(self, Vm): + ''' Compute the asymptotic value of the open-probability of the S-type, + activation gate of Calcium channels. + + :param Vm: membrane potential (mV) + :return: asymptotic probability (-) + ''' + + return 1.0 / (1.0 + np.exp(-(Vm + 52.0) / 7.4)) # prob + + + def taus(self, Vm): + ''' Compute the decay time constant for adaptation of S-type, + activation gate of Calcium channels. + + :param Vm: membrane potential (mV) + :return: decayed time constant (s) + ''' + return (1 + 0.33 / (np.exp((Vm + 27.0) / 10.0) + np.exp(-(Vm + 102.0) / 15.0))) * 1e-3 # s + + + def uinf(self, Vm): + ''' Compute the asymptotic value of the open-probability of the U-type, + inactivation gate of Calcium channels. + + :param Vm: membrane potential (mV) + :return: asymptotic probability (-) + ''' + + return 1.0 / (1.0 + np.exp((Vm + 80.0) / 5.0)) # prob + + + def tauu(self, Vm): + ''' Compute the decay time constant for adaptation of U-type, + inactivation gate of Calcium channels. + + :param Vm: membrane potential (mV) + :return: decayed time constant (s) + ''' + return (28.3 + 0.33 / (np.exp((Vm + 48.0) / 4.0) + np.exp(-(Vm + 407.0) / 50.0))) * 1e-3 # s + + + +class ThalamoCortical(Thalamic): + ''' Specific membrane channel dynamics of a thalamo-cortical neuron, with a specific + hyperpolarization-activated, mixed cationic current and a leakage Potassium current. + + References: + *Pospischil, M., Toledo-Rodriguez, M., Monier, C., Piwkowska, Z., Bal, T., Frégnac, Y., + Markram, H., and Destexhe, A. (2008). Minimal Hodgkin-Huxley type models for different + classes of cortical and thalamic neurons. Biol Cybern 99, 427–441.* + *Destexhe, A., Bal, T., McCormick, D.A., and Sejnowski, T.J. (1996). Ionic mechanisms + underlying synchronized oscillations and propagating waves in a model of ferret + thalamic slices. J. Neurophysiol. 76, 2049–2070.* + *McCormick, D.A., and Huguenard, J.R. (1992). A model of the electrophysiological + properties of thalamocortical relay neurons. J. Neurophysiol. 68, 1384–1400.* + ''' + + + # Name of channel mechanism + name = 'TC' + + # Cell-specific biophysical parameters + # Vm0 = -63.4 # Cell membrane resting potential (mV) + Vm0 = -61.93 # Cell membrane resting potential (mV) + GNaMax = 900.0 # Max. conductance of Sodium current (S/m^2) + GKMax = 100.0 # Max. conductance of Potassium current (S/m^2) + GTMax = 20.0 # Max. conductance of low-threshold Calcium current (S/m^2) + GKL = 0.138 # Conductance of leakage Potassium current (S/m^2) + GhMax = 0.175 # Max. 
conductance of mixed cationic current (S/m^2) + GL = 0.1 # Conductance of non-specific leakage current (S/m^2) + Vh = -40.0 # Mixed cationic current reversal potential (mV) + VL = -70.0 # Non-specific leakage Nernst potential (mV) + VT = -52.0 # Spike threshold adjustment parameter (mV) + Vx = 0.0 # Voltage-dependence uniform shift factor at 36°C (mV) + + tau_Ca_removal = 5e-3 # decay time constant for intracellular Ca2+ dissolution (s) + CCa_min = 50e-9 # minimal intracellular Calcium concentration (M) + deff = 100e-9 # effective depth beneath membrane for intracellular [Ca2+] calculation + F_Ca = 1.92988e5 # Faraday constant for bivalent ion (Coulomb / mole) + nCa = 4 # number of Calcium binding sites on regulating factor + k1 = 2.5e22 # intracellular Ca2+ regulation factor (M-4 s-1) + k2 = 0.4 # intracellular Ca2+ regulation factor (s-1) + k3 = 100.0 # intracellular Ca2+ regulation factor (s-1) + k4 = 1.0 # intracellular Ca2+ regulation factor (s-1) + + + def __init__(self): + ''' Constructor of the class. ''' + + # Instantiate parent class + super().__init__() + + # Compute current to concentration conversion constant + self.iT_2_CCa = 1e-6 / (self.deff * self.F_Ca) + + # Define names of the channels state probabilities + self.states_names += ['O', 'C', 'P0', 'C_Ca'] + + # Define the names of the different coefficients to be averaged in a lookup table. + self.coeff_names += ['alphao', 'betao'] + + # Define initial channel probabilities (solving dx/dt = 0 at resting potential) + self.states0 = self.steadyStates(self.Vm0) + + + def sinf(self, Vm): + ''' Compute the asymptotic value of the open-probability of the S-type, + activation gate of Calcium channels. + + Reference: + *Pospischil, M., Toledo-Rodriguez, M., Monier, C., Piwkowska, Z., Bal, T., Frégnac, Y., + Markram, H., and Destexhe, A. (2008). Minimal Hodgkin-Huxley type models for different + classes of cortical and thalamic neurons. Biol Cybern 99, 427–441.* + + :param Vm: membrane potential (mV) + :return: asymptotic probability (-) + ''' + + return 1.0 / (1.0 + np.exp(-(Vm + self.Vx + 57.0) / 6.2)) # prob + + + def taus(self, Vm): + ''' Compute the decay time constant for adaptation of S-type, + activation gate of Calcium channels. + + Reference: + *Pospischil, M., Toledo-Rodriguez, M., Monier, C., Piwkowska, Z., Bal, T., Frégnac, Y., + Markram, H., and Destexhe, A. (2008). Minimal Hodgkin-Huxley type models for different + classes of cortical and thalamic neurons. Biol Cybern 99, 427–441.* + + :param Vm: membrane potential (mV) + :return: decayed time constant (s) + ''' + tmp = np.exp(-(Vm + self.Vx + 132.0) / 16.7) + np.exp((Vm + self.Vx + 16.8) / 18.2) + return 1.0 / 3.7 * (0.612 + 1.0 / tmp) * 1e-3 # s + + + def uinf(self, Vm): + ''' Compute the asymptotic value of the open-probability of the U-type, + inactivation gate of Calcium channels. + + Reference: + *Pospischil, M., Toledo-Rodriguez, M., Monier, C., Piwkowska, Z., Bal, T., Frégnac, Y., + Markram, H., and Destexhe, A. (2008). Minimal Hodgkin-Huxley type models for different + classes of cortical and thalamic neurons. Biol Cybern 99, 427–441.* + + :param Vm: membrane potential (mV) + :return: asymptotic probability (-) + ''' + + return 1.0 / (1.0 + np.exp((Vm + self.Vx + 81.0) / 4.0)) # prob + + + def tauu(self, Vm): + ''' Compute the decay time constant for adaptation of U-type, + inactivation gate of Calcium channels. + + Reference: + *Pospischil, M., Toledo-Rodriguez, M., Monier, C., Piwkowska, Z., Bal, T., Frégnac, Y., + Markram, H., and Destexhe, A. (2008). 
Minimal Hodgkin-Huxley type models for different + classes of cortical and thalamic neurons. Biol Cybern 99, 427–441.* + + :param Vm: membrane potential (mV) + :return: decayed time constant (s) + ''' + + if Vm + self.Vx < -80.0: + return 1.0 / 3.7 * np.exp((Vm + self.Vx + 467.0) / 66.6) * 1e-3 # s + else: + return 1 / 3.7 * (np.exp(-(Vm + self.Vx + 22) / 10.5) + 28.0) * 1e-3 # s + + + def derS(self, Vm, s): + ''' Compute the evolution of the open-probability of the S-type, + activation gate of Calcium channels. + + :param Vm: membrane potential (mV) + :param s: open-probability of S-type Calcium activation gates (prob) + :return: derivative of open-probability w.r.t. time (prob/s) + ''' + + return (self.sinf(Vm) - s) / self.taus(Vm) + + + def derU(self, Vm, u): + ''' Compute the evolution of the open-probability of the U-type, + inactivation gate of Calcium channels. + + :param Vm: membrane potential (mV) + :param u: open-probability of U-type Calcium inactivation gates (prob) + :return: derivative of open-probability w.r.t. time (prob/s) + ''' + + return (self.uinf(Vm) - u) / self.tauu(Vm) + + + + def oinf(self, Vm): + ''' Voltage-dependent steady-state activation of hyperpolarization-activated + cation current channels. + + Reference: + *Huguenard, J.R., and McCormick, D.A. (1992). Simulation of the currents involved in + rhythmic oscillations in thalamic relay neurons. J. Neurophysiol. 68, 1373–1383.* + + :param Vm: membrane potential (mV) + :return: steady-state activation (-) + ''' + + return 1.0 / (1.0 + np.exp((Vm + 75.0) / 5.5)) + + + def tauo(self, Vm): + ''' Time constant for activation of hyperpolarization-activated cation current channels. + + Reference: + *Huguenard, J.R., and McCormick, D.A. (1992). Simulation of the currents involved in + rhythmic oscillations in thalamic relay neurons. J. Neurophysiol. 68, 1373–1383.* + + :param Vm: membrane potential (mV) + :return: time constant (s) + ''' + + return 1 / (np.exp(-14.59 - 0.086 * Vm) + np.exp(-1.87 + 0.0701 * Vm)) * 1e-3 + + + def alphao(self, Vm): + ''' Transition rate between closed and open form of hyperpolarization-activated + cation current channels. + + :param Vm: membrane potential (mV) + :return: transition rate (s-1) + ''' + + return self.oinf(Vm) / self.tauo(Vm) + + + def betao(self, Vm): + ''' Transition rate between open and closed form of hyperpolarization-activated + cation current channels. + + :param Vm: membrane potential (mV) + :return: transition rate (s-1) + ''' + + return (1 - self.oinf(Vm)) / self.tauo(Vm) + + + def derC(self, C, O, Vm): + ''' Compute the evolution of the proportion of hyperpolarization-activated + cation current channels in closed state. + + Kinetics scheme of Calcium dependent activation derived from: + *Destexhe, A., Bal, T., McCormick, D.A., and Sejnowski, T.J. (1996). Ionic mechanisms + underlying synchronized oscillations and propagating waves in a model of ferret + thalamic slices. J. Neurophysiol. 76, 2049–2070.* + + :param Vm: membrane potential (mV) + :param C: proportion of Ih channels in closed state (-) + :param O: proportion of Ih channels in open state (-) + :return: derivative of proportion w.r.t. time (s-1) + ''' + + return self.betao(Vm) * O - self.alphao(Vm) * C + + + def derO(self, C, O, P0, Vm): + ''' Compute the evolution of the proportion of hyperpolarization-activated + cation current channels in open state. + + Kinetics scheme of Calcium dependent activation derived from: + *Destexhe, A., Bal, T., McCormick, D.A., and Sejnowski, T.J. (1996). 
Ionic mechanisms + underlying synchronized oscillations and propagating waves in a model of ferret + thalamic slices. J. Neurophysiol. 76, 2049–2070.* + + :param Vm: membrane potential (mV) + :param C: proportion of Ih channels in closed state (-) + :param O: proportion of Ih channels in open state (-) + :param P0: proportion of Ih channels regulating factor in unbound state (-) + :return: derivative of proportion w.r.t. time (s-1) + ''' + + return - self.derC(C, O, Vm) - self.k3 * O * (1 - P0) + self.k4 * (1 - O - C) + + + def derP0(self, P0, C_Ca): + ''' Compute the evolution of the proportion of Ih channels regulating factor + in unbound state. + + Kinetics scheme of Calcium dependent activation derived from: + *Destexhe, A., Bal, T., McCormick, D.A., and Sejnowski, T.J. (1996). Ionic mechanisms + underlying synchronized oscillations and propagating waves in a model of ferret + thalamic slices. J. Neurophysiol. 76, 2049–2070.* + + :param Vm: membrane potential (mV) + :param P0: proportion of Ih channels regulating factor in unbound state (-) + :param C_Ca: Calcium concentration in effective submembranal space (M) + :return: derivative of proportion w.r.t. time (s-1) + ''' + + return self.k2 * (1 - P0) - self.k1 * P0 * C_Ca**self.nCa + + + def derC_Ca(self, C_Ca, ICa): + ''' Compute the evolution of the Calcium concentration in submembranal space. + + Model of Ca2+ buffering and contribution from iCa derived from: + *McCormick, D.A., and Huguenard, J.R. (1992). A model of the electrophysiological + properties of thalamocortical relay neurons. J. Neurophysiol. 68, 1384–1400.* + + + :param Vm: membrane potential (mV) + :param C_Ca: Calcium concentration in submembranal space (M) + :param ICa: inward Calcium current filling up the submembranal space with Ca2+ (mA/m2) + :return: derivative of Calcium concentration in submembranal space w.r.t. time (s-1) + ''' + + return (self.CCa_min - C_Ca) / self.tau_Ca_removal - self.iT_2_CCa * ICa + + + def currKL(self, Vm): + ''' Compute the voltage-dependent leak Potassium current per unit area. + + :param Vm: membrane potential (mV) + :return: current per unit area (mA/m2) + ''' + + return self.GKL * (Vm - self.VK) + + + def currH(self, O, C, Vm): + ''' Compute the outward mixed cationic current per unit area. + + :param O: proportion of the channels in open form + :param OL: proportion of the channels in locked-open form + :param Vm: membrane potential (mV) + :return: current per unit area (mA/m2) + ''' + + OL = 1 - O - C + return self.GhMax * (O + 2 * OL) * (Vm - self.Vh) + + + def currNet(self, Vm, states): + ''' Concrete implementation of the abstract API method. ''' + + m, h, n, s, u, O, C, _, _ = states + return (self.currNa(m, h, Vm) + self.currK(n, Vm) + + self.currCa(s, u, Vm) + + self.currKL(Vm) + + self.currH(O, C, Vm) + + self.currL(Vm)) # mA/m2 + + + def steadyStates(self, Vm): + ''' Concrete implementation of the abstract API method. 
''' + + # Call parent method to compute Sodium, Potassium and Calcium channels gates steady-states + NaKCa_eqstates = super().steadyStates(Vm) + + # Compute steady-state Calcium current + seq = NaKCa_eqstates[3] + ueq = NaKCa_eqstates[4] + iTeq = self.currCa(seq, ueq, Vm) + + # Compute steady-state variables for the kinetics system of Ih + CCa_eq = self.CCa_min - self.tau_Ca_removal * self.iT_2_CCa * iTeq + BA = self.betao(Vm) / self.alphao(Vm) + P0_eq = self.k2 / (self.k2 + self.k1 * CCa_eq**self.nCa) + O_eq = self.k4 / (self.k3 * (1 - P0_eq) + self.k4 * (1 + BA)) + C_eq = BA * O_eq + + kin_eqstates = np.array([O_eq, C_eq, P0_eq, CCa_eq]) + + # Merge all steady-states and return + return np.concatenate((NaKCa_eqstates, kin_eqstates)) + + + def derStates(self, Vm, states): + ''' Concrete implementation of the abstract API method. ''' + + m, h, n, s, u, O, C, P0, C_Ca = states + + NaKCa_states = [m, h, n, s, u] + NaKCa_derstates = super().derStates(Vm, NaKCa_states) + + dO_dt = self.derO(C, O, P0, Vm) + dC_dt = self.derC(C, O, Vm) + dP0_dt = self.derP0(P0, C_Ca) + ICa = self.currCa(s, u, Vm) + dCCa_dt = self.derC_Ca(C_Ca, ICa) + + return NaKCa_derstates + [dO_dt, dC_dt, dP0_dt, dCCa_dt] + + + def getEffRates(self, Vm): + ''' Concrete implementation of the abstract API method. ''' + + # Compute effective coefficients for Sodium, Potassium and Calcium conductances + NaKCa_effrates = super().getEffRates(Vm) + + # Compute effective coefficients for Ih conductance + ao_avg = np.mean(self.alphao(Vm)) + bo_avg = np.mean(self.betao(Vm)) + iH_effrates = np.array([ao_avg, bo_avg]) + + # Return array of coefficients + return np.concatenate((NaKCa_effrates, iH_effrates)) + + + def derStatesEff(self, Adrive, Qm, states, interpolators): + ''' Concrete implementation of the abstract API method. ''' + + rates = np.array([interpolators[rn](Adrive, Qm) for rn in self.coeff_names]) + Vmeff = interpolators['V'](Adrive, Qm) + + # Unpack states + m, h, n, s, u, O, C, P0, C_Ca = states + + # INa, IK, ICa effective states derivatives + dmdt = rates[0] * (1 - m) - rates[1] * m + dhdt = rates[2] * (1 - h) - rates[3] * h + dndt = rates[4] * (1 - n) - rates[5] * n + dsdt = rates[6] * (1 - s) - rates[7] * s + dudt = rates[8] * (1 - u) - rates[9] * u + + # Ih effective states derivatives + dC_dt = rates[11] * O - rates[10] * C + dO_dt = - dC_dt - self.k3 * O * (1 - P0) + self.k4 * (1 - O - C) + dP0_dt = self.derP0(P0, C_Ca) + ICa_eff = self.currCa(s, u, Vmeff) + dCCa_dt = self.derC_Ca(C_Ca, ICa_eff) + + # Merge derivatives and return + return [dmdt, dhdt, dndt, dsdt, dudt, dO_dt, dC_dt, dP0_dt, dCCa_dt] diff --git a/PointNICE/constants.py b/PointNICE/constants.py new file mode 100644 index 0000000..6e7b00f --- /dev/null +++ b/PointNICE/constants.py @@ -0,0 +1,45 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# @Author: Theo Lemaire +# @Date: 2016-11-04 13:23:31 +# @Email: theo.lemaire@epfl.ch +# @Last Modified by: Theo Lemaire +# @Last Modified time: 2017-08-22 17:29:02 + +''' Algorithmic constants used in the core modules. 
''' + +# Fitting and pre-processing +LJFIT_PM_MAX = 1e8 # intermolecular pressure at the deflection lower bound for LJ fitting (Pa) +PNET_EQ_MAX = 1e-1 # error threshold for net pressure at computed equilibrium position (Pa) +PMAVG_STD_ERR_MAX = 1500 # error threshold in nonlinear fit of molecular pressure (Pa) + + + +# Generic integration constants +NPC_FULL = 1000 # nb of samples per acoustic period in full system +SOLVER_NSTEPS = 1000 # maximum number of steps allowed during one call to the LSODA/DOP853 solvers +CLASSIC_DS_FACTOR = 3 # time downsampling factor applied to output arrays of classic simulations + +# Effective integration +DT_EFF = 5e-5 # time step for effective integration (s) +# DT_EFF = 1e-6 # time step for effective integration (s) + +# Mechanical simulations +Z_ERR_MAX = 1e-11 # periodic convergence threshold for deflection gas content (m) +NG_ERR_MAX = 1e-24 # periodic convergence threshold for gas content (mol) + +# Hybrid integration +NPC_HH = 40 # nb of samples per acoustic period in HH system +DQ_UPDATE = 1e-5 # charge evolution threshold between two hybrid integrations (C/m2) +DT_UPDATE = 5e-4 # time interval between two hybrid integrations (s) + +# Titrations +TITRATION_AMAX = 2e5 # initial acoustic pressure upper bound for titration procedure (Pa) +TITRATION_TMAX = 2e-1 # initial stimulus duration upper bound for titration procedure (Pa) +TITRATION_DA_THR = 1e3 # acoustic pressure search range threshold for titration procedure (Pa) +TITRATION_DT_THR = 1e-3 # stimulus duration search range threshold for titration procedure (s) + +# Spike detection +SPIKE_MIN_QAMP = 10e-5 # threshold amplitude for spike detection on charge signal (C/m2) +SPIKE_MIN_DT = 1e-3 # minimal time interval for spike detection on charge signal (s) + diff --git a/PointNICE/params.yaml b/PointNICE/params.yaml new file mode 100644 index 0000000..aeb2934 --- /dev/null +++ b/PointNICE/params.yaml @@ -0,0 +1,26 @@ +# BIOMECHANICS +biomech: + T: 309.15 # Temperature (K) + Rg: 8.314 # Universal gas constant (Pa.m^3.mol^-1.K^-1) + delta0: 2.0e-9 # Thickness of the leaflet (m) + Delta_: 1.4e-9 # Initial gap between the two leaflets on a non-charged membrane at equilibrium (m) + pDelta: 1.0e5 # Attraction/repulsion pressure coefficient (Pa) + m: 5.0 # Exponent in the repulsion term (dimensionless) + n: 3.3 # Exponent in the attraction term (dimensionless) + rhoL: 1075.0 # Density of the surrounding fluid (kg/m^3) + muL: 7.0e-4 # Dynamic viscosity of the surrounding fluid (Pa.s) + muS: 0.035 # Dynamic viscosity of the leaflet (Pa.s) + kA: 0.24 # Area compression modulus of the leaflet (N/m) + alpha: 7.56 # Tissue shear loss modulus frequency coefficient (Pa.s) + C0: 0.62 # Initial gas molar concentration in the surrounding fluid (mol/m^3) + kH: 1.613e5 # Henry's constant (Pa.m^3/mol) + P0: 1.0e5 # Static pressure in the surrounding fluid (Pa) + Dgl: 3.68e-9 # Diffusion coefficient of gas in the fluid (m^2/s) + xi: 0.5e-9 # Boundary layer thickness for gas transport across leaflet (m) + c: 1515.0 # Speed of sound in medium (m/s) + +# BIOPHYSICS +biophys: + epsilon0: 8.854e-12 # Vacuum permittivity (F/m) + epsilonR: 1.0 # Relative permittivity of intramembrane cavity (dimensionless) +... 
\ No newline at end of file diff --git a/PointNICE/pltvars.py b/PointNICE/pltvars.py new file mode 100644 index 0000000..e3b427b --- /dev/null +++ b/PointNICE/pltvars.py @@ -0,0 +1,321 @@ +# -*- coding: utf-8 -*- +# @Author: Theo Lemaire +# @Date: 2017-08-21 14:33:36 +# @Last Modified by: Theo Lemaire +# @Last Modified time: 2017-08-23 14:15:34 + +''' Dictionary of plotting settings for output variables of the model. ''' + + +pltvars = { + + 't': { + 'desc': 'time', + 'label': 'time', + 'unit': 'ms', + 'factor': 1e3, + 'onset': 3e-3 + }, + + 'Z': { + 'desc': 'leaflets deflection', + 'label': 'Z', + 'unit': 'nm', + 'factor': 1e9, + 'min': -1.0, + 'max': 10.0 + }, + + 'ng': { + 'desc': 'gas content', + 'label': 'gas', + 'unit': '10^{-22}\ mol', + 'factor': 1e22, + 'min': 1.0, + 'max': 15.0 + }, + + 'Pac': { + 'desc': 'acoustic pressure', + 'label': 'P_{AC}', + 'unit': 'kPa', + 'factor': 1e-3, + 'alias': 'bls.Pacoustic(t, Adrive * states, Fdrive)' + }, + + 'Pmavg': { + 'desc': 'average intermolecular pressure', + 'label': 'P_M', + 'unit': 'kPa', + 'factor': 1e-3, + 'alias': 'bls.PMavgpred(data["Z"])' + }, + + 'Telastic': { + 'desc': 'leaflet elastic tension', + 'label': 'T_E', + 'unit': 'mN/m', + 'factor': 1e3, + 'alias': 'bls.TEleaflet(data["Z"])' + }, + + 'Qm': { + 'desc': 'charge density', + 'label': 'Q_m', + 'unit': 'nC/cm^2', + 'factor': 1e5, + 'min': -100, + 'max': 50 + }, + + 'Cm': { + 'desc': 'membrane capacitance', + 'label': 'C_m', + 'unit': 'uF/cm^2', + 'factor': 1e2, + 'min': 0.0, + 'max': 1.5, + 'alias': 'np.array([bls.Capct(ZZ) for ZZ in data["Z"]])' + }, + + 'Vm': { + 'desc': 'membrane potential', + 'label': 'V_m', + 'unit': 'mV', + 'factor': 1e3, + 'alias': 'data["Qm"] / np.array([bls.Capct(ZZ) for ZZ in data["Z"]])' + }, + + + 'iL': { + 'desc': 'leakage current', + 'label': 'I_L', + 'unit': 'mA/cm^2', + 'factor': 1, + 'alias': 'neuron.currL(data["Qm"] * 1e3 / np.array([bls.Capct(ZZ) for ZZ in data["Z"]]))' + }, + + + 'm': { + 'desc': 'iNa activation gate opening', + 'label': 'm-gate', + 'unit': None, + 'factor': 1, + 'min': -0.1, + 'max': 1.1 + }, + + 'h': { + 'desc': 'iNa inactivation gate opening', + 'label': 'h-gate', + 'unit': None, + 'factor': 1, + 'min': -0.1, + 'max': 1.1 + }, + + 'n': { + 'desc': 'iK activation gate opening', + 'label': 'n-gate', + 'unit': None, + 'factor': 1, + 'min': -0.1, + 'max': 1.1 + }, + + 'p': { + 'desc': 'iM activation gate opening', + 'label': 'p-gate', + 'unit': None, + 'factor': 1, + 'min': -0.1, + 'max': 1.1 + }, + + 's': { + 'desc': 'iCa activation gates opening', + 'label': 's-gate', + 'unit': None, + 'factor': 1, + 'min': -0.1, + 'max': 1.1 + }, + + 'u': { + 'desc': 'iCa inactivation gates opening', + 'label': 'u-gate', + 'unit': None, + 'factor': 1, + 'min': -0.1, + 'max': 1.1 + }, + + 'O': { + 'desc': 'iH activation gate opening', + 'label': 'O', + 'unit': None, + 'factor': 1, + 'min': -0.1, + 'max': 1.1 + }, + + 'OL': { + 'desc': 'iH activation gate locked-opening', + 'label': 'O_L', + 'unit': None, + 'factor': 1, + 'min': -0.1, + 'max': 1.1, + 'alias': '1 - data["O"] - data["C"]' + }, + + 'P0': { + 'desc': 'iH regulating factor activation', + 'label': 'P_0', + 'unit': None, + 'factor': 1, + 'min': -0.1, + 'max': 1.1 + }, + + 'C_Ca': { + 'desc': 'sumbmembrane Ca2+ concentration', + 'label': '[Ca^{2+}]_i', + 'unit': 'uM', + 'factor': 1e6, + 'min': 0, + 'max': 150.0 + }, + + 'C_Na_arb': { + 'key': 'C_Na', + 'desc': 'submembrane Na+ concentration', + 'label': '[Na^+]', + 'unit': 'arb.', + 'factor': 1 + }, + + 'C_Na_arb_activation': { + 
'key': 'A_Na', + 'desc': 'Na+ dependent PumpNa current activation', + 'label': 'A_{Na^+}', + 'unit': 'arb', + 'factor': 1 + }, + + 'C_Ca_arb': { + 'key': 'C_Ca', + 'desc': 'submembrane Ca2+ concentration', + 'label': '[Ca^{2+}]', + 'unit': 'arb.', + 'factor': 1 + }, + + 'C_Ca_arb_activation': { + 'key': 'A_Ca', + 'desc': 'Ca2+ dependent Potassium current activation', + 'label': 'A_{Na^{2+}}', + 'unit': 'arb', + 'factor': 1 + }, + + 'VL': { + 'constant': 'neuron.VL', + 'desc': 'non-specific leakage current resting potential', + 'label': 'A_{Na^{2+}}', + 'unit': 'mV', + 'factor': 1e0 + }, + + 'Veff': { + 'key': 'V', + 'desc': 'effective membrane potential', + 'label': 'V_{m, eff}', + 'unit': 'mV', + 'factor': 1e0 + }, + + 'alpham': { + 'desc': 'iNa m-gate activation rate', + 'label': '\\alpha_{m,\ eff}', + 'unit': 'ms^-1', + 'factor': 1e-3 + }, + + 'betam': { + 'desc': 'iNa m-gate inactivation rate', + 'label': '\\beta_{m,\ eff}', + 'unit': 'ms^-1', + 'factor': 1e-3 + }, + + 'alphah': { + 'desc': 'iNa h-gate activation rate', + 'label': '\\alpha_{h,\ eff}', + 'unit': 'ms^-1', + 'factor': 1e-3 + }, + + 'betah': { + 'desc': 'iNa h-gate inactivation rate', + 'label': '\\beta_{h,\ eff}', + 'unit': 'ms^-1', + 'factor': 1e-3 + }, + + 'alphan': { + 'desc': 'iK n-gate activation rate', + 'label': '\\alpha_{n,\ eff}', + 'unit': 'ms^-1', + 'factor': 1e-3 + }, + + 'betan': { + 'desc': 'iK n-gate inactivation rate', + 'label': '\\beta_{n,\ eff}', + 'unit': 'ms^-1', + 'factor': 1e-3 + }, + + 'alphap': { + 'desc': 'iM p-gate activation rate', + 'label': '\\alpha_{p,\ eff}', + 'unit': 'ms^-1', + 'factor': 1e-3 + }, + + 'betap': { + 'desc': 'iM p-gate inactivation rate', + 'label': '\\beta_{p,\ eff}', + 'unit': 'ms^-1', + 'factor': 1e-3 + }, + + 'alphas': { + 'desc': 'iT s-gate activation rate', + 'label': '\\alpha_{s,\ eff}', + 'unit': 'ms^-1', + 'factor': 1e-3 + }, + + 'betas': { + 'desc': 'iT s-gate inactivation rate', + 'label': '\\beta_{s,\ eff}', + 'unit': 'ms^-1', + 'factor': 1e-3 + }, + + 'alphau': { + 'desc': 'iT u-gate activation rate', + 'label': '\\alpha_{u,\ eff}', + 'unit': 'ms^-1', + 'factor': 1e-3 + }, + + 'betau': { + 'desc': 'iT u-gate inactivation rate', + 'label': '\\beta_{u,\ eff}', + 'unit': 'ms^-1', + 'factor': 1e-3 + } +} diff --git a/PointNICE/solvers/SolverElec.py b/PointNICE/solvers/SolverElec.py new file mode 100644 index 0000000..de7e248 --- /dev/null +++ b/PointNICE/solvers/SolverElec.py @@ -0,0 +1,147 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# @Author: Theo Lemaire +# @Date: 2016-09-29 16:16:19 +# @Email: theo.lemaire@epfl.ch +# @Last Modified by: Theo Lemaire +# @Last Modified time: 2017-08-03 15:09:53 + + +import logging +import numpy as np +import scipy.integrate as integrate + +# Get package logger +logger = logging.getLogger('PointNICE') + + +class SolverElec: + + def __init__(self): + # Do nothing + logger.info('Elec solver initialization') + + + def eqHH(self, _, y, channel_mech, Iinj): + ''' Compute the derivatives of a HH system variables for a + specific value of injected current. 
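+
+            The membrane potential obeys dVm/dt = (Iinj - Iionic(Vm, states)) / Cm0,
+            where Iionic is the net ionic current returned by channel_mech.currNet.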
+ + :param t: time value (s, unused) + :param y: vector of HH system variables at time t + :param channel_mech: channels mechanism object + :param Iinj: injected current (mA/m2) + :return: vector of HH system derivatives at time t + ''' + + Vm, *states = y + Iionic = channel_mech.currNet(Vm, states) # mA/m2 + dVmdt = (- Iionic + Iinj) / channel_mech.Cm0 # mV/s + dstates = channel_mech.derStates(Vm, states) + return [dVmdt, *dstates] + + + def runSim(self, channel_mech, Astim, tstim, toffset, tonset=10e-3): + ''' Compute solutions of a neuron's HH system for a specific set of + electrical stimulation parameters, using a classic integration scheme. + + :param channel_mech: channels mechanism object + :param Astim: pulse amplitude (mA/m2) + :param tstim: pulse duration (s) + :param toffset: offset duration (s) + :param tonset: onset duration (s) + :return: 2-tuple with the time profile and solution matrix + ''' + + # Set time vector + ttot = tonset + tstim + toffset + dt = 1e-4 # s + nsamples = int(np.round(ttot / dt)) + t = np.linspace(0.0, ttot, nsamples) - tonset + + # Set pulse vector + n_onset = int(np.round(tonset / dt)) + n_stim = int(np.round(tstim / dt)) + n_offset = int(np.round(toffset / dt)) + pulse = np.concatenate((np.zeros(n_onset), Astim * np.ones(n_stim), np.zeros(n_offset))) + + # Create solver + solver = integrate.ode(self.eqHH) + solver.set_integrator('lsoda', nsteps=1000) + + # Set initial conditions + y0 = [channel_mech.Vm0, *channel_mech.states0] + nvar = len(y0) + + # Run simulation + y = np.empty((nsamples - 1, nvar)) + solver.set_initial_value(y0, t[0]) + k = 1 + while solver.successful() and k <= nsamples - 1: + solver.set_f_params(channel_mech, pulse[k]) + solver.integrate(t[k]) + y[k - 1, :] = solver.y + k += 1 + + y = np.concatenate((np.atleast_2d(y0), y), axis=0) + return (t, y) + + + def eqHH_VClamp(self, _, y, channel_mech, Vc): + ''' Compute the derivatives of a HH system variables for a + specific value of clamped voltage. + + :param t: time value (s, unused) + :param y: vector of HH system variables at time t + :param channel_mech: channels mechanism object + :param Vc: clamped voltage (mV) + :return: vector of HH system derivatives at time t + ''' + + return channel_mech.derStates(Vc, y) + + + + def runVClamp(self, channel_mech, Vclamp, tclamp, toffset, tonset=10e-3): + ''' Compute solutions of a neuron's HH system for a specific set of + voltage clamp parameters, using a classic integration scheme. 
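+
+            Minimal illustrative call (the clamp level and durations below are
+            arbitrary example values, and the thalamic reticular mechanism is used
+            purely for illustration):
+
+            >>> from PointNICE.solvers.SolverElec import SolverElec
+            >>> from PointNICE.channels.thalamic import ThalamicRE
+            >>> solver = SolverElec()
+            >>> t, y = solver.runVClamp(ThalamicRE(), -30.0, 100e-3, 50e-3)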
+ + :param channel_mech: channels mechanism object + :param Vclamp: clamped voltage (mV) + :param toffset: offset duration (s) + :param tclamp: clamp duration (s) + :param tonset: onset duration (s) + :return: 2-tuple with the time profile and solution matrix + ''' + + # Set time vector + ttot = tonset + tclamp + toffset + dt = 1e-4 # s + nsamples = int(np.round(ttot / dt)) + t = np.linspace(0.0, ttot, nsamples) - tonset + + # Set clamp vector + n_onset = int(np.round(tonset / dt)) + n_clamp = int(np.round(tclamp / dt)) + n_offset = int(np.round(toffset / dt)) + clamp = np.concatenate((np.zeros(n_onset), Vclamp * np.ones(n_clamp), np.zeros(n_offset))) + + # Create solver + solver = integrate.ode(self.eqHH_VClamp) + solver.set_integrator('lsoda', nsteps=1000) + + # Set initial conditions + y0 = channel_mech.states0 + nvar = len(y0) + + # Run simulation + y = np.empty((nsamples - 1, nvar)) + solver.set_initial_value(y0, t[0]) + k = 1 + while solver.successful() and k <= nsamples - 1: + solver.set_f_params(channel_mech, clamp[k]) + solver.integrate(t[k]) + y[k - 1, :] = solver.y + k += 1 + + y = np.concatenate((np.atleast_2d(y0), y), axis=0) + return (t, y) diff --git a/PointNICE/solvers/SolverUS.py b/PointNICE/solvers/SolverUS.py new file mode 100644 index 0000000..4bb7279 --- /dev/null +++ b/PointNICE/solvers/SolverUS.py @@ -0,0 +1,796 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# @Author: Theo Lemaire +# @Date: 2016-09-29 16:16:19 +# @Email: theo.lemaire@epfl.ch +# @Last Modified by: Theo Lemaire +# @Last Modified time: 2017-08-22 17:30:03 + +import os +import warnings +import pickle +import logging +import numpy as np +import scipy.integrate as integrate +from scipy.interpolate import interp2d + +from ..bls import BilayerSonophore +from ..utils import * +from ..constants import * + + +# Get package logger +logger = logging.getLogger('PointNICE') + + +class SolverUS(BilayerSonophore): + """ This class extends the BilayerSonophore class by adding a biophysical + Hodgkin-Huxley model on top of the mechanical BLS model. """ + + def __init__(self, geom, params, channel_mech, Fdrive): + """ Constructor of the class. + + :param geom: BLS geometric constants dictionary + :param params: BLS biomechanical and biophysical parameters dictionary + :param channel_mech: channels mechanism object + :param Fdrive: frequency of acoustic perturbation (Hz) + """ + + # Check validity of input parameters + assert Fdrive >= 0., 'Driving frequency must be positive' + + # Initialize BLS object + Cm0 = channel_mech.Cm0 + Vm0 = channel_mech.Vm0 + BilayerSonophore.__init__(self, geom, params, Fdrive, Cm0, Cm0 * Vm0 * 1e-3) + + logger.info('US solver initialization with %s channel mechanism', channel_mech.name) + + + def eqHH(self, t, y, channel_mech, Cm): + """ Compute the derivatives of the n-ODE HH system variables, + based on a value of membrane capacitance. 
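+
+            The membrane potential is reconstructed as Vm = Qm / Cm, and the charge
+            density evolves as dQm/dt = -Inet(Vm, states), where Inet is the net ionic
+            current of the channel mechanism.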
+ + + :param t: specific instant in time (s) + :param y: vector of HH system variables at time t + :param channel_mech: channels mechanism object + :param Cm: membrane capacitance (F/m2) + :return: vector of HH system derivatives at time t + """ + + # Split input vector explicitly + Qm, *states = y + + # Compute membrane potential + Vm = Qm / Cm * 1e3 # mV + + # Compute derivatives + dQm = - channel_mech.currNet(Vm, states) * 1e-3 # A/m2 + dstates = channel_mech.derStates(Vm, states) + + # Return derivatives vector + return [dQm, *dstates] + + + def eqHHeff(self, t, y, channel_mech, A, interpolators): + """ Compute the derivatives of the n-ODE effective HH system variables, + based on 2-dimensional linear interpolation of "effective" coefficients + that summarize the system's behaviour over an acoustic cycle. + + :param t: specific instant in time (s) + :param y: vector of HH system variables at time t + :param channel_mech: channels mechanism object + :param A: acoustic drive amplitude (Pa) + :param channels: Channel object to compute a specific electrical membrane dynamics + :param interpolators: dictionary of 2-dimensional linear interpolators + of "effective" coefficients over the 2D amplitude x charge input domain. + :return: vector of effective system derivatives at time t + """ + + # Split input vector explicitly + Qm, *states = y + + # Compute charge and channel states variation + Vm = interpolators['V'](A, Qm) # mV + dQmdt = - channel_mech.currNet(Vm, states) * 1e-3 + dstates = channel_mech.derStatesEff(A, Qm, states, interpolators) + + # Return derivatives vector + return [dQmdt, *dstates] + + + def eqFull(self, t, y, channel_mech, Adrive, Fdrive, phi): + """ Compute the derivatives of the (n+3) ODE full NBLS system variables. + + :param t: specific instant in time (s) + :param y: vector of state variables + :param channel_mech: channels mechanism object + :param Adrive: acoustic drive amplitude (Pa) + :param Fdrive: acoustic drive frequency (Hz) + :param phi: acoustic drive phase (rad) + :return: vector of derivatives + """ + + # Compute derivatives of mechanical and electrical systems + dydt_mech = self.eqMech(t, y[:3], Adrive, Fdrive, y[3], phi) + dydt_elec = self.eqHH(t, y[3:], channel_mech, self.Capct(y[1])) + + # return concatenated output + return dydt_mech + dydt_elec + + + def getEffCoeffs(self, channel_mech, Fdrive, Adrive, Qm, phi=np.pi): + """ Compute "effective" coefficients of the HH system for a specific combination + of stimulus frequency, stimulus amplitude and charge density. + + A short mechanical simulation is run while imposing the specific charge density, + until periodic stabilization. The HH coefficients are then averaged over the last + acoustic cycle to yield "effective" coefficients. 
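+
+            The effective membrane potential is the average of Qm / Cm(Z) over the last
+            acoustic cycle, the effective rate constants are obtained from
+            channel_mech.getEffRates over that same cycle, and the gas content is taken
+            at the end of the cycle.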
+ + :param channel_mech: channels mechanism object + :param Fdrive: acoustic drive frequency (Hz) + :param Adrive: acoustic drive amplitude (Pa) + :param Qm: imposed charge density (C/m2) + :param phi: acoustic drive phase (rad) + :return: tuple with the effective potential, rates, and gas content coefficients + """ + + # Run simulation and retrieve deflection and gas content vectors from last cycle + (_, y, _) = self.runMech(Fdrive, Adrive, Qm, phi) + (Z, ng) = y + Z_last = Z[-NPC_FULL:] # m + + # Compute membrane potential vector + Vm = np.array([Qm / self.Capct(ZZ) * 1e3 for ZZ in Z_last]) # mV + + # Compute average cycle value for membrane potential and rate constants + Vm_eff = np.mean(Vm) # mV + rates_eff = channel_mech.getEffRates(Vm) + + # Take final cycle value for gas content + ng_eff = ng[-1] # mole + + return (Vm_eff, rates_eff, ng_eff) + + + def createLookup(self, channel_mech, Fdrive, amps, charges, phi=np.pi): + """ Run simulations of the mechanical system for a multiple combinations of + imposed charge densities and acoustic amplitudes, compute effective coefficients + and store them as 2D arrays in a lookup file. + + :param channel_mech: channels mechanism object + :param Fdrive: acoustic drive frequency (Hz) + :param amps: array of acoustic drive amplitudes (Pa) + :param charges: array of charge densities (C/m2) + :param phi: acoustic drive phase (rad) + """ + + # Check validity of stimulation parameters + assert Fdrive > 0, 'Driving frequency must be strictly positive' + assert np.amin(amps) >= 0, 'Acoustic pressure amplitudes must be positive' + + logger.info('Creating lookup table for f = %.2f kHz', Fdrive * 1e-3) + + # Initialize 3D array to store effective coefficients + nA = amps.size + nQ = charges.size + Vm = np.empty((nA, nQ)) + ng = np.empty((nA, nQ)) + nrates = len(channel_mech.coeff_names) + rates = np.empty((nA, nQ, nrates)) + + # Loop through all (A, Q) combinations + isim = 0 + for i in range(nA): + for j in range(nQ): + isim += 1 + # Run short simulation and store effective coefficients + logger.info('sim %u/%u (A = %.2f kPa, Q = %.2f nC/cm2)', + isim, nA * nQ, amps[i] * 1e-3, charges[j] * 1e5) + (Vm[i, j], rates[i, j, :], ng[i, j]) = self.getEffCoeffs(channel_mech, Fdrive, + amps[i], charges[j], phi) + + # Convert coefficients array into dictionary with specific names + lookup_dict = {channel_mech.coeff_names[k]: rates[:, :, k] for k in range(nrates)} + lookup_dict['V'] = Vm # mV + lookup_dict['ng'] = ng # mole + + # Add input amplitude and charge arrays to dictionary + lookup_dict['A'] = amps # Pa + lookup_dict['Q'] = charges # C/m2 + + # Save dictionary in lookup file + lookup_file = '{}_lookups_a{:.1f}nm_f{:.1f}kHz.pkl'.format(channel_mech.name, + self.a * 1e9, + Fdrive * 1e-3) + logger.info('Saving effective coefficients arrays in lookup file: "%s"', lookup_file) + lookup_filepath = '{0}/{1}/{2}'.format(getLookupDir(), channel_mech.name, lookup_file) + with open(lookup_filepath, 'wb') as fh: + pickle.dump(lookup_dict, fh) + + + def runClassic(self, channel_mech, Fdrive, Adrive, tstim, toffset, PRF, DF, phi=np.pi): + """ Compute solutions of the system for a specific set of + US stimulation parameters, using a classic integration scheme. + + The first iteration uses the quasi-steady simplification to compute + the initiation of motion from a flat leaflet configuration. Afterwards, + the ODE system is solved iteratively until completion. 
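+
+            For continuous-wave stimuli (DF = 1), the stimulus interval is split into
+            100 consecutive integration intervals, and the output arrays are downsampled
+            in time by CLASSIC_DS_FACTOR to limit their size.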
+ + :param channel_mech: channels mechanism object + :param Fdrive: acoustic drive frequency (Hz) + :param Adrive: acoustic drive amplitude (Pa) + :param tstim: duration of US stimulation (s) + :param toffset: duration of the offset (s) + :param PRF: pulse repetition frequency (Hz) + :param DF: pulse duty factor (-) + :param phi: acoustic drive phase (rad) + :return: 3-tuple with the time profile, the effective solution matrix and a state vector + """ + + # Raise warnings as error + warnings.filterwarnings('error') + + # Initialize system solver + solver_full = integrate.ode(self.eqFull) + solver_full.set_integrator('lsoda', nsteps=SOLVER_NSTEPS, ixpr=True) + + # Determine system time step + Tdrive = 1 / Fdrive + dt = Tdrive / NPC_FULL + + # Determine proportion of tstim in total integration + stim_prop = tstim / (tstim + toffset) + + # if CW stimulus: divide integration during stimulus into 100 intervals + if DF == 1.0: + PRF = 100 / tstim + + # Compute vector sizes + npulses = int(np.round(PRF * tstim)) + Tpulse_on = DF / PRF + Tpulse_off = (1 - DF) / PRF + n_pulse_on = int(np.round(Tpulse_on / dt)) + n_pulse_off = int(np.round(Tpulse_off / dt)) + n_off = int(np.round(toffset / dt)) + + # Solve quasi-steady equation to compute first deflection value + Z0 = 0.0 + ng0 = self.ng0 + Qm0 = self.Qm0 + Pac1 = self.Pacoustic(dt, Adrive, Fdrive, phi) + Z1 = self.balancedefQS(ng0, Qm0, Pac1) + + # Initialize global arrays + states = np.array([1, 1]) + t = np.array([0., dt]) + y_membrane = np.array([[0., (Z1 - Z0) / dt], [Z0, Z1], [ng0, ng0], [Qm0, Qm0]]) + y_channels = np.tile(channel_mech.states0, (2, 1)).T + y = np.vstack((y_membrane, y_channels)) + nvar = y.shape[0] + + # Initialize pulse time and states vectors + t_pulse0 = np.linspace(0, Tpulse_on + Tpulse_off, n_pulse_on + n_pulse_off) + states_pulse = np.concatenate((np.ones(n_pulse_on), np.zeros(n_pulse_off))) + + # Loop through all pulse (ON and OFF) intervals + for i in range(npulses): + + # logger.debug('pulse %u/%u', i + 1, npulses) + printPct(100 * stim_prop * (i + 1) / npulses, 1) + + # Construct and initialize arrays + t_pulse = t_pulse0 + t[-1] + y_pulse = np.empty((nvar, n_pulse_on + n_pulse_off)) + y_pulse[:, 0] = y[:, -1] + + # Initialize iterator + k = 0 + + # Integrate ON system + solver_full.set_f_params(channel_mech, Adrive, Fdrive, phi) + solver_full.set_initial_value(y_pulse[:, k], t_pulse[k]) + while solver_full.successful() and k < n_pulse_on - 1: + k += 1 + solver_full.integrate(t_pulse[k]) + y_pulse[:, k] = solver_full.y + + # Integrate OFF system + solver_full.set_f_params(channel_mech, 0.0, 0.0, 0.0) + solver_full.set_initial_value(y_pulse[:, k], t_pulse[k]) + while solver_full.successful() and k < n_pulse_on + n_pulse_off - 1: + k += 1 + solver_full.integrate(t_pulse[k]) + y_pulse[:, k] = solver_full.y + + # Append pulse arrays to global arrays + states = np.concatenate([states, states_pulse[1:]]) + t = np.concatenate([t, t_pulse[1:]]) + y = np.concatenate([y, y_pulse[:, 1:]], axis=1) + + # Integrate offset interval + t_off = np.linspace(0, toffset, n_off) + t[-1] + states_off = np.zeros(n_off) + y_off = np.empty((nvar, n_off)) + y_off[:, 0] = y[:, -1] + solver_full.set_initial_value(y_off[:, 0], t_off[0]) + solver_full.set_f_params(channel_mech, 0.0, 0.0, 0.0) + k = 0 + while solver_full.successful() and k < n_off - 1: + k += 1 + solver_full.integrate(t_off[k]) + y_off[:, k] = solver_full.y + + # Concatenate offset arrays to global arrays + states = np.concatenate([states, states_off[1:]]) + t = np.concatenate([t, 
t_off[1:]]) + y = np.concatenate([y, y_off[:, 1:]], axis=1) + + # Downsample arrays in time-domain to reduce overall size + t = t[::CLASSIC_DS_FACTOR] + y = y[:, ::CLASSIC_DS_FACTOR] + states = states[::CLASSIC_DS_FACTOR] + + # return output variables + return (t, y[1:, :], states) + + + def runEffective(self, channel_mech, Fdrive, Adrive, tstim, toffset, PRF, DF, dt=DT_EFF): + """ Compute solutions of the system for a specific set of + US stimulation parameters, using charge-predicted "effective" + coefficients to solve the HH equations at each step. + + :param channel_mech: channels mechanism object + :param Fdrive: acoustic drive frequency (Hz) + :param Adrive: acoustic drive amplitude (Pa) + :param tstim: duration of US stimulation (s) + :param toffset: duration of the offset (s) + :param PRF: pulse repetition frequency (Hz) + :param DF: pulse duty factor (-) + :param dt: integration time step (s) + :return: 3-tuple with the time profile, the effective solution matrix and a state vector + """ + + # Raise warnings as error + warnings.filterwarnings('error') + + # Check lookup file existence + lookup_file = '{}_lookups_a{:.1f}nm_f{:.1f}kHz.pkl'.format(channel_mech.name, + self.a * 1e9, + Fdrive * 1e-3) + lookup_path = '{}/{}/{}'.format(getLookupDir(), channel_mech.name, lookup_file) + assert os.path.isfile(lookup_path), 'No lookup file for this stimulation frequency' + + # Load coefficients + with open(lookup_path, 'rb') as fh: + coeffs = pickle.load(fh) + + # Check that pressure amplitude is within lookup range + Amax = np.amax(coeffs['A']) + 1e-9 # adding margin to compensate for eventual round error + assert Adrive <= Amax, 'Amplitude must be within [0, {:.1f}] kPa'.format(Amax * 1e-3) + + # Initialize interpolators + interpolators = {cn: interp2d(coeffs['A'], coeffs['Q'], np.transpose(coeffs[cn])) + for cn in channel_mech.coeff_names} + interpolators['V'] = interp2d(coeffs['A'], coeffs['Q'], np.transpose(coeffs['V'])) + interpolators['ng'] = interp2d(coeffs['A'], coeffs['Q'], np.transpose(coeffs['ng'])) + + # Initialize system solvers + solver_on = integrate.ode(self.eqHHeff) + solver_on.set_integrator('lsoda', nsteps=SOLVER_NSTEPS) + solver_on.set_f_params(channel_mech, Adrive, interpolators) + solver_off = integrate.ode(self.eqHH) + solver_off.set_integrator('lsoda', nsteps=SOLVER_NSTEPS) + + # if CW stimulus: change PRF to have exactly one integration interval during stimulus + if DF == 1.0: + PRF = 1 / tstim + + # Compute vector sizes + npulses = int(np.round(PRF * tstim)) + Tpulse_on = DF / PRF + Tpulse_off = (1 - DF) / PRF + n_pulse_on = int(np.round(Tpulse_on / dt)) + 1 + n_pulse_off = int(np.round(Tpulse_off / dt)) + n_off = int(np.round(toffset / dt)) + + # Initialize global arrays + states = np.array([1]) + t = np.array([0.0]) + y = np.atleast_2d(np.insert(channel_mech.states0, 0, self.Qm0)).T + nvar = y.shape[0] + Zeff = np.array([0.0]) + ngeff = np.array([self.ng0]) + + # Initializing accurate pulse time vector + t_pulse_on = np.linspace(0, Tpulse_on, n_pulse_on) + t_pulse_off = np.linspace(dt, Tpulse_off, n_pulse_off) + Tpulse_on + t_pulse0 = np.concatenate([t_pulse_on, t_pulse_off]) + states_pulse = np.concatenate((np.ones(n_pulse_on), np.zeros(n_pulse_off))) + + # Loop through all pulse (ON and OFF) intervals + for i in range(npulses): + + # Construct and initialize arrays + t_pulse = t_pulse0 + t[-1] + y_pulse = np.empty((nvar, n_pulse_on + n_pulse_off)) + ngeff_pulse = np.empty(n_pulse_on + n_pulse_off) + Zeff_pulse = np.empty(n_pulse_on + n_pulse_off) + y_pulse[:, 
0] = y[:, -1] + ngeff_pulse[0] = ngeff[-1] + Zeff_pulse[0] = Zeff[-1] + + # Initialize iterator + k = 0 + + # Integrate ON system + solver_on.set_initial_value(y_pulse[:, k], t_pulse[k]) + while solver_on.successful() and k < n_pulse_on - 1: + k += 1 + solver_on.integrate(t_pulse[k]) + y_pulse[:, k] = solver_on.y + ngeff_pulse[k] = interpolators['ng'](Adrive, y_pulse[0, k]) # mole + Zeff_pulse[k] = self.balancedefQS(ngeff_pulse[k], y_pulse[0, k]) # m + + # Integrate OFF system + solver_off.set_initial_value(y_pulse[:, k], t_pulse[k]) + solver_off.set_f_params(channel_mech, self.Capct(Zeff_pulse[k])) + while solver_off.successful() and k < n_pulse_on + n_pulse_off - 1: + k += 1 + solver_off.integrate(t_pulse[k]) + y_pulse[:, k] = solver_off.y + ngeff_pulse[k] = interpolators['ng'](0.0, y_pulse[0, k]) # mole + Zeff_pulse[k] = self.balancedefQS(ngeff_pulse[k], y_pulse[0, k]) # m + solver_off.set_f_params(channel_mech, self.Capct(Zeff_pulse[k])) + + # Append pulse arrays to global arrays + states = np.concatenate([states[:-1], states_pulse]) + t = np.concatenate([t, t_pulse[1:]]) + y = np.concatenate([y, y_pulse[:, 1:]], axis=1) + Zeff = np.concatenate([Zeff, Zeff_pulse[1:]]) + ngeff = np.concatenate([ngeff, ngeff_pulse[1:]]) + + # Integrate offset interval + t_off = np.linspace(0, toffset, n_off) + t[-1] + states_off = np.zeros(n_off) + y_off = np.empty((nvar, n_off)) + ngeff_off = np.empty(n_off) + Zeff_off = np.empty(n_off) + + y_off[:, 0] = y[:, -1] + ngeff_off[0] = ngeff[-1] + Zeff_off[0] = Zeff[-1] + solver_off.set_initial_value(y_off[:, 0], t_off[0]) + solver_off.set_f_params(channel_mech, self.Capct(Zeff_pulse[k])) + k = 0 + while solver_off.successful() and k < n_off - 1: + k += 1 + solver_off.integrate(t_off[k]) + y_off[:, k] = solver_off.y + ngeff_off[k] = interpolators['ng'](0.0, y_off[0, k]) # mole + Zeff_off[k] = self.balancedefQS(ngeff_off[k], y_off[0, k]) # m + solver_off.set_f_params(channel_mech, self.Capct(Zeff_off[k])) + + # Concatenate offset arrays to global arrays + states = np.concatenate([states, states_off[1:]]) + t = np.concatenate([t, t_off[1:]]) + y = np.concatenate([y, y_off[:, 1:]], axis=1) + Zeff = np.concatenate([Zeff, Zeff_off[1:]]) + ngeff = np.concatenate([ngeff, ngeff_off[1:]]) + + # Add Zeff and ngeff to solution matrix + y = np.vstack([Zeff, ngeff, y]) + + # return output variables + return (t, y, states) + + + def runHybrid(self, channel_mech, Fdrive, Adrive, tstim, toffset, phi=np.pi): + """ Compute solutions of the system for a specific set of + US stimulation parameters, using a hybrid integration scheme. + + The first iteration uses the quasi-steady simplification to compute + the initiation of motion from a flat leaflet configuration. Afterwards, + the NBLS ODE system is solved iteratively for "slices" of N-microseconds, + in a 2-steps scheme: + + - First, the full (n+3) ODE system is integrated for a few acoustic cycles + until Z and ng reach a stable periodic solution (limit cycle) + - Second, the signals of the 3 mechanical variables over the last acoustic + period are selected and resampled to a far lower sampling rate + - Third, the HH n-ODE system is integrated for the remaining time of the + slice, using periodic expansion of the mechanical signals to precompute + the values of capacitance. 
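+
+            The HH-only integration of each slice proceeds until both the accumulated
+            charge variation exceeds DQ_UPDATE and the elapsed time exceeds DT_UPDATE
+            (or until the end of the stimulus or offset is reached), after which the
+            full system is re-integrated for the next slice.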
+ + :param channel_mech: channels mechanism object + :param Fdrive: acoustic drive frequency (Hz) + :param Adrive: acoustic drive amplitude (Pa) + :param tstim: duration of US stimulation (s) + :param toffset: duration of the offset (s) + :param phi: acoustic drive phase (rad) + :return: 3-tuple with the time profile, the solution matrix and a state vector + + .. warning:: This method cannot handle pulsed stimuli + """ + + # Raise warnings as error + warnings.filterwarnings('error') + + # Initialize full and HH systems solvers + solver_full = integrate.ode(self.eqFull) + solver_full.set_f_params(channel_mech, Adrive, Fdrive, phi) + solver_full.set_integrator('lsoda', nsteps=SOLVER_NSTEPS) + solver_hh = integrate.ode(self.eqHH) + solver_hh.set_integrator('dop853', nsteps=SOLVER_NSTEPS, atol=1e-12) + + # Determine full and HH systems time steps + Tdrive = 1 / Fdrive + dt_full = Tdrive / NPC_FULL + dt_hh = Tdrive / NPC_HH + n_full_per_hh = int(NPC_FULL / NPC_HH) + t_full_cycle = np.linspace(0, Tdrive - dt_full, NPC_FULL) + t_hh_cycle = np.linspace(0, Tdrive - dt_hh, NPC_HH) + + # Determine number of samples in prediction vectors + npc_pred = NPC_FULL - n_full_per_hh + 1 + + # Solve quasi-steady equation to compute first deflection value + Z0 = 0.0 + ng0 = self.ng0 + Qm0 = self.Qm0 + Pac1 = self.Pacoustic(dt_full, Adrive, Fdrive, phi) + Z1 = self.balancedefQS(ng0, Qm0, Pac1) + + # Initialize global arrays + states = np.array([1, 1]) + t = np.array([0., dt_full]) + y_membrane = np.array([[0., (Z1 - Z0) / dt_full], [Z0, Z1], [ng0, ng0], [Qm0, Qm0]]) + y_channels = np.tile(channel_mech.states0, (2, 1)).T + y = np.vstack((y_membrane, y_channels)) + nvar = y.shape[0] + + # For each hybrid integration interval + irep = 0 + sim_error = False + while not sim_error and t[-1] < tstim + toffset: + + # Integrate full system for a few acoustic cycles until stabilization + periodic_conv = False + j = 0 + ng_last = None + Z_last = None + while not sim_error and not periodic_conv: + if t[-1] > tstim: + solver_full.set_f_params(channel_mech, 0.0, 0.0, 0.0) + t_full = t_full_cycle + t[-1] + dt_full + y_full = np.empty((nvar, NPC_FULL)) + y0_full = y[:, -1] + solver_full.set_initial_value(y0_full, t[-1]) + k = 0 + try: # try to integrate and catch errors/warnings + while solver_full.successful() and k <= NPC_FULL - 1: + solver_full.integrate(t_full[k]) + y_full[:, k] = solver_full.y + assert (y_full[1, k] > -0.5 * self.Delta), 'Deflection out of range' + k += 1 + except (Warning, AssertionError) as inst: + sim_error = True + logger.error('Full system integration error at step %u', k) + print(inst) + + # Compare Z and ng signals over the last 2 acoustic periods + if j > 0 and rmse(Z_last, y_full[1, :]) < Z_ERR_MAX \ + and rmse(ng_last, y_full[2, :]) < NG_ERR_MAX: + periodic_conv = True + + # Update last vectors for next comparison + Z_last = y_full[1, :] + ng_last = y_full[2, :] + + # Concatenate time and solutions to global vectors + states = np.concatenate([states, np.ones(NPC_FULL)], axis=0) + t = np.concatenate([t, t_full], axis=0) + y = np.concatenate([y, y_full], axis=1) + + # Increment loop index + j += 1 + + # Retrieve last period of the 3 mechanical variables to propagate in HH system + t_last = t[-npc_pred:] + mech_last = y[0:3, -npc_pred:] + + # print('convergence after {} cycles'.format(j)) + + # Downsample signals to specified HH system time step + (_, mech_pred) = DownSample(t_last, mech_last, NPC_HH) + + # Integrate HH system until certain dQ or dT is reached + Q0 = y[3, -1] + dQ = 0.0 + 
t0_interval = t[-1] + dt_interval = 0.0 + j = 0 + if t[-1] < tstim: + tlim = tstim + else: + tlim = tstim + toffset + while (not sim_error and t[-1] < tlim + and (np.abs(dQ) < DQ_UPDATE or dt_interval < DT_UPDATE)): + t_hh = t_hh_cycle + t[-1] + dt_hh + y_hh = np.empty((nvar - 3, NPC_HH)) + y0_hh = y[3:, -1] + solver_hh.set_initial_value(y0_hh, t[-1]) + k = 0 + try: # try to integrate and catch errors/warnings + while solver_hh.successful() and k <= NPC_HH - 1: + solver_hh.set_f_params(channel_mech, self.Capct(mech_pred[1, k])) + solver_hh.integrate(t_hh[k]) + y_hh[:, k] = solver_hh.y + k += 1 + except (Warning, AssertionError) as inst: + sim_error = True + logger.error('HH system integration error at step %u', k) + print(inst) + + # Concatenate time and solutions to global vectors + states = np.concatenate([states, np.zeros(NPC_HH)], axis=0) + t = np.concatenate([t, t_hh], axis=0) + y = np.concatenate([y, np.concatenate([mech_pred, y_hh], axis=0)], axis=1) + + # Compute charge variation from interval beginning + dQ = y[3, -1] - Q0 + dt_interval = t[-1] - t0_interval + + # Increment loop index + j += 1 + + # Print progress + printPct(100 * (t[-1] / (tstim + toffset)), 1) + + irep += 1 + + # Return output + return (t, y[1:, :], states) + + + def runSim(self, channel_mech, Fdrive, Adrive, tstim, toffset, PRF, DF=1.0, sim_type='effective'): + """ Run simulation of the system for a specific set of + US stimulation parameters. + + :param channel_mech: channels mechanism object + :param Fdrive: acoustic drive frequency (Hz) + :param Adrive: acoustic drive amplitude (Pa) + :param tstim: duration of US stimulation (s) + :param toffset: duration of the offset (s) + :param PRF: pulse repetition frequency (Hz) + :param DF: pulse duty factor (-) + :param sim_type: selected integration method + :return: 3-tuple with the time profile, the solution matrix and a state vector + """ + + # Check validity of stimulation parameters + assert Fdrive > 0, 'Driving frequency must be strictly positive' + assert Adrive > 0, 'Acoustic pressure amplitude must be strictly positive' + assert tstim > 0, 'Stimulus duration must be strictly positive' + assert toffset >= 0, 'Stimulus offset must be positive or null' + assert PRF >= 1 / tstim, 'Pulse repetition interval must be smaller than stimulus duration' + assert PRF < Fdrive, 'PRF must be smaller than driving frequency' + assert DF > 0 and DF <= 1, 'Duty cycle must be within [0; 1)' + sim_types = ('classic, effective, hybrid') + assert sim_type in sim_types, 'Allowed simulation types are {}'.format(sim_types) + + # Call appropriate simulation function + if sim_type == 'classic': + return self.runClassic(channel_mech, Fdrive, Adrive, tstim, toffset, PRF, DF) + elif sim_type == 'effective': + return self.runEffective(channel_mech, Fdrive, Adrive, tstim, toffset, PRF, DF) + elif sim_type == 'hybrid': + assert DF == 1.0, 'Hybrid method can only handle continuous wave stimuli' + return self.runHybrid(channel_mech, Fdrive, Adrive, tstim, toffset) + + + def titrateAmp(self, channel_mech, Fdrive, Arange, tstim, toffset, + PRF=1.5e3, DF=1.0, sim_type='effective'): + """ Use a dichotomic search to determine the threshold acoustic amplitude + needed to obtain neural excitation, for specific stimulation parameters. + + This function is called recursively until an accurate threshold is found. 
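+
+        Example sketch (hypothetical values; ``solver`` is assumed to be an existing
+        SolverUS instance and ``ch_mech`` a channel mechanism object):
+
+        >>> Athr, t, y, states, lat = solver.titrateAmp(ch_mech, 500e3, (0.0, 600e3),
+        ...                                             50e-3, 30e-3, PRF=100.0, DF=0.05)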
+ + :param channel_mech: channels mechanism object + :param Fdrive: acoustic drive frequency (Hz) + :param Arange: bounds of the acoustic amplitude searching interval (Pa) + :param tstim: duration of US stimulation (s) + :param toffset: duration of the offset (s) + :param PRF: pulse repetition frequency (Hz) + :param DF: pulse duty factor (-) + :param sim_type: selected integration method + :return: 5-tuple with the determined amplitude threshold, time profile, + solution matrix, state vector and response latency + """ + + # Check amplitude interval + assert Arange[0] < Arange[1], 'Amplitude bounds must be (lower_bound, upper_bound)' + + # Define current amplitude + Adrive = (Arange[0] + Arange[1]) / 2 + + # Run simulation + (t, y, states) = self.runSim(channel_mech, Fdrive, Adrive, tstim, toffset, + PRF, DF, sim_type) + + # Detect spikes + n_spikes, latency, _ = detectSpikes(t, y[2, :], SPIKE_MIN_QAMP, SPIKE_MIN_DT) + logger.info('%.2f kPa ---> %u spike%s detected', Adrive * 1e-3, n_spikes, + "s" if n_spikes > 1 else "") + + # If accurate threshold is found, return simulation results + if (Arange[1] - Arange[0]) <= TITRATION_DA_THR and n_spikes == 1: + return (Adrive, t, y, states, latency) + + # Otherwise, refine titration interval and iterate recursively + else: + if n_spikes == 0: + new_Arange = (Adrive, Arange[1]) + else: + new_Arange = (Arange[0], Adrive) + return self.titrateAmp(channel_mech, Fdrive, new_Arange, tstim, toffset, + PRF, DF, sim_type) + + + def titrateDur(self, channel_mech, Fdrive, Adrive, trange, toffset, + PRF=1.5e3, DF=1.0, sim_type='effective'): + """ Use a dichotomic search to determine the threshold stimulus duration + needed to obtain neural excitation, for specific stimulation parameters. + + This function is called recursively until an accurate threshold is found. 
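+
+        Example sketch (hypothetical values; ``solver`` is assumed to be an existing
+        SolverUS instance and ``ch_mech`` a channel mechanism object):
+
+        >>> tthr, t, y, states, lat = solver.titrateDur(ch_mech, 500e3, 100e3,
+        ...                                             (0.0, 300e-3), 30e-3)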
+ + :param channel_mech: channels mechanism object + :param Fdrive: acoustic drive frequency (Hz) + :param Adrive: acoustic drive amplitude (Pa) + :param trange: bounds of the stimulus duration (s) + :param toffset: duration of the offset (s) + :param PRF: pulse repetition frequency (Hz) + :param DF: pulse duty factor (-) + :param sim_type: selected integration method + :return: 5-tuple with the determined duration threshold, time profile, + solution matrix, state vector and response latency + """ + + # Check duration interval + assert trange[0] < trange[1], 'Duration bounds must be (lower_bound, upper_bound)' + + # Define current duration + tstim = (trange[0] + trange[1]) / 2 + + # Run simulation + (t, y, states) = self.runSim(channel_mech, Fdrive, Adrive, tstim, toffset, + PRF, DF, sim_type) + + # Detect spikes + n_spikes, latency, _ = detectSpikes(t, y[2, :], SPIKE_MIN_QAMP, SPIKE_MIN_DT) + logger.info('%.2f ms ---> %u spike%s detected', tstim * 1e3, n_spikes, + "s" if n_spikes > 1 else "") + + # If accurate threshold is found, return simulation results + if (trange[1] - trange[0]) <= TITRATION_DT_THR and n_spikes == 1: + return (tstim, t, y, states, latency) + + # Otherwise, refine titration interval and iterate recursively + else: + if n_spikes == 0: + new_trange = (tstim, trange[1]) + else: + new_trange = (trange[0], tstim) + return self.titrateDur(channel_mech, Fdrive, Adrive, new_trange, toffset, + PRF, DF, sim_type) + + + def titrate(self, channel_mech, Fdrive, x, toffset, PRF, DF, titr_type, sim_type='effective'): + if titr_type == 'amplitude': + return self.titrateAmp(channel_mech, Fdrive, (0.0, 2 * TITRATION_AMAX), x, + toffset, PRF, DF, sim_type) + elif titr_type == 'duration': + return self.titrateDur(channel_mech, Fdrive, x, (0.0, 2 * TITRATION_TMAX), + toffset, PRF, DF, sim_type) + diff --git a/PointNICE/solvers/__init__.py b/PointNICE/solvers/__init__.py new file mode 100644 index 0000000..3becce8 --- /dev/null +++ b/PointNICE/solvers/__init__.py @@ -0,0 +1,12 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# @Author: Theo Lemaire +# @Date: 2017-06-06 13:36:00 +# @Email: theo.lemaire@epfl.ch +# @Last Modified by: Theo Lemaire +# @Last Modified time: 2017-08-22 14:42:34 + + +from .SolverElec import SolverElec +from .SolverUS import SolverUS +from .utils import * diff --git a/PointNICE/solvers/utils.py b/PointNICE/solvers/utils.py new file mode 100644 index 0000000..5f24e38 --- /dev/null +++ b/PointNICE/solvers/utils.py @@ -0,0 +1,376 @@ +# -*- coding: utf-8 -*- +# @Author: Theo Lemaire +# @Date: 2017-08-22 14:33:04 +# @Last Modified by: Theo Lemaire +# @Last Modified time: 2017-08-22 17:16:12 + +""" Utility functions used in simulations """ + +import time +import logging +import pickle +import numpy as np + +from .import SolverUS +from ..constants import * +from ..utils import detectSpikes, xlslog + + +# Get package logger +logger = logging.getLogger('PointNICE') + + +def createSimQueue(amps, durations, offsets, PRFs, DFs): + ''' Create a serialized 2D array of all parameter combinations for a series of individual + parameter sweeps, while avoiding repetition of CW protocols for a given PRF sweep. 
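+
+    Example sketch (hypothetical parameter values): one amplitude/duration pair with a
+    pulsed and a continuous duty cycle yields two protocols, the CW one being listed
+    only once, at the minimal PRF:
+
+    >>> queue = createSimQueue([100e3], [50e-3], [30e-3], [100.0], [0.05, 1.0])
+    >>> # 2 rows, each reading (Adrive, tstim, toffset, PRF, DF):
+    >>> # [1e5, 50e-3, 30e-3, 100.0, 1.0] (CW) and [1e5, 50e-3, 30e-3, 100.0, 0.05] (PW)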
+ + :param amps: list (or 1D-array) of acoustic amplitudes + :param durations: list (or 1D-array) of stimulus durations + :param offsets: list (or 1D-array) of stimulus offsets (paired with durations array) + :param PRFs: list (or 1D-array) of pulse-repetition frequencies + :param DFs: list (or 1D-array) of duty cycle values + :return: 2D-array with (amplitude, duration, offset, PRF, DF) for each stimulation protocol + ''' + + # Convert input to 1D-arrays + amps = np.array(amps) + durations = np.array(durations) + offsets = np.array(offsets) + PRFs = np.array(PRFs) + DFs = np.array(DFs) + + # Create index arrays + iamps = range(len(amps)) + idurs = range(len(durations)) + + # Create empty output matrix + queue = np.empty((1, 5)) + + # Continuous protocols + if 1.0 in DFs: + nCW = len(amps) * len(durations) + arr1 = np.ones(nCW) + iCW_queue = np.array(np.meshgrid(iamps, idurs)).T.reshape(nCW, 2) + CW_queue = np.hstack((amps[iCW_queue[:, 0]], durations[iCW_queue[:, 1]], + offsets[iCW_queue[:, 1]], PRFs.min() * arr1, arr1)) + queue = np.vstack((queue, CW_queue)) + + + # Pulsed protocols + if np.any(DFs != 1.0): + pulsed_DFs = DFs[DFs != 1.0] + iPRFs = range(len(PRFs)) + ipulsed_DFs = range(len(pulsed_DFs)) + nPW = len(amps) * len(durations) * len(PRFs) * len(pulsed_DFs) + iPW_queue = np.array(np.meshgrid(iamps, idurs, iPRFs, ipulsed_DFs)).T.reshape(nPW, 4) + PW_queue = np.hstack((amps[iPW_queue[:, 0]], durations[iPW_queue[:, 1]], + offsets[iPW_queue[:, 1]], PRFs[iPW_queue[:, 2]], + pulsed_DFs[iPW_queue[:, 3]])) + queue = np.vstack((queue, PW_queue)) + + # Return + return queue[1:, :] + + +def runSimBatch(batch_dir, log_filepath, neurons, bls_params, geom, stim_params, sim_type): + ''' Run batch simulations of the system for various neuron types, sonophore and + stimulation parameters. 
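+
+    Example sketch (paths, dictionaries and values below are hypothetical placeholders;
+    ``ch_mech`` is assumed to be a channel mechanism object and ``bls_params`` a valid
+    BLS parameters dictionary):
+
+    >>> stim_params = {'freqs': [500e3], 'amps': [100e3], 'durations': [50e-3],
+    ...                'offsets': [30e-3], 'PRFs': [100.0], 'DFs': [1.0]}
+    >>> runSimBatch('/path/to/batch', '/path/to/batch/log.xlsx', [ch_mech],
+    ...             bls_params, {'a': 32e-9, 'd': 100e-9}, stim_params, 'effective')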
+ + :param batch_dir: full path to output directory of batch + :param log_filepath: full path log file of batch + :param neurons: array of channel mechanisms + :param bls_params: BLS biomechanical and biophysical parameters dictionary + :param geom: BLS geometric constants dictionary + :param stim_params: dictionary containing sweeps for all stimulation parameters + :param sim_type: selected integration method + ''' + + # Define naming and logging settings + sim_str_CW = 'sim_{}_CW_{:.0f}nm_{:.0f}kHz_{:.0f}kPa_{:.0f}ms_{}' + sim_str_PW = 'sim_{}_PW_{:.0f}nm_{:.0f}kHz_{:.0f}kPa_{:.0f}ms_PRF{:.2f}kHz_DF{:.2f}_{}' + CW_log = ('%s neuron - CW %s simulation %u/%u (a = %.1f nm, f = %.2f kHz, A = %.2f kPa, ' + 't = %.1f ms)') + PW_log = ('%s neuron - PW %s simulation %u/%u (a = %.1f nm, f = %.2f kHz, A = %.2f kPa, ' + ' t = %.1f ms, PRF = %.2f kHz, DF = %.2f)') + + + logger.info("Starting NICE simulation batch") + + a = geom['a'] + d = geom['d'] + + # Generate simulations queue + sim_queue = createSimQueue(stim_params['amps'], stim_params['durations'], + stim_params['offsets'], stim_params['PRFs'], stim_params['DFs']) + nqueue = sim_queue.shape[0] + + # Run simulations + simcount = 0 + nsims = len(neurons) * len(stim_params['freqs']) * nqueue + for ch_mech in neurons: + for Fdrive in stim_params['freqs']: + try: + # Create SolverUS instance (modulus of embedding tissue depends on frequency) + solver = SolverUS(geom, bls_params, ch_mech, Fdrive) + + for i in range(nqueue): + simcount += 1 + Adrive, tstim, toffset, PRF, DF = sim_queue[i, :] + + # Get date and time info + date_str = time.strftime("%Y.%m.%d") + daytime_str = time.strftime("%H:%M:%S") + + # Log and define naming + if DF == 1.0: + logger.info(CW_log, ch_mech.name, sim_type, simcount, nsims, + a * 1e9, Fdrive * 1e-3, Adrive * 1e-3, tstim * 1e3) + simcode = sim_str_CW.format(ch_mech.name, a * 1e9, Fdrive * 1e-3, + Adrive * 1e-3, tstim * 1e3, sim_type) + else: + logger.info(PW_log, ch_mech.name, sim_type, simcount, nsims, a * 1e9, + Fdrive * 1e-3, Adrive * 1e-3, tstim * 1e3, PRF * 1e-3, DF) + simcode = sim_str_PW.format(ch_mech.name, a * 1e9, Fdrive * 1e-3, + Adrive * 1e-3, tstim * 1e3, PRF * 1e-3, + DF, sim_type) + + # Run simulation + tstart = time.time() + (t, y, states) = solver.runSim(ch_mech, Fdrive, Adrive, tstim, toffset, + PRF, DF, sim_type) + + + Z, ng, Qm, *channels = y + U = np.insert(np.diff(Z) / np.diff(t), 0, 0.0) + tcomp = time.time() - tstart + logger.info('completed in %.2f seconds', tcomp) + + # Store data in dictionary + bls_params['biophys']['Qm0'] = solver.Qm0 + data = { + 'a': a, + 'd': d, + 'params': bls_params, + 'Fdrive': Fdrive, + 'Adrive': Adrive, + 'phi': np.pi, + 'tstim': tstim, + 'toffset': toffset, + 'PRF': PRF, + 'DF': DF, + 't': t, + 'states': states, + 'U': U, + 'Z': Z, + 'ng': ng, + 'Qm': Qm + } + for j in range(len(ch_mech.states_names)): + data[ch_mech.states_names[j]] = channels[j] + + # Export data to PKL file + datafile_name = batch_dir + '/' + simcode + ".pkl" + with open(datafile_name, 'wb') as fh: + pickle.dump(data, fh) + logger.info('simulation data exported to "%s"', datafile_name) + + # Detect spikes on Qm signal + n_spikes, lat, sr = detectSpikes(t, Qm, SPIKE_MIN_QAMP, SPIKE_MIN_DT) + logger.info('%u spike%s detected', n_spikes, "s" if n_spikes > 1 else "") + + # Export key metrics to log file + log = { + 'A': date_str, + 'B': daytime_str, + 'C': ch_mech.name, + 'D': a * 1e9, + 'E': d * 1e6, + 'F': Fdrive * 1e-3, + 'G': Adrive * 1e-3, + 'H': tstim * 1e3, + 'I': PRF * 1e-3 if DF < 1 else 
'N/A', + 'J': DF, + 'K': sim_type, + 'L': t.size, + 'M': round(tcomp, 2), + 'N': n_spikes, + 'O': lat * 1e3 if isinstance(lat, float) else 'N/A', + 'P': sr * 1e-3 if isinstance(sr, float) else 'N/A' + } + + if xlslog(log_filepath, 'Data', log) == 1: + logger.info('log exported to "%s"', log_filepath) + else: + logger.error('log export to "%s" aborted', log_filepath) + + except AssertionError as err: + logger.error(err) + + + +def runTitrationBatch(batch_dir, log_filepath, neurons, bls_params, geom, stim_params): + ''' Run batch titrations of the system for various neuron types, sonophore and + stimulation parameters, to determine the threshold of a specific stimulus parameter + for neural excitation. + + :param batch_dir: full path to output directory of batch + :param log_filepath: full path log file of batch + :param neurons: array of channel mechanisms + :param bls_params: BLS biomechanical and biophysical parameters dictionary + :param geom: BLS geometric constants dictionary + :param stim_params: dictionary containing sweeps for all stimulation parameters + ''' + + # Define naming and logging settings + sim_str_CW = 'sim_{}_CW_{:.0f}nm_{:.0f}kHz_{:.0f}kPa_{:.0f}ms_{}' + sim_str_PW = 'sim_{}_PW_{:.0f}nm_{:.0f}kHz_{:.0f}kPa_{:.0f}ms_PRF{:.2f}kHz_DF{:.2f}_{}' + CW_log = ('%s neuron - CW titration %u/%u (a = %.1f nm, f = %.2f kHz, %s = %.2f %s') + PW_log = ('%s neuron - PW titration %u/%u (a = %.1f nm, f = %.2f kHz, %s = %.2f %s, ' + 'PRF = %.2f kHz, DF = %.2f)') + + logger.info("Starting NICE titration batch") + + # Unpack geometrical parameters + a = geom['a'] + d = geom['d'] + + # Define default parameters + sim_type = 'effective' + offset = 30e-3 + + # Determine titration parameter (x) and titrations list + A = {'name': 'A', 'factor': 1e-3, 'unit': 'kPa'} + t = {'name': 't', 'factor': 1e3, 'unit': 'ms'} + if 'durations' not in stim_params: + varin = A + varout = t + titr_type = 'duration' + sim_queue = createSimQueue(stim_params['amps'], [0.], [offset], + stim_params['PRFs'], stim_params['DFs']) + sim_queue = np.delete(sim_queue, 1, axis=1) + + elif 'amps' not in stim_params: + varin = t + varout = A + titr_type = 'amplitude' + sim_queue = createSimQueue([0.], stim_params['durations'], + [offset] * len(stim_params['durations']), + stim_params['PRFs'], stim_params['DFs']) + sim_queue = np.delete(sim_queue, 0, axis=1) + + nqueue = sim_queue.shape[0] + + # Run titrations + simcount = 0 + nsims = len(neurons) * len(stim_params['freqs']) * nqueue + for ch_mech in neurons: + for Fdrive in stim_params['freqs']: + try: + # Create SolverUS instance (modulus of embedding tissue depends on frequency) + solver = SolverUS(geom, bls_params, ch_mech, Fdrive) + + for i in range(nqueue): + simcount += 1 + input_val, toffset, PRF, DF = sim_queue[i, :] + + # Get date and time info + date_str = time.strftime("%Y.%m.%d") + daytime_str = time.strftime("%H:%M:%S") + + # Log and define naming + if DF == 1.0: + logger.info(CW_log, ch_mech.name, simcount, nsims, a * 1e9, Fdrive * 1e-3, + varin['name'], input_val * varin['factor'], varin['unit']) + else: + logger.info(PW_log, ch_mech.name, simcount, nsims, a * 1e9, Fdrive * 1e-3, + varin['name'], input_val * varin['factor'], varin['unit'], + PRF * 1e-3, DF) + + # Run titration + tstart = time.time() + (output_thr, t, y, states, lat) = solver.titrate(ch_mech, Fdrive, input_val, + toffset, PRF, DF, titr_type) + Z, ng, Qm, *channels = y + U = np.insert(np.diff(Z) / np.diff(t), 0, 0.0) + tcomp = time.time() - tstart + logger.info('completed in %.2f s, threshold = 
%.2f %s', tcomp, + output_thr * varout['factor'], varout['unit']) + + # Sort input and output as amplitude and duration + if titr_type == 'amplitude': + Adrive = output_thr + tstim = input_val + elif titr_type == 'duration': + tstim = output_thr + Adrive = input_val + + # Define output naming + if DF == 1.0: + sim_str_CW = 'sim_{}_CW_{:.0f}nm_{:.0f}kHz_{:.0f}kPa_{:.0f}ms_{}' + simcode = sim_str_CW.format(ch_mech.name, a * 1e9, Fdrive * 1e-3, + Adrive * 1e-3, tstim * 1e3, sim_type) + else: + simcode = sim_str_PW.format(ch_mech.name, a * 1e9, Fdrive * 1e-3, + Adrive * 1e-3, tstim * 1e3, PRF * 1e-3, + DF, sim_type) + + # Store data in dictionary + bls_params['biophys']['Qm0'] = solver.Qm0 + data = { + 'a': a, + 'd': d, + 'params': bls_params, + 'Fdrive': Fdrive, + 'Adrive': Adrive, + 'phi': np.pi, + 'tstim': tstim, + 'toffset': toffset, + 'PRF': PRF, + 'DF': DF, + 't': t, + 'states': states, + 'U': U, + 'Z': Z, + 'ng': ng, + 'Qm': Qm + } + for j in range(len(ch_mech.states_names)): + data[ch_mech.states_names[j]] = channels[j] + + # Export data to PKL file + datafile_name = batch_dir + '/' + simcode + ".pkl" + with open(datafile_name, 'wb') as fh: + pickle.dump(data, fh) + logger.info('simulation data exported to "%s"', datafile_name) + + # Detect spikes on Qm signal + n_spikes, lat, sr = detectSpikes(t, Qm, SPIKE_MIN_QAMP, SPIKE_MIN_DT) + logger.info('%u spike%s detected', n_spikes, "s" if n_spikes > 1 else "") + + # Export key metrics to log file + log = { + 'A': date_str, + 'B': daytime_str, + 'C': ch_mech.name, + 'D': a * 1e9, + 'E': d * 1e6, + 'F': Fdrive * 1e-3, + 'G': Adrive * 1e-3, + 'H': tstim * 1e3, + 'I': PRF * 1e-3 if DF < 1 else 'N/A', + 'J': DF, + 'K': sim_type, + 'L': t.size, + 'M': round(tcomp, 2), + 'N': n_spikes, + 'O': lat * 1e3 if isinstance(lat, float) else 'N/A', + 'P': sr * 1e-3 if isinstance(sr, float) else 'N/A' + } + + if xlslog(log_filepath, 'Data', log) == 1: + logger.info('log exported to "%s"', log_filepath) + else: + logger.error('log export to "%s" aborted', log_filepath) + + except AssertionError as err: + logger.error(err) diff --git a/PointNICE/utils.py b/PointNICE/utils.py new file mode 100644 index 0000000..6911e0e --- /dev/null +++ b/PointNICE/utils.py @@ -0,0 +1,557 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# @Author: Theo Lemaire +# @Date: 2016-09-19 22:30:46 +# @Email: theo.lemaire@epfl.ch +# @Last Modified by: Theo Lemaire +# @Last Modified time: 2017-08-22 18:39:31 + +""" Definition of generic utility functions used in other modules """ + +from enum import Enum +from functools import partial +import os +import shutil +import logging +import tkinter as tk +from tkinter import filedialog +from openpyxl import load_workbook +import numpy as np +import yaml +import matplotlib.pyplot as plt + + +# Get package logger +logger = logging.getLogger('PointNICE') + + +class PmCompMethod(Enum): + """ Enum: types of computation method for the intermolecular pressure """ + direct = 1 + predict = 2 + + +def LoadParamsFile(filename): + """ Load a dictionary of parameters for the BLS model from an external yaml file. + + :param filename: name of the input file + :return: parameters dictionary + """ + + logger.info('Loading parameters from "%s"', filename) + with open(filename, 'r') as f: + stream = f.read() + params = yaml.load(stream) + return ParseNestedDict(params) + + +LoadParams = partial(LoadParamsFile, filename=os.path.split(__file__)[0] + '/params.yaml') + + +def getLookupDir(): + """ Return the location of the directory holding lookups files. 
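+
+    Example sketch (the returned path depends on the package installation location;
+    the value shown is hypothetical):
+
+    >>> getLookupDir()
+    '/path/to/PointNICE/lookups'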
+ + :return: absolute path to the directory + """ + this_dir, _ = os.path.split(__file__) + return this_dir + '/lookups' + + +def ParseNestedDict(dict_in): + """ Loop through a nested dictionary object and convert all string fields + to floats. + """ + for key, value in dict_in.items(): + if isinstance(value, dict): # If value itself is dictionary + dict_in[key] = ParseNestedDict(value) + elif isinstance(dict_in[key], str): + dict_in[key] = float(dict_in[key]) + return dict_in + + +def OpenFilesDialog(filetype, dirname=''): + """ Open a FileOpenDialogBox to select one or multiple file. + + The default directory and file type are given. + + :param dirname: default directory + :param filetype: default file type + :return: tuple of full paths to the chosen filenames + """ + root = tk.Tk() + root.withdraw() + filenames = filedialog.askopenfilenames(filetypes=[(filetype + " files", '.' + filetype)], + initialdir=dirname) + if filenames: + par_dir = os.path.abspath(os.path.join(filenames[0], os.pardir)) + else: + par_dir = None + return (filenames, par_dir) + + +def SaveFigDialog(dirname, filename): + """ Open a FileSaveDialogBox to set the directory and name + of the figure to be saved. + + The default directory and filename are given, and the + default extension is ".pdf" + + :param dirname: default directory + :param filename: default filename + :return: full path to the chosen filename + """ + root = tk.Tk() + root.withdraw() + filename_out = filedialog.asksaveasfilename(defaultextension=".pdf", initialdir=dirname, + initialfile=filename) + return filename_out + + +def xlslog(filename, sheetname, data): + """ Append log data on a new row to specific sheet of excel workbook. + + :param filename: absolute or relative path to the Excel workbook + :param sheetname: name of the Excel spreadsheet to which data is appended + :param data: data structure to be added to specific columns on a new row + :return: boolean indicating success (1) or failure (0) of operation + """ + + try: + wb = load_workbook(filename) + ws = wb[sheetname] + keys = data.keys() + i = 1 + row_data = {} + for k in keys: + row_data[k] = data[k] + i += 1 + ws.append(row_data) + wb.save(filename) + return 1 + except PermissionError: + # If file cannot be accessed for writing because already opened + logger.error('Cannot write to "%s". Close the file and type "Y"', filename) + user_str = input() + if user_str in ['y', 'Y']: + return xlslog(filename, sheetname, data) + else: + return 0 + + +def ImportExcelCol(filename, sheetname, colstr, startrow): + """ Load a specific column of an excel workbook as a numpy array. + + :param filename: absolute or relative path to the Excel workbook + :param sheetname: name of the Excel spreadsheet to which data is appended + :param colstr: string of the column to import + :param startrow: index of the first row to consider + :return: 1D numpy array with the column data + """ + + wb = load_workbook(filename, read_only=True) + ws = wb.get_sheet_by_name(sheetname) + range_start_str = colstr + str(startrow) + range_stop_str = colstr + str(ws.max_row) + tmp = np.array([[i.value for i in j] for j in ws[range_start_str:range_stop_str]]) + return tmp[:, 0] + + +def ConstructMatrix(serialized_inputA, serialized_inputB, serialized_output): + """ Construct a 2D output matrix from serialized input. 
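+
+    Example sketch with a fully sampled 2x2 grid (hypothetical values):
+
+    >>> As, Bs, mat, nholes = ConstructMatrix(np.array([1, 1, 2, 2]),
+    ...                                       np.array([10, 20, 10, 20]),
+    ...                                       np.array([0.1, 0.2, 0.3, 0.4]))
+    >>> # As = [1, 2], Bs = [10, 20], mat = [[0.1, 0.2], [0.3, 0.4]], nholes = 0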
+ + :param serialized_inputA: serialized input variable A + :param serialized_inputB: serialized input variable B + :param serialized_output: serialized output variable + :return: 4-tuple with vectors of unique values of A (m) and B (n), + output variable 2D matrix (m,n) and number of holes in the matrix + """ + + As = np.unique(serialized_inputA) + Bs = np.unique(serialized_inputB) + nA = As.size + nB = Bs.size + + output = np.zeros((nA, nB)) + output[:] = np.NAN + nholes = 0 + for i in range(nA): + iA = np.where(serialized_inputA == As[i]) + for j in range(nB): + iB = np.where(serialized_inputB == Bs[j]) + iMatch = np.intersect1d(iA, iB) + if iMatch.size == 0: + nholes += 1 + elif iMatch.size > 1: + logger.warning('Identical serialized inputs with values (%f, %f)', As[i], Bs[j]) + else: + iMatch = iMatch[0] + output[i, j] = serialized_output[iMatch] + return (As, Bs, output, nholes) + + +def rmse(x1, x2): + """ Compute the root mean square error between two 1D arrays """ + return np.sqrt(((x1 - x2) ** 2).mean()) + + +def rsquared(x1, x2): + ''' compute the R-squared coefficient between two 1D arrays ''' + residuals = x1 - x2 + ss_res = np.sum(residuals**2) + ss_tot = np.sum((x1 - np.mean(x1))**2) + return 1 - (ss_res / ss_tot) + + +def DownSample(t_dense, y, nsparse): + """ Decimate periodic signals to a specified number of samples.""" + + if(y.ndim) > 1: + nsignals = y.shape[0] + else: + nsignals = 1 + y = np.array([y]) + + # determine time step and period of input signal + T = t_dense[-1] - t_dense[0] + dt_dense = t_dense[1] - t_dense[0] + + # resample time vector linearly + t_ds = np.linspace(t_dense[0], t_dense[-1], nsparse) + + # create MAV window + nmav = int(0.03 * T / dt_dense) + if nmav % 2 == 0: + nmav += 1 + mav = np.ones(nmav) / nmav + + # determine signals padding + npad = int((nmav - 1) / 2) + + # determine indexes of sampling on convolved signals + ids = np.round(np.linspace(0, t_dense.size - 1, nsparse)).astype(int) + + y_ds = np.empty((nsignals, nsparse)) + + # loop through signals + for i in range(nsignals): + # pad, convolve and resample + pad_left = y[i, -(npad + 2):-2] + pad_right = y[i, 1:npad + 1] + y_ext = np.concatenate((pad_left, y[i, :], pad_right), axis=0) + y_mav = np.convolve(y_ext, mav, mode='valid') + y_ds[i, :] = y_mav[ids] + + if nsignals == 1: + y_ds = y_ds[0, :] + + return (t_ds, y_ds) + + +def Pressure2Intensity(p, rho, c): + """ Return the spatial peak, pulse average acoustic intensity (ISPPA) + associated with the specified pressure amplitude. + + :param p: pressure amplitude (Pa) + :param rho: medium density (kg/m3) + :param c: speed of sound in medium (m/s) + :return: spatial peak, pulse average acoustic intensity (W/m2) + """ + return p**2 / (2 * rho * c) + + +def Intensity2Pressure(I, rho, c): + """ Return the pressure amplitude associated with the specified + spatial peak, pulse average acoustic intensity (ISPPA). + + :param I: spatial peak, pulse average acoustic intensity (W/m2) + :param rho: medium density (kg/m3) + :param c: speed of sound in medium (m/s) + :return: pressure amplitude (Pa) + """ + return np.sqrt(2 * rho * c * I) + + +def detectPeaks(x, mph=None, mpd=1, threshold=0, edge='rising', + kpsh=False, valley=False, ax=None): + """ Detect peaks in data based on their amplitude and inter-peak distance. 
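+
+    Example sketch (hypothetical signal): with a minimum peak height of 1.5, only the
+    two larger local maxima are returned:
+
+    >>> idx = detectPeaks(np.array([0., 1., 0., 2., 0., 3., 0.]), mph=1.5, mpd=1)
+    >>> # idx contains the peak indices [3, 5]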
""" + + x = np.atleast_1d(x).astype('float64') + if x.size < 3: + return np.array([], dtype=int) + if valley: + x = -x + # find indices of all peaks + dx = x[1:] - x[:-1] + # handle NaN's + indnan = np.where(np.isnan(x))[0] + if indnan.size: + x[indnan] = np.inf + dx[np.where(np.isnan(dx))[0]] = np.inf + ine, ire, ife = np.array([[], [], []], dtype=int) + if not edge: + ine = np.where((np.hstack((dx, 0)) < 0) & (np.hstack((0, dx)) > 0))[0] + else: + if edge.lower() in ['rising', 'both']: + ire = np.where((np.hstack((dx, 0)) <= 0) & (np.hstack((0, dx)) > 0))[0] + if edge.lower() in ['falling', 'both']: + ife = np.where((np.hstack((dx, 0)) < 0) & (np.hstack((0, dx)) >= 0))[0] + ind = np.unique(np.hstack((ine, ire, ife))) + # handle NaN's + if ind.size and indnan.size: + # NaN's and values close to NaN's cannot be peaks + ind = ind[np.in1d(ind, np.unique(np.hstack((indnan, indnan - 1, indnan + 1))), invert=True)] + # first and last values of x cannot be peaks + if ind.size and ind[0] == 0: + ind = ind[1:] + if ind.size and ind[-1] == x.size - 1: + ind = ind[:-1] + # remove peaks < minimum peak height + if ind.size and mph is not None: + ind = ind[x[ind] >= mph] + # remove peaks - neighbors < threshold + if ind.size and threshold > 0: + dx = np.min(np.vstack([x[ind] - x[ind - 1], x[ind] - x[ind + 1]]), axis=0) + ind = np.delete(ind, np.where(dx < threshold)[0]) + # detect small peaks closer than minimum peak distance + if ind.size and mpd > 1: + ind = ind[np.argsort(x[ind])][::-1] # sort ind by peak height + idel = np.zeros(ind.size, dtype=bool) + for i in range(ind.size): + if not idel[i]: + # keep peaks with the same height if kpsh is True + idel = idel | (ind >= ind[i] - mpd) & (ind <= ind[i] + mpd) \ + & (x[ind[i]] > x[ind] if kpsh else True) + idel[i] = 0 # Keep current peak + # remove the small peaks and sort back the indices by their occurrence + ind = np.sort(ind[~idel]) + + return ind + + +def detectPeaksTime(t, y, mph, mtd): + """ Extension of the detectPeaks function to detect peaks in data based on their + amplitude and time difference, with a non-uniform time vector. + + :param t: time vector (not necessarily uniform) + :param y: signal + :param mph: minimal peak height + :param mtd: minimal time difference + :return: array of peak indexes + """ + + # Detect peaks on signal with no restriction on inter-peak distance + raw_indexes = detectPeaks(y, mph, mpd=1) + + if raw_indexes.size > 0: + + # Filter relevant peaks with temporal distance + n_raw = raw_indexes.size + filtered_indexes = np.array([raw_indexes[0]]) + for i in range(1, n_raw): + i1 = filtered_indexes[-1] + i2 = raw_indexes[i] + if t[i2] - t[i1] < mtd: + if y[i2] > y[i1]: + filtered_indexes[-1] = i2 + else: + filtered_indexes = np.append(filtered_indexes, i2) + + # Return peak indexes + return filtered_indexes + else: + return None + + +def detectSpikes(t, Qm, min_amp, min_dt): + ''' Detect spikes on a charge density signal, and + return their number, latency and rate. 
+ + :param t: time vector (s) + :param Qm: charge density vector (C/m2) + :param min_amp: minimal charge amplitude to detect spikes (C/m2) + :param min_dt: minimal time interval between 2 spikes (s) + :return: 3-tuple with number of spikes, latency (s) and spike rate (sp/s) + ''' + i_spikes = detectPeaksTime(t, Qm, min_amp, min_dt) + if i_spikes is not None: + latency = t[i_spikes[0]] # s + n_spikes = i_spikes.size + if n_spikes > 1: + first_to_last_spike = t[i_spikes[-1]] - t[i_spikes[0]] # s + spike_rate = n_spikes / first_to_last_spike # spikes/s + else: + spike_rate = 'N/A' + else: + latency = 'N/A' + spike_rate = 'N/A' + n_spikes = 0 + return (n_spikes, latency, spike_rate) + + +class InteractiveLegend(object): + """ Class defining an interactive matplotlib legend, where lines visibility can + be toggled by simply clicking on the corresponding legend label. Other graphic + objects can also be associated to the toggle of a specific line + + Adapted from: + http://stackoverflow.com/questions/31410043/hiding-lines-after-showing-a-pyplot-figure + """ + + def __init__(self, legend, aliases): + self.legend = legend + self.fig = legend.axes.figure + self.lookup_artist, self.lookup_handle = self._build_lookups(legend) + self._setup_connections() + self.handles_aliases = aliases + self.update() + + def _setup_connections(self): + for artist in self.legend.texts + self.legend.legendHandles: + artist.set_picker(10) # 10 points tolerance + + self.fig.canvas.mpl_connect('pick_event', self.on_pick) + + def _build_lookups(self, legend): + ''' Method of the InteractiveLegend class building + the legend lookups. ''' + + labels = [t.get_text() for t in legend.texts] + handles = legend.legendHandles + label2handle = dict(zip(labels, handles)) + handle2text = dict(zip(handles, legend.texts)) + + lookup_artist = {} + lookup_handle = {} + for artist in legend.axes.get_children(): + if artist.get_label() in labels: + handle = label2handle[artist.get_label()] + lookup_handle[artist] = handle + lookup_artist[handle] = artist + lookup_artist[handle2text[handle]] = artist + + lookup_handle.update(zip(handles, handles)) + lookup_handle.update(zip(legend.texts, handles)) + + return lookup_artist, lookup_handle + + def on_pick(self, event): + handle = event.artist + if handle in self.lookup_artist: + artist = self.lookup_artist[handle] + artist.set_visible(not artist.get_visible()) + self.update() + + def update(self): + for artist in self.lookup_artist.values(): + handle = self.lookup_handle[artist] + if artist.get_visible(): + handle.set_visible(True) + if artist in self.handles_aliases: + for al in self.handles_aliases[artist]: + al.set_visible(True) + else: + handle.set_visible(False) + if artist in self.handles_aliases: + for al in self.handles_aliases[artist]: + al.set_visible(False) + self.fig.canvas.draw() + + + def show(self): + ''' showing the interactive legend ''' + + plt.show() + + +def find_nearest(array, value): + ''' Find nearest element in 1D array. ''' + + idx = (np.abs(array - value)).argmin() + return (idx, array[idx]) + + +def rescale(x, lb, ub, lb_new=0, ub_new=1): + ''' Rescale a value to a specific interval by linear transformation. ''' + + xnorm = (x - lb) / (ub - lb) + return xnorm * (ub_new - lb_new) + lb_new + + +def printPct(pct, precision): + print(('{:.' 
+ str(precision) + 'f}%').format(pct), end='', flush=True) + print('\r' * (precision + 3), end='') + + +def LennardJones(x, beta, alpha, C, m, n): + """ Generic expression of a Lennard-Jones function, adapted for the context of + symmetric deflection (distance = 2x). + + :param x: deflection (i.e. half-distance) + :param beta: x-shifting factor + :param alpha: x-scaling factor + :param C: y-scaling factor + :param m: exponent of the repulsion term + :param n: exponent of the attraction term + :return: Lennard-Jones potential at given distance (2x) + """ + return C * (np.power((alpha / (2 * x + beta)), m) - np.power((alpha / (2 * x + beta)), n)) + + + +def getPatchesLoc(t, states): + ''' Determine the location of stimulus patches. + + :param t: simulation time vector (s). + :param states: a vector of stimulation state (ON/OFF) at each instant in time. + :return: 3-tuple with number of patches, timing of STIM-ON an STIM-OFF instants. + ''' + + # Compute states derivatives and identify bounds indexes of pulses + dstates = np.diff(states) + ipatch_on = np.insert(np.where(dstates > 0.0)[0] + 1, 0, 0) + ipatch_off = np.where(dstates < 0.0)[0] + + # Get time instants for pulses ON and OFF + npatches = ipatch_on.size + tpatch_on = t[ipatch_on] + tpatch_off = t[ipatch_off] + + # return 3-tuple with #patches, pulse ON and pulse OFF instants + return (npatches, tpatch_on, tpatch_off) + + +def CheckBatchLog(batch_type): + ''' Determine batch directory, and add a log file to the directory if it is absent. + + :param batch_type: name of the log file to search for + :return: 2-tuple with full paths to batch directory and log file + ''' + + # Get batch directory from user + root = tk.Tk() + root.withdraw() + batch_dir = filedialog.askdirectory() + assert batch_dir, 'No batch directory chosen' + + # Check presence of log file in batch directory + logdst = batch_dir + '/log.xlsx' + log_in_dir = os.path.isfile(logdst) + + # If no log file, copy template in directory + if not log_in_dir: + + # Determine log template from batch type + if batch_type == 'mech': + logfile = 'log_mech.xlsx' + elif batch_type == 'elec': + logfile = 'log_elec.xlsx' + else: + raise ValueError('Unknown batch type', batch_type) + this_dir, _ = os.path.split(__file__) + # par_dir = os.path.abspath(os.path.join(this_dir, os.pardir)) + logsrc = this_dir + '/templates/' + logfile + + # Copy template + shutil.copy2(logsrc, logdst) + + return (batch_dir, logdst) diff --git a/README.md b/README.md new file mode 100644 index 0000000..c0db381 --- /dev/null +++ b/README.md @@ -0,0 +1,14 @@ +PointNICE is a Python implementation of the *Neuronal Intramembrane Cavitation Excitation* (NICE) model introduced by Plaksin et. al in 2014 and initially developed in MATLAB by its authors. It contains optimized methods to compute the response of point-neuron models to both acoustic and electrical stimuli. + +This package contains several core modules: + - **bls** defines the underlying biomechanical model of intramembrane cavitation for a charged or uncharged membrane (**BilayerSonophore** class), and provides an integration method to compute the mechanical behaviour of the system subject to an continuous acoustic perturbation. + - **solvers** contains a simple solver for electrical stimuli (**SolverElec** class) as well as a tailored solver for acoustic stimuli (**SolverUS** class). 
The latter directly inherits from the BilayerSonophore class that is internally instantiated, and is hooked to a specific "channel mechanism" in order to link the mechanical model to an electrical model of membrane dynamics. It also provides several integration methods (detailed below) to compute the behaviour of the full electro-mechanical model subject to a continuous or pulsed acoustic stimulus. + - **channels** contains the definitions of the different channels mechanisms inherent to specific neuron types. + - **utils** defines generic utilities used across the different modules + +The **SolverUS** class incorporates optimized numerical integration methods to perform dynamic simulations of the model subject to acoustic perturbation, and compute the evolution of its mechanical and electrical variables: + - a **classic** method that solves all variables for the entire duration of the simulation. This method uses very small time steps and is computationally expensive (simulation time: several hours) + - a **hybrid** method (initially developed by Plaskin et al.) in which integration is performed in consecutive “slices” of time, during which the full system is solved until mechanical stabilization, at which point the electrical system is solely solved with predicted mechanical variables until the end of the slice. This method is more efficient (simulation time: several minutes) and provides accurate results. + - a newly developed **effective** method that neglects the high amplitude oscillations of mechanical and electrical variables during each acoustic cycle, to instead grasp the net effect of the acoustic stimulus on the electrical system. To do so, the sole electrical system is solved using pre-computed coefficients that depend on membrane charge and acoustic amplitude. This method allows to run simulations of the electrical system in only a few seconds, with very accurate results of the net membrane charge evolution. + +This package is meant to be easy to deploy and use as a predictive tool for researchers investigating ultrasonic neuromodulation in a variety of biological structures. diff --git a/deprecated/GPR/test_GPR1D.py b/deprecated/GPR/test_GPR1D.py new file mode 100644 index 0000000..d046fa7 --- /dev/null +++ b/deprecated/GPR/test_GPR1D.py @@ -0,0 +1,179 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# @Author: Theo Lemaire +# @Date: 2017-04-24 11:04:39 +# @Email: theo.lemaire@epfl.ch +# @Last Modified by: Theo Lemaire +# @Last Modified time: 2017-05-26 18:34:14 + +''' Predict a 1D Vmeff profile using Gaussian Process Regression. ''' + +import pickle +import numpy as np +import matplotlib.pyplot as plt +from sklearn.gaussian_process import GaussianProcessRegressor +from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C + + +class Variable: + ''' dummy class to contain information about the variable ''' + + name = '' + unit = '' + lookup = '' + factor = 1. + max_error = 0. 
+ + def __init__(self, var_name, var_unit, var_lookup, var_factor, var_max_error): + self.name = var_name + self.unit = var_unit + self.factor = var_factor + self.lookup = var_lookup + self.max_error = var_max_error + + +# Set data variable and Kriging parameters +varinf = Variable('V_{m, eff}', 'mV', 'V_eff', 1., 1e-2) +# varinf = Variable('\\alpha_{m, eff}', 'ms^{-1}', 'alpha_m_eff', 1e-3, 1e1) +# varinf = Variable('\\beta_{m, eff}', 'ms^{-1}', 'beta_m_eff', 1e-3, 5e0) +# varinf = Variable('\\alpha_{h, eff}', 'ms^{-1}', 'alpha_h_eff', 1e-3, 1e1) +# varinf = Variable('\\beta_{h, eff}', 'ms^{-1}', 'beta_h_eff', 1e-3, 1e1) + + +# Define true function by interpolation from specific profile +def f(x): + return np.interp(x, Qm, xvect) + + +# Load coefficient profile +dirpath = 'C:/Users/admin/Google Drive/PhD/NBLS model/Output/lookups 0.35MHz charge extended/' +filepath = dirpath + 'lookups_a32.0nm_f350.0kHz_A100.0kPa_dQ1.0nC_cm2.pkl' +filepath0 = dirpath + 'lookups_a32.0nm_f350.0kHz_A0.0kPa_dQ1.0nC_cm2.pkl' +with open(filepath, 'rb') as fh: + lookup = pickle.load(fh) + Qm = lookup['Q'] + xvect = lookup[varinf.lookup] +with open(filepath0, 'rb') as fh: + lookup = pickle.load(fh) + xvect0 = lookup[varinf.lookup] + +# xvect = xvect - xvect0 + +# Define algorithmic parameters +n_iter_min = 10 +n_iter_max = 20 +max_pred_errors = [] +max_errors = [] +delta_factor = 10 + +# Define prediction vector +x = np.atleast_2d(np.linspace(-150., 150., 1000) * 1e-5).T +y = f(x).ravel() + +# Define initial samples and compute function at these points +X0 = np.atleast_2d(np.linspace(-150., 150., 10) * 1e-5).T +Y0 = f(X0).ravel() + +# Instantiate a Gaussian Process model +print('Creating Gaussian Process with RBF Kernel') +kernel = C(100.0, (1.0, 500.0)) * RBF(1e-4, (1e-5, 1e-3)) # + C(100.0, (1.0, 500.0)) * RBF(1e-5, (1e-5, 1e-3)) +gp = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=8, normalize_y=True) + +# Fit to data using Maximum Likelihood Estimation of the parameters +print('Initial fitting') +gp.fit(X0, Y0) + +# Make the prediction on the meshed x-axis (ask for MSE as well) +print('Predicting over linear input range') +ypred0, ypred0_std = gp.predict(x, return_std=True) +max_err = np.amax(np.abs(y - ypred0)) +max_errors.append(max_err) +max_pred_error = np.amax(ypred0_std) +max_pred_errors.append(max_pred_error) +print('Initialization: Kernel =', gp.kernel_, + ', Max err = {:.2f} {}, Max pred. 
err = {:.2f} {}'.format( + max_err * varinf.factor, varinf.unit, max_pred_error * varinf.factor, varinf.unit)) + + +# Initial observation and prediction +yminus0 = ypred0 - delta_factor * ypred0_std +yplus0 = ypred0 + delta_factor * ypred0_std +fig, ax = plt.subplots() +ax.plot(x * 1e5, y * varinf.factor, 'r:', label=u'True Function') +ax.plot(X0 * 1e5, Y0 * varinf.factor, 'r.', markersize=10, label=u'Initial Observations') +ax.plot(x * 1e5, ypred0 * varinf.factor, 'b-', label=u'Initial Prediction') +ax.fill(np.concatenate([x, x[::-1]]) * 1e5, + np.concatenate([yminus0, yplus0[::-1]]) * varinf.factor, + alpha=.5, fc='b', ec='None', label='$\\pm\ {:.0f} \\sigma$'.format(delta_factor)) +ax.set_xlabel('$Q_m\ (nC/cm^2)$') +ax.set_ylabel('$' + varinf.name + '\ (' + varinf.unit + ')$') +ax.legend() +ax.set_title('Initial observation and prediction') + +print('Optimizing prediction by adding samples iteratively') + +X = X0 +Y = Y0 +ypred = ypred0 +ypred_std = ypred0_std + +n_iter = 0 +while (max_pred_error > varinf.max_error and n_iter < n_iter_max) or n_iter < n_iter_min: + newX = x[np.argmax(ypred_std)] + newY = f(newX) + X = np.atleast_2d(np.insert(X.ravel(), -1, newX)).T + Y = np.insert(Y, -1, newY) + gp.fit(X, Y) + ypred, ypred_std = gp.predict(x, return_std=True) + max_err = np.amax(np.abs(y - ypred)) + max_errors.append(max_err) + max_pred_error = np.amax(ypred_std) + max_pred_errors.append(max_pred_error) + print('Step {}:'.format(n_iter + 1), ' Kernel =', gp.kernel_, + ', Max err = {:.2f} {}, Max pred. err = {:.2f} {}'.format( + max_err * varinf.factor, varinf.unit, max_pred_error * varinf.factor, varinf.unit)) + if (n_iter + 1) % 5 == 0: + yminus = ypred - delta_factor * ypred_std + yplus = ypred + delta_factor * ypred_std + fig, ax = plt.subplots() + ax.plot(x * 1e5, y * varinf.factor, 'r:', label=u'True Function') + ax.plot(X * 1e5, Y * varinf.factor, 'r.', markersize=10, label=u'Final Observations') + ax.plot(x * 1e5, ypred * varinf.factor, 'b-', label=u'Final Prediction') + ax.fill(np.concatenate([x, x[::-1]]) * 1e5, + np.concatenate([yminus, yplus[::-1]]) * varinf.factor, + alpha=.5, fc='b', ec='None', label='$\\pm\ {:.0f} \\sigma$'.format(delta_factor)) + ax.set_xlabel('$Q_m\ (nC/cm^2)$') + ax.set_ylabel('$' + varinf.name + '\ (' + varinf.unit + ')$') + ax.legend() + ax.set_title('After {} steps'.format(n_iter + 1)) + n_iter += 1 + + +# Final observation and prediction +yminus = ypred - delta_factor * ypred_std +yplus = ypred + delta_factor * ypred_std +fig, ax = plt.subplots() +ax.plot(x * 1e5, y * varinf.factor, 'r:', label=u'True Function') +ax.plot(X * 1e5, Y * varinf.factor, 'r.', markersize=10, label=u'Final Observations') +ax.plot(x * 1e5, ypred * varinf.factor, 'b-', label=u'Final Prediction') +ax.fill(np.concatenate([x, x[::-1]]) * 1e5, + np.concatenate([yminus, yplus[::-1]]) * varinf.factor, + alpha=.5, fc='b', ec='None', label='$\\pm\ {:.0f} \\sigma$'.format(delta_factor)) +ax.set_xlabel('$Q_m\ (nC/cm^2)$') +ax.set_ylabel('$' + varinf.name + '\ (' + varinf.unit + ')$') +ax.legend() +ax.set_title('Final observation and prediction') + +# Evolution of max. absolute error +fig, ax = plt.subplots() +ax.plot(np.linspace(0, n_iter, n_iter + 1), max_errors) +ax.set_xlabel('# iterations') +ax.set_ylabel('Max. error ($' + varinf.unit + ')$') + +# Evolution of max. predicted error +fig, ax = plt.subplots() +ax.plot(np.linspace(0, n_iter, n_iter + 1), max_pred_errors) +ax.set_xlabel('# iterations') +ax.set_ylabel('Max. 
predicted error ($' + varinf.unit + ')$') + +plt.show() diff --git a/deprecated/GPR/test_GPR2D.py b/deprecated/GPR/test_GPR2D.py new file mode 100644 index 0000000..9d9ecd1 --- /dev/null +++ b/deprecated/GPR/test_GPR2D.py @@ -0,0 +1,370 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# @Author: Theo Lemaire +# @Date: 2017-04-24 11:04:39 +# @Email: theo.lemaire@epfl.ch +# @Last Modified by: Theo Lemaire +# @Last Modified time: 2017-06-01 13:38:57 + +''' Predict a 2D Vmeff profile using Gaussian Process Regression. ''' + +import os, ntpath +import pickle +import re +import numpy as np +from scipy.interpolate import griddata +import matplotlib.pyplot as plt +import matplotlib.cm as cm +from sklearn.gaussian_process import GaussianProcessRegressor +from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C +from utils import OpenFilesDialog, rescale, rmse + + +class Variable: + ''' dummy class to contain information about the variable ''' + + name = '' + unit = '' + lookup = '' + factor = 1. + max_error = 0. + + def __init__(self, var_name, var_unit, var_lookup, var_factor, var_max_error): + self.name = var_name + self.unit = var_unit + self.factor = var_factor + self.lookup = var_lookup + self.max_error = var_max_error + + +# Define true function by interpolation from specific profiles +def f(x): + return griddata(points, values, x, method='linear', rescale=True) + + +# Select data files (PKL) +lookup_root = '../Output/lookups 0.35MHz dense/' +lookup_absroot = os.path.abspath(lookup_root) +lookup_filepaths = OpenFilesDialog(lookup_absroot, 'pkl') +rgxp = re.compile('lookups_a(\d*.\d*)nm_f(\d*.\d*)kHz_A(\d*.\d*)kPa_dQ(\d*.\d*)nC_cm2.pkl') +pltdir = 'C:/Users/admin/Desktop/GPR output/' + +# Set data variable and Kriging parameters +varinf = Variable('V_{m, eff}', 'mV', 'V_eff', 1., 1.0) +# varinf = Variable('\\alpha_{m, eff}', 'ms^{-1}', 'alpha_m_eff', 1e-3, 1e4) +# varinf = Variable('\\beta_{m, eff}', 'ms^{-1}', 'beta_m_eff', 1e-3, 5e0) +# varinf = Variable('\\alpha_{h, eff}', 'ms^{-1}', 'alpha_h_eff', 1e-3, 1e1) +# varinf = Variable('\\beta_{h, eff}', 'ms^{-1}', 'beta_h_eff', 1e-3, 1e1) +# varinf = Variable('\\alpha_{n, eff}', 'ms^{-1}', 'alpha_n_eff', 1e-3, 1e1) +# varinf = Variable('\\beta_{n, eff}', 'ms^{-1}', 'beta_n_eff', 1e-3, 1e1) +# varinf = Variable('(p_{\\infty}\ /\ \\tau_p)_{eff}', 'ms^{-1}', 'pinf_over_taup_eff', 1e-3, 1e1) +# varinf = Variable('(1\ /\ \\tau_p)_{eff}', 'ms^{-1}', 'inv_taup_eff', 1e-3, 1e1) +# varinf = Variable('n_{g,on}', 'mole', 'ng_eff_on', 1e22, 1e1) +# varinf = Variable('n_{g,off}', 'mole', 'ng_eff_off', 1e22, 1e1) + +# Check dialog output +if not lookup_filepaths: + print('error: no lookup table selected') +else: + print('importing lookup tables') + nfiles = len(lookup_filepaths) + amps = np.empty(nfiles) + + for i in range(nfiles): + + # Load lookup table + lookup_filename = ntpath.basename(lookup_filepaths[i]) + mo = rgxp.fullmatch(lookup_filename) + if not mo: + print('Error: lookup file does not match regular expression pattern') + else: + # Retrieve stimulus parameters + Fdrive = float(mo.group(2)) * 1e3 + Adrive = float(mo.group(3)) * 1e3 + dQ = float(mo.group(4)) * 1e-2 + amps[i] = Adrive + if Adrive == 0: + baseline_ind = i + + # Retrieve coefficients data + with open(lookup_filepaths[i], 'rb') as fh: + lookup = pickle.load(fh) + if i == 0: + Qm = lookup['Q'] + nQ = np.size(Qm) + var = np.empty((nfiles, nQ)) + var[i, :] = lookup[varinf.lookup] + else: + if np.array_equal(Qm, lookup['Q']): + var[i, :] = lookup[varinf.lookup] + else: + 
print('Error: charge vector not consistent') + + # Compute data metrics + namps = amps.size + Amin = np.amin(amps) + Amax = np.amax(amps) + Qmin = np.amin(Qm) + Qmax = np.amax(Qm) + varmin = np.amin(var) + varmax = np.amax(var) + print('Initial data:', nQ, 'charges,', namps, 'amplitudes') + + np.savetxt('tmp.txt', np.transpose(var)) + quit() + + # Define points for interpolation function + Q_mesh, A_mesh = np.meshgrid(Qm, amps) + points = np.column_stack([A_mesh.flatten(), Q_mesh.flatten()]) + values = var.flatten() + + # Define algorithmic parameters + n_iter_min = 10 + n_iter_max = 100 + MAE_pred = [] + MAE_true = [] + RMSE_true = [] + + # Define estimation vector + nAest = 50 + nQest = 100 + Aest = np.linspace(Amin, Amax, nAest) + Qest = np.linspace(Qmin, Qmax, nQest) + Qest_mesh, Aest_mesh = np.meshgrid(Qest, Aest) + x = np.column_stack([Aest_mesh.flatten(), Qest_mesh.flatten()]) + ytrue = f(x).ravel().reshape((nAest, nQest)) + + # Define initial observation vector + nAobs = 5 + nQobs = 20 + Aobs = np.linspace(Amin, Amax, nAobs) + Qobs = np.linspace(Qmin, Qmax, nQobs) + Qobs_mesh, Aobs_mesh = np.meshgrid(Qobs, Aobs) + X0 = np.column_stack([Aobs_mesh.flatten(), Qobs_mesh.flatten()]) + Y0 = f(X0).ravel() + + # np.savetxt('data_sparse.txt', np.column_stack([X0, Y0]), fmt='% .7e', delimiter=' ', newline='\n ') + # quit() + + + # Instantiate a Gaussian Process model + print('Creating Gaussian Process with RBF Kernel') + kernel = C(100.0, (1.0, 500.0)) * RBF((1e4, 1e-4), ((1e3, 1e5), (1e-5, 1e-3))) + gp = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=8, normalize_y=True) + + # Fit to initial data using Maximum Likelihood Estimation of the parameters + print('Initial fitting') + gp.fit(X0, Y0) + X = X0 + Y = Y0 + + # Make the prediction on the meshed x-axis (ask for MSE as well) + print('Predicting over linear input range') + y0, y0_std = gp.predict(x, return_std=True) + y0 = y0.reshape((nAest, nQest)) + y0_std = y0_std.reshape((nAest, nQest)) + y0_err_true = np.abs(y0 - ytrue) + MAE_pred.append(np.amax(y0_std)) + MAE_true.append(np.amax(np.abs(y0 - ytrue))) + RMSE_true.append(rmse(y0, ytrue)) + print('Initialization: Kernel =', gp.kernel_) + print('predicted MAE = {:.2f} {}, true MAE = {:.2f} {}'.format(MAE_pred[-1] * varinf.factor, + varinf.unit, + MAE_true[-1] * varinf.factor, + varinf.unit)) + # Optimization + print('Optimizing prediction by adding samples iteratively') + n_iter = 0 + y_std = y0_std + while n_iter < n_iter_max and (MAE_pred[-1] > varinf.max_error or n_iter < n_iter_min): + new_X = x[np.argmax(y_std)] + X = np.vstack((X, new_X)) + Y = np.append(Y, f(new_X)) + gp.fit(X, Y) + y, y_std = gp.predict(x, return_std=True) + y = y.reshape((nAest, nQest)) + y_std = y_std.reshape((nAest, nQest)) + y_err_true = np.abs(y - ytrue) + MAE_pred.append(np.amax(y_std)) + MAE_true.append(np.amax(np.abs(y - ytrue))) + RMSE_true.append(rmse(y, ytrue)) + print('step {}:'.format(n_iter + 1), 'Kernel =', gp.kernel_) + print('predicted MAE = {:.2f} {}, true MAE = {:.2f} {}'.format(MAE_pred[-1] * varinf.factor, + varinf.unit, + MAE_true[-1] * varinf.factor, + varinf.unit)) + n_iter += 1 + + # Plotting + mymap = cm.get_cmap('viridis') + sm_amp = plt.cm.ScalarMappable(cmap=mymap, norm=plt.Normalize(Amin * 1e-3, Amax * 1e-3)) + sm_amp._A = [] + var_levels = np.linspace(varmin, varmax, 20) * varinf.factor + sm_var = plt.cm.ScalarMappable(cmap=mymap, norm=plt.Normalize(varmin * varinf.factor, varmax * varinf.factor)) + sm_var._A = [] + varerr0_levels = np.linspace(0., 
np.amax(y0_err_true), 20) * varinf.factor + varerr_levels = np.linspace(0., np.amax(y_err_true), 20) * varinf.factor + sm_varerr0 = plt.cm.ScalarMappable(cmap=mymap, norm=plt.Normalize(0., np.amax(y0_err_true) * varinf.factor)) + sm_varerr0._A = [] + sm_varerr = plt.cm.ScalarMappable(cmap=mymap, norm=plt.Normalize(0., np.amax(y_err_true) * varinf.factor)) + sm_varerr._A = [] + + # True function profiles + fig, ax = plt.subplots(figsize=(10, 6)) + ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=20) + ax.set_ylabel('$' + varinf.name + '\ (' + varinf.unit + ')$', fontsize=20) + ax.set_title('True function profiles', fontsize=20) + for i in range(nAest): + ax.plot(Qest * 1e5, ytrue[i, :] * varinf.factor, c=mymap(rescale(Aest[i], Amin, Amax))) + fig.subplots_adjust(right=0.85) + cbar_ax = fig.add_axes([0.87, 0.1, 0.02, 0.8]) + fig.add_axes() + fig.colorbar(sm_amp, cax=cbar_ax) + cbar_ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=20) + fig.savefig(pltdir + 'fig1.png', format='png') + + # True function map + fig, ax = plt.subplots(figsize=(10, 6)) + ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=20) + ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=20) + ax.set_title('True function map', fontsize=20) + ax.contourf(Qest * 1e5, Aest * 1e-3, ytrue * varinf.factor, levels=var_levels, + cmap='viridis') + fig.subplots_adjust(right=0.85) + cbar_ax = fig.add_axes([0.87, 0.1, 0.02, 0.8]) + fig.add_axes() + fig.colorbar(sm_var, cax=cbar_ax) + cbar_ax.set_ylabel('$' + varinf.name + '\ (' + varinf.unit + ')$', fontsize=20) + fig.savefig(pltdir + 'fig2.png', format='png') + + # Initial estimation profiles + fig, ax = plt.subplots(figsize=(10, 6)) + ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=20) + ax.set_ylabel('$' + varinf.name + '\ (' + varinf.unit + ')$', fontsize=20) + ax.set_title('Initial estimation profiles', fontsize=20) + for i in range(nAest): + ax.plot(Qest * 1e5, y0[i, :] * varinf.factor, c=mymap(rescale(Aest[i], Amin, Amax))) + fig.subplots_adjust(right=0.85) + cbar_ax = fig.add_axes([0.87, 0.1, 0.02, 0.8]) + fig.add_axes() + fig.colorbar(sm_amp, cax=cbar_ax) + cbar_ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=20) + fig.savefig(pltdir + 'fig3.png', format='png') + + # Initial estimation map + fig, ax = plt.subplots(figsize=(10, 6)) + ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=20) + ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=20) + ax.set_title('Initial estimation map', fontsize=20) + ax.contourf(Qest * 1e5, Aest * 1e-3, y0 * varinf.factor, levels=var_levels, + cmap='viridis') + ax.scatter(X0[:, 1] * 1e5, X0[:, 0] * 1e-3, c='black') + fig.subplots_adjust(right=0.85) + cbar_ax = fig.add_axes([0.87, 0.1, 0.02, 0.8]) + fig.add_axes() + fig.colorbar(sm_var, cax=cbar_ax) + cbar_ax.set_ylabel('$' + varinf.name + '\ (' + varinf.unit + ')$', fontsize=20) + fig.savefig(pltdir + 'fig4.png', format='png') + + # Initial error profiles + fig, ax = plt.subplots(figsize=(10, 6)) + ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=20) + ax.set_ylabel('$' + varinf.name + '\ (' + varinf.unit + ')$', fontsize=20) + ax.set_title('Initial error profiles', fontsize=20) + for i in range(nAest): + ax.plot(Qest * 1e5, (y0[i, :] - ytrue[i, :]) * varinf.factor, + c=mymap(rescale(Aest[i], Amin, Amax))) + fig.subplots_adjust(right=0.85) + cbar_ax = fig.add_axes([0.87, 0.1, 0.02, 0.8]) + fig.add_axes() + fig.colorbar(sm_amp, cax=cbar_ax) + cbar_ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=20) + fig.savefig(pltdir + 'fig5.png', format='png') + + # Initial error map + fig, ax = plt.subplots(figsize=(10, 6)) + ax.set_ylabel('$A_{drive} \ 
(kPa)$', fontsize=20) + ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=20) + ax.set_title('Initial error map', fontsize=20) + ax.contourf(Qest * 1e5, Aest * 1e-3, y0_err_true * varinf.factor, levels=varerr0_levels, + cmap='viridis') + ax.scatter(X[:, 1] * 1e5, X[:, 0] * 1e-3, c='black') + fig.subplots_adjust(right=0.85) + cbar_ax = fig.add_axes([0.87, 0.1, 0.02, 0.8]) + fig.add_axes() + fig.colorbar(sm_varerr0, cax=cbar_ax) + cbar_ax.set_ylabel('$' + varinf.name + '\ (' + varinf.unit + ')$', fontsize=20) + fig.savefig(pltdir + 'fig6.png', format='png') + + # Final estimation profiles + fig, ax = plt.subplots(figsize=(10, 6)) + ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=20) + ax.set_ylabel('$' + varinf.name + '\ (' + varinf.unit + ')$', fontsize=20) + ax.set_title('Final estimation profiles', fontsize=20) + for i in range(nAest): + ax.plot(Qest * 1e5, y[i, :] * varinf.factor, c=mymap(rescale(Aest[i], Amin, Amax))) + fig.subplots_adjust(right=0.85) + cbar_ax = fig.add_axes([0.87, 0.1, 0.02, 0.8]) + fig.add_axes() + fig.colorbar(sm_amp, cax=cbar_ax) + cbar_ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=20) + fig.savefig(pltdir + 'fig7.png', format='png') + + # Final estimation map + fig, ax = plt.subplots(figsize=(10, 6)) + ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=20) + ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=20) + ax.set_title('Final estimation map', fontsize=20) + ax.contourf(Qest * 1e5, Aest * 1e-3, y * varinf.factor, levels=var_levels, + cmap='viridis') + ax.scatter(X[:, 1] * 1e5, X[:, 0] * 1e-3, c='black') + fig.subplots_adjust(right=0.85) + cbar_ax = fig.add_axes([0.87, 0.1, 0.02, 0.8]) + fig.add_axes() + fig.colorbar(sm_var, cax=cbar_ax) + cbar_ax.set_ylabel('$' + varinf.name + '\ (' + varinf.unit + ')$', fontsize=20) + fig.savefig(pltdir + 'fig8.png', format='png') + + # Final error profiles + fig, ax = plt.subplots(figsize=(10, 6)) + ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=20) + ax.set_ylabel('$' + varinf.name + '\ (' + varinf.unit + ')$', fontsize=20) + ax.set_title('Final error profiles', fontsize=20) + for i in range(nAest): + ax.plot(Qest * 1e5, (y[i, :] - ytrue[i, :]) * varinf.factor, + c=mymap(rescale(Aest[i], Amin, Amax))) + fig.subplots_adjust(right=0.85) + cbar_ax = fig.add_axes([0.87, 0.1, 0.02, 0.8]) + fig.add_axes() + fig.colorbar(sm_amp, cax=cbar_ax) + cbar_ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=20) + fig.savefig(pltdir + 'fig9.png', format='png') + + # Final error map + fig, ax = plt.subplots(figsize=(10, 6)) + ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=20) + ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=20) + ax.set_title('Final error map', fontsize=20) + ax.contourf(Qest * 1e5, Aest * 1e-3, y_err_true * varinf.factor, levels=varerr_levels, + cmap='viridis') + ax.scatter(X[:, 1] * 1e5, X[:, 0] * 1e-3, c='black') + fig.subplots_adjust(right=0.85) + cbar_ax = fig.add_axes([0.87, 0.1, 0.02, 0.8]) + fig.add_axes() + fig.colorbar(sm_varerr, cax=cbar_ax) + cbar_ax.set_ylabel('$' + varinf.name + '\ (' + varinf.unit + ')$', fontsize=20) + fig.savefig(pltdir + 'fig10.png', format='png') + + # Error evolution + fig, ax = plt.subplots() + iters = np.linspace(0, n_iter, n_iter + 1) + ax.plot(iters, np.array(MAE_true) * varinf.factor, label='true error') + ax.plot(iters, np.array(MAE_pred) * varinf.factor, label='predicted error') + ax.plot(iters, np.array(RMSE_true) * varinf.factor, label='true RMSE') + ax.set_xlabel('# iterations', fontsize=20) + ax.set_ylabel('Max. 
absolute error ($' + varinf.unit + ')$', fontsize=20) + ax.set_title('Error evolution', fontsize=20) + ax.legend(fontsize=20) + fig.savefig(pltdir + 'fig11.png', format='png') + + # plt.show() diff --git a/deprecated/GPR/test_GPR2D_multiout.py b/deprecated/GPR/test_GPR2D_multiout.py new file mode 100644 index 0000000..00bf94f --- /dev/null +++ b/deprecated/GPR/test_GPR2D_multiout.py @@ -0,0 +1,382 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# @Author: Theo Lemaire +# @Date: 2017-04-24 11:04:39 +# @Email: theo.lemaire@epfl.ch +# @Last Modified by: Theo Lemaire +# @Last Modified time: 2017-06-01 17:05:42 + +''' Predict nine different 2D coefficients profile using Gaussian Process Regression. ''' + +import os +import ntpath +import pickle +import re +import logging +import warnings +import numpy as np +from scipy.interpolate import griddata +import matplotlib.pyplot as plt +import matplotlib.cm as cm +from mpl_toolkits.mplot3d import Axes3D +from sklearn.gaussian_process import GaussianProcessRegressor as GPR +from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C +from utils import OpenFilesDialog, rmse, lh2DWithCorners + + +# Define true function by interpolation from specific profile +def f(x): + out = np.empty((x.shape[0], nvar)) + for k in range(nvar): + out[:, k] = griddata(points, values[:, k], x, method='linear', rescale=True) + return out + + +# Select data files (PKL) +lookup_root = '../Output/lookups 0.35MHz dense/' +lookup_absroot = os.path.abspath(lookup_root) +lookup_filepaths = OpenFilesDialog(lookup_absroot, 'pkl') +rgxp = re.compile('lookups_a(\d*.\d*)nm_f(\d*.\d*)kHz_A(\d*.\d*)kPa_dQ(\d*.\d*)nC_cm2.pkl') +outdir = 'C:/Users/admin/Desktop/GPRmultiout output/' + +# Define logging settings and clear log file +logfile = outdir + 'GPR2D_multiout.log' +logging.basicConfig(filename=logfile, level=logging.DEBUG, + format='%(asctime)s - %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p') +with open(logfile, 'w'): + pass + +lookups = ['V_eff', 'alpha_m_eff', 'beta_m_eff', 'alpha_h_eff', 'beta_h_eff', 'alpha_n_eff', + 'beta_n_eff', 'pinf_over_taup_eff', 'inv_taup_eff', 'ng_eff_on', 'ng_eff_off'] +nvar = len(lookups) + +max_errors = [1e0, 1e3, 1e3, 1e8, 1e2, 1e2, 1e4, 1e8, 1e9] +Ckernels = [C(100.0, (1.0, 500.0)), C(1e3, (1e0, 1e5)), C(1e3, (1e0, 1e5)), C(1e5, (1e0, 1e9)), + C(1e2, (1e0, 1e4)), C(1e2, (1e0, 1e4)), C(1e4, (1e0, 1e6)), C(1e5, (1e0, 1e9)), + C(1e5, (1e0, 1e9)), C(1e0, (1e-1, 1e1)), C(1e0, (1e-1, 1e1))] + +factors = [1e0] + [1e-3 for i in range(8)] + [1e0 for i in range(2)] + +units = ['mV'] + ['ms-1' for i in range(8)] + ['1e-22 mole' for i in range(2)] + +plot_names = ['V_{m, eff}', '\\alpha_{m, eff}', '\\beta_{m, eff}', '\\alpha_{h, eff}', + '\\beta_{h, eff}', '\\alpha_{n, eff}', '\\beta_{n, eff}', + 'p_{\\infty}/\\tau_p', '1/\\tau_p', 'n_{g,on}', 'n_{g,off}'] + +plot_units = ['mV'] + ['ms^{-1}' for i in range(8)] + ['10^{-22}\ mole' for i in range(2)] + +# Check dialog output +if not lookup_filepaths: + print('error: no lookup table selected') +else: + print('importing lookup tables') + logging.info('Files selected - importing lookup tables') + nfiles = len(lookup_filepaths) + amps = np.empty(nfiles) + + for i in range(nfiles): + + # Load lookup table + lookup_filename = ntpath.basename(lookup_filepaths[i]) + mo = rgxp.fullmatch(lookup_filename) + if not mo: + print('Error: lookup file does not match regular expression pattern') + else: + # Retrieve stimulus parameters + a = float(mo.group(1)) * 1e-9 + Fdrive = float(mo.group(2)) * 1e3 + Adrive = 
float(mo.group(3)) * 1e3 + dQ = float(mo.group(4)) * 1e-2 + amps[i] = Adrive + if Adrive == 0: + baseline_ind = i + + # Retrieve coefficients data + with open(lookup_filepaths[i], 'rb') as fh: + lookup_data = pickle.load(fh) + if i == 0: + Qmfull = lookup_data['Q'] + Qm = Qmfull[(Qmfull >= -80.0e-5) & (Qmfull <= 50.0e-5)] + nQ = np.size(Qm) + var = np.empty((nfiles, nQ, nvar)) + for j in range(nvar): + varfull = lookup_data[lookups[j]] + var[i, :, j] = varfull[(Qmfull >= -80.0e-5) & (Qmfull <= 50.0e-5)] + else: + Qmfull = lookup_data['Q'] + if np.array_equal(Qm, Qmfull[(Qmfull >= -80.0e-5) & (Qmfull <= 50.0e-5)]): + for j in range(nvar): + varfull = lookup_data[lookups[j]] + var[i, :, j] = varfull[(Qmfull >= -80.0e-5) & (Qmfull <= 50.0e-5)] + else: + print('Error: charge vector not consistent') + + + # Multiplying the gas molar contents + var[:, :, -2] = var[:, :, -2] * 1e22 + var[:, :, -1] = var[:, :, -1] * 1e22 + + # Compute data metrics + namps = amps.size + Amin = np.amin(amps) + Amax = np.amax(amps) + Qmin = np.amin(Qm) + Qmax = np.amax(Qm) + varmin = np.amin(var, axis=(0, 1)) + varmax = np.amax(var, axis=(0, 1)) + logstr = 'Initial data: {} charges, {} amplitudes'.format(nQ, namps) + print(logstr) + logging.info(logstr) + + # Define points for interpolation function + Q_mesh, A_mesh = np.meshgrid(Qm, amps) + points = np.column_stack([A_mesh.flatten(), Q_mesh.flatten()]) + values = var.reshape(namps * nQ, nvar) + + # Define algorithmic parameters + n_iter_max = 100 + MAE_pred = [] + MAE_true = [] + RMSE_true = [] + + # Define estimation grid + nAest = 50 + nQest = 100 + Aest = np.linspace(Amin, Amax, nAest) + Qest = np.linspace(Qmin, Qmax, nQest) + Qest_mesh, Aest_mesh = np.meshgrid(Qest, Aest) + x = np.column_stack([Aest_mesh.flatten(), Qest_mesh.flatten()]) + ytrue = f(x).reshape((nAest, nQest, nvar)) + logstr = 'Estimation grid: {} charges, {} amplitudes'.format(nQest, nAest) + print(logstr) + logging.info(logstr) + + # Define initial observation grid + n0 = 24 + X0 = lh2DWithCorners(n0, (Amin, Amax), (Qmin, Qmax), 'center') + Y0 = f(X0) + logstr = 'Initial observation grid: Latin Hypercube ({} samples) with 4 corners'.format(n0 - 4) + print(logstr) + logging.info(logstr) + + # Instantiate Gaussian Process models + logstr = 'Creating {} Gaussian Processes with scaled RBF Kernels'.format(nvar) + print(logstr) + logging.info(logstr) + kernels = [Ck * RBF((1e4, 1e-4), ((1e3, 1e5), (1e-5, 1e-3))) for Ck in Ckernels] + gprs = [GPR(kernel=k, n_restarts_optimizer=8, normalize_y=True) for k in kernels] + + + # Fit to initial data using Maximum Likelihood Estimation of the parameters + print('Step 0') + logging.info('-------------------------- Initialization --------------------------') + + logstr = 'Fitting' + print(logstr) + logging.info(logstr) + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + for i in range(nvar): + gprs[i].fit(X0, Y0[:, i]) + + + # Make the prediction on the meshed x-axis (ask for MSE as well) + logstr = 'Predicting' + print(logstr) + logging.info(logstr) + y0 = np.empty((nAest * nQest, nvar)) + y0_std = np.empty((nAest * nQest, nvar)) + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + for i in range(nvar): + y0[:, i], y0_std[:, i] = gprs[i].predict(x, return_std=True) + y0 = y0.reshape((nAest, nQest, nvar)) + y0_std = y0_std.reshape((nAest, nQest, nvar)) + MAE_pred.append(np.amax(y0_std, axis=(0, 1))) + MAE_true.append(np.amax(np.abs(y0 - ytrue), axis=(0, 1))) + RMSE_true.append(np.array([rmse(y0[:, :, i], ytrue[:, :, i]) for i 
in range(nvar)])) + logging.info('Kernels:') + for i in range(nvar): + logging.info(' {}: {}'.format(lookups[i], gprs[i].kernel_)) + logging.info('predicted MAEs:') + for i in range(nvar): + logging.info(' {}: {:.2f} {}'.format(lookups[i], MAE_pred[-1][i] * factors[i], units[i])) + logging.info('true MAEs:') + for i in range(nvar): + logging.info(' {}: {:.2f} {}'.format(lookups[i], MAE_true[-1][i] * factors[i], units[i])) + + # Copy initial data for iterations + X = np.moveaxis(np.array([X0 for i in range(nvar)]), 0, -1) + Y = Y0 + + + # Optimization + print('Optimizing prediction by adding samples iteratively') + n_iter = 0 + y_std = y0_std + y_flat = np.empty((nAest * nQest, nvar)) + y_std_flat = np.empty((nAest * nQest, nvar)) + while n_iter < n_iter_max: + print('Step', n_iter + 1) + logstr = '-------------------------- Step {} --------------------------'.format(n_iter + 1) + logging.info(logstr) + + print('Determining new samples') + iMAEs = [np.argmax(y_std[:, :, i]) for i in range(nvar)] + newX = x[iMAEs, :] + X = np.concatenate((X, np.expand_dims(np.transpose(newX), axis=0)), axis=0) + newY = np.expand_dims(np.array([f(newX[i, :])[0, i] for i in range(nvar)]), axis=0) + Y = np.vstack((Y, newY)) + + logstr = 'Fitting' + print(logstr) + logging.info(logstr) + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + for i in range(nvar): + gprs[i].fit(X[:, :, i], Y[:, i]) + + logstr = 'Predicting' + print(logstr) + logging.info(logstr) + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + for i in range(nvar): + y_flat[:, i], y_std_flat[:, i] = gprs[i].predict(x, return_std=True) + y = y_flat.reshape((nAest, nQest, nvar)) + y_std = y_std_flat.reshape((nAest, nQest, nvar)) + y_err_true = np.abs(y - ytrue) + + MAE_pred.append(np.amax(y_std, axis=(0, 1))) + MAE_true.append(np.amax(np.abs(y - ytrue), axis=(0, 1))) + RMSE_true.append(np.array([rmse(y[:, :, i], ytrue[:, :, i]) for i in range(nvar)])) + logging.info('Kernels:') + for i in range(nvar): + logging.info(' {}: {}'.format(lookups[i], gprs[i].kernel_)) + logging.info('predicted MAEs:') + for i in range(nvar): + logging.info(' {}: {:.2f} {}'.format(lookups[i], MAE_pred[-1][i] * factors[i], units[i])) + logging.info('true MAEs:') + for i in range(nvar): + logging.info(' {}: {:.2f} {}'.format(lookups[i], MAE_true[-1][i] * factors[i], units[i])) + + n_iter += 1 + + # Saving + gprs_dict = {} + for i in range(nvar): + gprs_dict[lookups[i]] = gprs[i] + predictor_file = 'predictors_a{:.1f}nm_f{:.1f}kHz.pkl'.format(a * 1e9, Fdrive * 1e-3) + logstr = 'Saving predictors dictionary in output file: {}'.format(predictor_file) + logging.info(logstr) + print(logstr) + with open(outdir + predictor_file, 'wb') as fh: + pickle.dump(gprs_dict, fh) + + # Plotting + print('Plotting') + mymap = cm.get_cmap('viridis') + sm_amp = plt.cm.ScalarMappable(cmap=mymap, norm=plt.Normalize(Amin * 1e-3, Amax * 1e-3)) + sm_amp._A = [] + var_levels = np.array([np.linspace(varmin[i], varmax[i], 20) * factors[i] for i in range(nvar)]) + sm_var = [plt.cm.ScalarMappable(cmap=mymap, norm=plt.Normalize(varmin[i] * factors[i], + varmax[i] * factors[i])) + for i in range(nvar)] + for smv in sm_var: + smv._A = [] + varerr0_levels = np.array([np.linspace(0., MAE_pred[0][i], 20) * factors[i] for i in range(nvar)]) + varerr_levels = np.array([np.linspace(0., MAE_pred[-1][i], 20) * factors[i] for i in range(nvar)]) + sm_varerr0 = [plt.cm.ScalarMappable(cmap=mymap, norm=plt.Normalize(0., + MAE_pred[0][i] * factors[i])) + for i in range(nvar)] + for smv 
in sm_varerr0: + smv._A = [] + sm_varerr = [plt.cm.ScalarMappable(cmap=mymap, norm=plt.Normalize(0., + MAE_pred[-1][i] * factors[i])) + for i in range(nvar)] + for smv in sm_varerr: + smv._A = [] + + + + for i in range(nvar): + + print('figure {}/{}'.format(i + 1, nvar)) + + # RESPONSE SURFACE + fig = plt.figure(figsize=(24, 12)) + + # True function + ax = fig.add_subplot(2, 3, 1, projection='3d') + ax.set_title('True function', fontsize=20) + ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=18) + ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=18) + ax.set_zlabel('$' + plot_names[i] + '\ (' + plot_units[i] + ')$', fontsize=18) + ax.xaxis._axinfo['label']['space_factor'] = 3.0 + ax.yaxis._axinfo['label']['space_factor'] = 3.0 + ax.zaxis._axinfo['label']['space_factor'] = 3.0 + ax.plot_surface(Qest_mesh * 1e5, Aest_mesh * 1e-3, ytrue[:, :, i] * factors[i], cmap=mymap) + + # Initial prediction + ax = fig.add_subplot(2, 3, 2, projection='3d') + ax.set_title('Initial prediction', fontsize=20) + ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=18) + ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=18) + ax.set_zlabel('$' + plot_names[i] + '\ (' + plot_units[i] + ')$', fontsize=18) + ax.xaxis._axinfo['label']['space_factor'] = 3.0 + ax.yaxis._axinfo['label']['space_factor'] = 3.0 + ax.zaxis._axinfo['label']['space_factor'] = 3.0 + ax.plot_surface(Qest_mesh * 1e5, Aest_mesh * 1e-3, y0[:, :, i] * factors[i], cmap=mymap) + + # Final prediction + ax = fig.add_subplot(2, 3, 3, projection='3d') + ax.set_title('Final prediction', fontsize=20) + ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=18) + ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=18) + ax.set_zlabel('$' + plot_names[i] + '\ (' + plot_units[i] + ')$', fontsize=18) + ax.xaxis._axinfo['label']['space_factor'] = 3.0 + ax.yaxis._axinfo['label']['space_factor'] = 3.0 + ax.zaxis._axinfo['label']['space_factor'] = 3.0 + ax.plot_surface(Qest_mesh * 1e5, Aest_mesh * 1e-3, y[:, :, i] * factors[i], cmap=mymap) + + # Sampling map + ax = fig.add_subplot(2, 3, 4) + ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=20, labelpad=10) + ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=20, labelpad=10) + ax.set_title('Sampling map', fontsize=20) + ax.contourf(Qest * 1e5, Aest * 1e-3, ytrue[:, :, i] * factors[i], levels=var_levels[i, :], + cmap='viridis') + ax.scatter(X[:n0, 1, i] * 1e5, X[:n0, 0, i] * 1e-3, c='black', label='init. 
samples') + ax.scatter(X[n0:, 1, i] * 1e5, X[n0:, 0, i] * 1e-3, c='red', label='added samples') + ax.set_ylim(0.0, 1.15 * Amax * 1e-3) + # ax.legend(fontsize=20, loc=3) + ax.legend(fontsize=20, loc=9, ncol=2) + + # Initial error + ax = fig.add_subplot(2, 3, 5, projection='3d') + ax.set_title('Initial error', fontsize=20) + ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=18) + ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=18) + ax.set_zlabel('$' + plot_names[i] + '\ (' + plot_units[i] + ')$', fontsize=18) + ax.xaxis._axinfo['label']['space_factor'] = 3.0 + ax.yaxis._axinfo['label']['space_factor'] = 3.0 + ax.zaxis._axinfo['label']['space_factor'] = 3.0 + ax.plot_surface(Qest_mesh * 1e5, Aest_mesh * 1e-3, + (y0[:, :, i] - ytrue[:, :, i]) * factors[i], cmap=mymap) + + # Final error + ax = fig.add_subplot(2, 3, 6, projection='3d') + ax.set_title('Final error', fontsize=20) + ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=18) + ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=18) + ax.set_zlabel('$' + plot_names[i] + '\ (' + plot_units[i] + ')$', fontsize=18) + ax.xaxis._axinfo['label']['space_factor'] = 3.0 + ax.yaxis._axinfo['label']['space_factor'] = 3.0 + ax.zaxis._axinfo['label']['space_factor'] = 3.0 + ax.plot_surface(Qest_mesh * 1e5, Aest_mesh * 1e-3, + (y[:, :, i] - ytrue[:, :, i]) * factors[i], cmap=mymap) + + + plt.tight_layout() + fig.savefig(outdir + lookups[i] + '_surf.png', format='png') + plt.close(fig) diff --git a/deprecated/GPR/test_bayesian_optimization.py b/deprecated/GPR/test_bayesian_optimization.py new file mode 100644 index 0000000..ba3b3af --- /dev/null +++ b/deprecated/GPR/test_bayesian_optimization.py @@ -0,0 +1,119 @@ +import importlib +import numpy as np +import matplotlib.pyplot as plt +import matplotlib.cm as cm +from bayes_opt import BayesianOptimization +import numpy as np +import matplotlib.pyplot as plt +from matplotlib import gridspec +import pickle +from utils import LoadParams, rescale +from constants import * + + + +# def getCoeff(nbls, Fdrive, Adrive, phi, Qm): + +# # Set time vector +# T = 1 / Fdrive +# t = np.linspace(0, T, NPC_FULL) +# dt = t[1] - t[0] + +# # Run STIM ON simulation and retrieve deflection and gas content vectors from last cycle +# (_, y_on, _) = nbls.runMech(Adrive, Fdrive, phi, Qm) +# (_, Z, _) = y_on +# deflections = Z[-NPC_FULL:] + +# # Compute membrane capacitance and potential vectors +# capacitances = np.array([nbls.Capct(ZZ) for ZZ in deflections]) +# elastance_integral = np.trapz(1 / capacitances, dx=dt) +# Vmeff = Qm * elastance_integral / T + +# return Vmeff + + +def target(x): + return np.interp(x, Qm, Vmeff) + + +def posterior(bo, x, xmin=0, xmax=150.e-5): + bo.gp.fit(bo.X, bo.Y) + mu, sigma = bo.gp.predict(x, return_std=True) + return mu, sigma + +def plot_gp(bo, x, y): + + fig = plt.figure(figsize=(16, 10)) + fig.suptitle('Gaussian Process and Utility Function After {} Steps'.format(len(bo.X)), fontdict={'size':30}) + + gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1]) + axis = plt.subplot(gs[0]) + acq = plt.subplot(gs[1]) + + mu, sigma = posterior(bo, x) + axis.plot(x, y, linewidth=3, label='Target') + axis.plot(bo.X.flatten(), bo.Y, 'D', markersize=8, label=u'Observations', color='r') + axis.plot(x, mu, '--', color='k', label='Prediction') + + axis.fill(np.concatenate([x, x[::-1]]), + np.concatenate([mu - 1.9600 * sigma, (mu + 1.9600 * sigma)[::-1]]), + alpha=.6, fc='c', ec='None', label='95% confidence interval') + + axis.set_xlim((0., 150.e-5)) + axis.set_ylim((None, None)) + axis.set_ylabel('f(x)', fontdict={'size':20}) + 
axis.set_xlabel('x', fontdict={'size':20}) + + utility = bo.util.utility(x, bo.gp, 0) + acq.plot(x, utility, label='Utility Function', color='purple') + acq.plot(x[np.argmax(utility)], np.max(utility), '*', markersize=15, + label=u'Next Best Guess', markerfacecolor='gold', markeredgecolor='k', markeredgewidth=1) + acq.set_xlim((0., 150.e-5)) + # acq.set_ylim((0, np.max(utility) + 0.5)) + acq.set_ylabel('Utility', fontdict={'size':20}) + acq.set_xlabel('x', fontdict={'size':20}) + + axis.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.) + acq.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.) + + + +filepath = 'C:/Users/admin/Google Drive/PhD/NBLS model/Output/lookups 0.35MHz charge extended/lookups_a32.0nm_f350.0kHz_A100.0kPa_dQ1.0nC_cm2.pkl' +filepath0 = 'C:/Users/admin/Google Drive/PhD/NBLS model/Output/lookups 0.35MHz charge extended/lookups_a32.0nm_f350.0kHz_A0.0kPa_dQ1.0nC_cm2.pkl' + +with open(filepath, 'rb') as fh: + lookup = pickle.load(fh) + Qm = lookup['Q'] + Vmeff = lookup['V_eff'] + +with open(filepath0, 'rb') as fh: + lookup = pickle.load(fh) + Vmbase = lookup['V_eff'] + +Vmeff = -(Vmeff - Vmbase) + + +nQ = 100 +x = np.linspace(0., 150., nQ).reshape(-1, 1) * 1e-5 +y = np.empty(nQ) +for i in range(nQ): + y[i] = target(x[i]) +fig, ax = plt.subplots() +ax.set_xlabel('$Q_m\ (nC/cm^2)$') +ax.set_ylabel('$V_{m, eff}\ (mV)$') +ax.plot(x * 1e5, y) + + +bo = BayesianOptimization(target, {'x': (0., 150.e-5)}) +bo.maximize(init_points=10, n_iter=0, acq='ei', kappa=1) +plot_gp(bo, x, y) + +# bo.maximize(init_points=10, n_iter=0, acq='ei', kappa=5) +# plot_gp(bo, x, y) +for i in range(5): + bo.maximize(init_points=0, n_iter=1, acq='ei', kappa=1) +plot_gp(bo, x, y) + +plt.show() + + diff --git a/deprecated/Taylor expansions/plot_rates_derivatives.py b/deprecated/Taylor expansions/plot_rates_derivatives.py new file mode 100644 index 0000000..f9af8c8 --- /dev/null +++ b/deprecated/Taylor expansions/plot_rates_derivatives.py @@ -0,0 +1,123 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# @Author: Theo Lemaire +# @Date: 2017-03-22 16:04:37 +# @Email: theo.lemaire@epfl.ch +# @Last Modified by: Theo Lemaire +# @Last Modified time: 2017-03-29 18:17:52 + +''' Plot profiles of rate constants functions and derivatives ''' + +import numpy as np +import matplotlib.pyplot as plt +from utils import bilinearExp, stdExp, dualExp, symExp, sigmoid + + +# Define function parameters +am_params = (-43.2, -0.32, 0.25) +Bm_params = (-16.2, 0.28, -0.20) +ah_params = (-39.2, 0.128, 1 / 18) +Bh_params = (-16.2, 4, 0.20) +an_params = (-41.2, -0.032, 0.20) +Bn_params = (-46.2, 0.5, 0.025) +pinf_params = (-35.0, 1, 0.1) +Tp_params = (-35.0, 0.608, 3.3, 0.05) +invTp_params = (-35.0, 1 / 0.608, 3.3, 0.05) + + +# Define potential range and maximal derivation order +nVm = 100 +Vm = np.linspace(-80.0, 50.0, nVm) # mV +norder = 3 + + +# Define vectors +dalpham = np.empty((norder + 1, nVm)) +dbetam = np.empty((norder + 1, nVm)) +dalphah = np.empty((norder + 1, nVm)) +dbetah = np.empty((norder + 1, nVm)) +dalphan = np.empty((norder + 1, nVm)) +dbetan = np.empty((norder + 1, nVm)) +dpinf = np.empty((norder + 1, nVm)) +dtaup = np.empty((norder + 1, nVm)) +dinvTp = np.empty((norder + 1, nVm)) +dpinfoverTp = np.empty((norder + 1, nVm)) + + +# Compute derivatives +for i in range(norder + 1): + dalpham[i, :] = bilinearExp(Vm, am_params, i) + dbetam[i, :] = bilinearExp(Vm, Bm_params, i) + dalphah[i, :] = stdExp(Vm, ah_params, i) + dbetah[i, :] = sigmoid(Vm, Bh_params, i) + dalphan[i, :] = bilinearExp(Vm, 
an_params, i) + dbetan[i, :] = stdExp(Vm, Bn_params, i) + dpinf[i, :] = sigmoid(Vm, pinf_params, i) + dtaup[i, :] = symExp(Vm, Tp_params, i) * 1e3 + dinvTp[i, :] = dualExp(Vm, invTp_params, i) * 1e-3 + + +# Compute pinf/taup derivatives +dpinfoverTp[0, :] = dpinf[0, :] * dinvTp[0, :] +dpinfoverTp[1, :] = dpinf[1, :] * dinvTp[0, :] + dpinf[0, :] * dinvTp[1, :] +dpinfoverTp[2, :] = dpinf[2, :] * dinvTp[0, :] + dpinf[1, :] * dinvTp[1, :]\ + + dpinf[0, :] * dinvTp[2, :] +dpinfoverTp[3, :] = dpinf[3, :] * dinvTp[0, :] + 3 * dpinf[2, :] * dinvTp[1, :]\ + + 3 * dpinf[1, :] * dinvTp[2, :] + dpinf[0, :] * dinvTp[3, :] + + +# Define plot parameters +seqx = (0, 0, 1, 1) +seqy = (0, 1, 0, 1) +f_str1 = ('$[ms^{-1}]$', '$d\ [ms^{-1}.mV^{-1}]$', '$d^2\ [ms^{-1}.mV^{-2}]$', + '$d^3\ [ms^{-1}.mV^{-3}]$') +f_str2 = ('$[-]$', '$d\ [mV^{-1}]$', '$d^2\ [mV^{-2}]$', '$d^3\ [mV^{-3}]$') +f_str3 = ('$[ms]$', '$d\ [ms.mV^{-1}]$', '$d^2\ [ms.mV^{-2}]$', '$d^3\ [ms.mV^{-3}]$') +titles1 = ('$\\alpha_m$', '$\\beta_m$', '$\\alpha_h$', '$\\beta_h$', '$\\alpha_n$', '$\\beta_n$') +titles2 = ('$\\frac{1}{\\tau_p}$', '$\\frac{p_{\\infty}}{\\tau_p}$') +vectors1 = (dalpham, dbetam, dalphah, dbetah, dalphan, dbetan) +vectors2 = (dinvTp, dpinfoverTp) + +# Plot alpha and beta functions +for j in range(len(vectors1)): + fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(22, 10)) + for i in range(4): + ax = axes[seqx[i], seqy[i]] + ax.set_xlabel('$V_m \ [mV]$', fontsize=24) + ax.set_ylabel(f_str1[i], fontsize=24) + ax.plot(Vm, vectors1[j][i, :]) + fig.suptitle(titles1[j], fontsize=30) + + +# Plot p_inf functions +fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(22, 10)) +for i in range(4): + ax = axes[seqx[i], seqy[i]] + ax.set_xlabel('$V_m \ [mV]$', fontsize=24) + ax.set_ylabel(f_str2[i], fontsize=24) + ax.plot(Vm, dpinf[i, :]) +fig.suptitle('$p_{\\infty}$', fontsize=30) + + +# Plot tau_p functions +fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(22, 10)) +for i in range(4): + ax = axes[seqx[i], seqy[i]] + ax.set_xlabel('$V_m \ [mV]$', fontsize=24) + ax.set_ylabel(f_str3[i], fontsize=24) + ax.plot(Vm, dtaup[i, :]) +fig.suptitle('$\\tau_p$', fontsize=30) + + +# Plot invTaup and pinf/Taup functions +for j in range(len(vectors2)): + fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(22, 10)) + for i in range(4): + ax = axes[seqx[i], seqy[i]] + ax.set_xlabel('$V_m \ [mV]$', fontsize=24) + ax.set_ylabel(f_str1[i], fontsize=24) + ax.plot(Vm, vectors2[j][i, :]) + fig.suptitle(titles2[j], fontsize=30) + + +plt.show() diff --git a/deprecated/Taylor expansions/test_alpham_Taylor.py b/deprecated/Taylor expansions/test_alpham_Taylor.py new file mode 100644 index 0000000..97f4452 --- /dev/null +++ b/deprecated/Taylor expansions/test_alpham_Taylor.py @@ -0,0 +1,46 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# @Author: Theo Lemaire +# @Date: 2017-03-21 11:38:56 +# @Email: theo.lemaire@epfl.ch +# @Last Modified by: Theo Lemaire +# @Last Modified time: 2017-03-29 19:12:27 + + +""" Taylor expansions of the alpha_m function around different potential values. 
""" + +import numpy as np +from scipy.special import factorial +import matplotlib.pyplot as plt +from utils import bilinearExp + +# Vm vector +nVm = 100 +Vm = np.linspace(-80.0, 50.0, nVm) # mV + +# alpha_m vector +am_params = (-43.2, -0.32, 0.25) +alpham = bilinearExp(Vm, am_params, 0) + +# alpha_m Taylor expansion +npoints = 10 +norder = 4 +Vm0 = np.linspace(-80.0, 50.0, npoints) # mV +Vmdiff = Vm - np.tile(Vm0, (nVm, 1)).transpose() +Talpham = np.empty((npoints, nVm)) +for i in range(npoints): + T = np.zeros(nVm) + for j in range(norder + 1): + T[:] += bilinearExp(Vm0[i], am_params, j) * Vmdiff[i, :]**j / factorial(j) + Talpham[i, :] = T + +# Plot standard alpha_m vs. Taylor reconstruction around Vm0 +_, ax = plt.subplots(figsize=(22, 10)) +ax.set_xlabel('$V_m\ [mV]$', fontsize=20) +ax.set_ylabel('$[ms^{-1}]$', fontsize=20) +ax.plot(Vm, alpham, linewidth=2, label='$\\alpha_m$') +for i in range(npoints): + ax.plot(Vm, Talpham[i, :], linewidth=2, label='$T_{}\\alpha_m({:.1f})$'.format(norder, Vm0[i])) +ax.legend(fontsize=20) + +plt.show() diff --git a/deprecated/Taylor expansions/test_alpham_eff_Taylor.py b/deprecated/Taylor expansions/test_alpham_eff_Taylor.py new file mode 100644 index 0000000..2003592 --- /dev/null +++ b/deprecated/Taylor expansions/test_alpham_eff_Taylor.py @@ -0,0 +1,152 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# @Author: Theo Lemaire +# @Date: 2017-03-21 11:38:56 +# @Email: theo.lemaire@epfl.ch +# @Last Modified by: Theo Lemaire +# @Last Modified time: 2017-03-29 19:40:44 + + +""" Perform Taylor expansions (up to 4th order) of the alpha_m function + along one acoustic cycle. """ + +import importlib +import numpy as np +from scipy.special import factorial +import matplotlib.pyplot as plt +import matplotlib.cm as cm +import nblscore +from utils import LoadParams, rescale, bilinearExp +from constants import * +importlib.reload(nblscore) # reloading nblscore module + + +# Load NBLS parameters +params = LoadParams("params.yaml") +biomech = params['biomech'] +ac_imp = biomech['rhoL'] * biomech['c'] # Rayl + +# Set geometry of NBLS structure +a = 32e-9 # in-plane radius (m) +d = 0.0e-6 # embedding tissue thickness (m) +geom = {"a": a, "d": d} + +# Create a NBLS instance here (with dummy frequency parameter) +nbls = nblscore.NeuronalBilayerSonophore(geom, params, 0.0, True) + +# Set stimulation parameters +Fdrive = 3.5e5 # Hz +Adrive = 1e5 # Pa +phi = np.pi # acoustic wave phase + +# Set charge linear space +nQ = 100 +charges = np.linspace(-80.0, 50.0, nQ) * 1e-5 # C/m2 +Qmin = np.amin(charges) +Qmax = np.amax(charges) + +# Set alpha_m parameters +am_params = (-43.2, -0.32, 0.25) + +# Set highest Taylor expansion order +norder = 4 + +# Set time vector +T = 1 / Fdrive +t = np.linspace(0, T, NPC_FULL) +dt = t[1] - t[0] + +# Initialize coefficients vectors +deflections = np.empty((nQ, NPC_FULL)) +Vm = np.empty((nQ, NPC_FULL)) +alpham = np.empty((nQ, NPC_FULL)) + + +# Run mechanical simulations for each imposed charge density +print('Running {} mechanical simulations with imposed charge densities'.format(nQ)) +simcount = 0 +for i in range(nQ): + simcount += 1 + + # Log to console + print('--- sim {}/{}: Q = {:.1f} nC/cm2'.format(simcount, nQ, charges[i] * 1e5)) + + # Run simulation and retrieve deflection vector + (_, y, _) = nbls.runMech(Adrive, Fdrive, phi, charges[i]) + (_, Z, _) = y + deflections[i, :] = Z[-NPC_FULL:] + + # Compute Vm and alpham vectors + Vm[i, :] = [charges[i] / nbls.Capct(ZZ) for ZZ in deflections[i, :]] + alpham[i, :] = bilinearExp(Vm[i, :] * 
1e3, am_params, 0) + + +# time-average Vm and alpham +Vmavg = np.mean(Vm, axis=1) +alphamavg = np.mean(alpham, axis=1) + +# (Vm - Vmavg) differences along cycle +Vmavgext = np.tile(Vmavg, (NPC_FULL, 1)).transpose() +Vmdiff = (Vm - Vmavgext) * 1e3 + +# alpham derivatives +dalpham = np.empty((norder + 1, nQ)) +for j in range(norder + 1): + dalpham[j, :] = bilinearExp(Vmavg * 1e3, am_params, j) + +# Taylor expansions along cycle +Talpham = np.empty((norder + 1, nQ, NPC_FULL)) +dalphamext = np.tile(dalpham.transpose(), (NPC_FULL, 1, 1)).transpose() +Talpham[0, :, :] = dalphamext[0, :, :] +for j in range(1, norder + 1): + jterm = dalphamext[j, :, :] * Vmdiff[:, :]**j / factorial(j) + Talpham[j, :, :] = Talpham[j - 1, :, :] + jterm + +# time-averaging of Taylor expansions +Talphamavg = np.mean(Talpham, axis=2) + + +# ------------------ PLOTS ------------------- + +mymap = cm.get_cmap('jet') +sm_Q = plt.cm.ScalarMappable(cmap=mymap, norm=plt.Normalize(Qmin * 1e5, Qmax * 1e5)) +sm_Q._A = [] +t_factor = 1e6 + +# 1: time average Vm +_, ax = plt.subplots(figsize=(22, 10)) +ax.set_xlabel('$Qm\ [uF/cm^2]$', fontsize=20) +ax.set_ylabel('$\\overline{V_m}\ [mV]$', fontsize=20) +ax.plot(charges * 1e5, Vmavg * 1e3, linewidth=2) + +# 2: alpham: standard time-averaged vs.evaluated at time-average Vm +# vs. Taylor reconstructions around Vm_avg +_, ax = plt.subplots(figsize=(22, 10)) +ax.set_xlabel('$Qm\ [uF/cm^2]$', fontsize=20) +ax.set_ylabel('$[ms^{-1}]$', fontsize=20) +ax.plot(charges * 1e5, alphamavg, linewidth=2, label='$\\overline{\\alpha_m(V_m)}$') +for j in range(norder + 1): + ax.plot(charges * 1e5, Talphamavg[j, :], linewidth=2, + label='$\\overline{T_' + str(j) + '[\\alpha_m(\\overline{V_m})]}$') + ax.legend(fontsize=20) + +# 3: original alpham vs. highest order Taylor alpham reconstruction +_, ax = plt.subplots(figsize=(22, 10)) +ax.set_xlabel('$t \ (us)$', fontsize=20) +ax.set_ylabel('$[ms^{-1}]$', fontsize=20) +ax.plot(t * t_factor, alpham[0, :], linewidth=2, + c=mymap(rescale(charges[0], Qmin, Qmax)), label='$\\overline{\\alpha_m(V_m)}$') +ax.plot(t * t_factor, Talpham[-1, 0, :], '--', linewidth=2, + c=mymap(rescale(charges[0], Qmin, Qmax)), + label='$T_' + str(norder) + '[\\alpha_m(\\overline{V_m})]$') +for i in range(1, nQ): + ax.plot(t * t_factor, alpham[i, :], linewidth=2, + c=mymap(rescale(charges[i], Qmin, Qmax))) + ax.plot(t * t_factor, Talpham[-1, i, :], '--', linewidth=2, + c=mymap(rescale(charges[i], Qmin, Qmax))) +cbar = plt.colorbar(sm_Q) +cbar.ax.set_ylabel('$Q \ (nC/cm^2)$', fontsize=28) +ax.legend(fontsize=20) +plt.tight_layout() + +plt.show() diff --git a/deprecated/curve fitting/fit_Vmeff.py b/deprecated/curve fitting/fit_Vmeff.py new file mode 100644 index 0000000..ed7cbaa --- /dev/null +++ b/deprecated/curve fitting/fit_Vmeff.py @@ -0,0 +1,285 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# @Author: Theo Lemaire +# @Date: 2017-01-17 11:41:53 +# @Email: theo.lemaire@epfl.ch +# @Last Modified by: Theo Lemaire +# @Last Modified time: 2017-05-30 10:00:56 + +''' Detailed fitting strategy of the Vm_eff profiles ''' + +import os +import ntpath +import re +import pickle +import matplotlib.pyplot as plt +import matplotlib.cm as cm +import numpy as np +from scipy.optimize import curve_fit +from utils import OpenFilesDialog, rescale, rsquared, rmse, find_nearest + + +def supraGauss(x, x0, a, b): + return 2 / (np.exp(a * np.abs(x - x0)**b) + np.exp(-a * np.abs(x - x0)**b)) + + +def absPow(x, x0, a, b, c): + return a * np.abs(x - x0)**b + c + + +def sigmoid(x, x0, a): + return 1 - 1 
/ (1 + np.abs(x / x0)**a) + + +def hybridPowGauss(x, a, b, c, d): + return supraGauss(x, 0.0, a, b) * absPow(x, 0.0, c, d, 1.0) + + +def hybridPowSigmoid(x, x0, a, b, c): + return sigmoid(x, x0, b) * absPow(x, 0.0, a, c, 0.0) + + + +# Select data files (PKL) +lookup_root = '../Output/lookups 0.35MHz charge extended/' +lookup_absroot = os.path.abspath(lookup_root) +lookup_filepaths = OpenFilesDialog(lookup_absroot, 'pkl') +rgxp = re.compile('lookups_a(\d*.\d*)nm_f(\d*.\d*)kHz_A(\d*.\d*)kPa_dQ(\d*.\d*)nC_cm2.pkl') +plot_bool = 1 + +nQ = 300 +baseline_ind = -1 + +# Check dialog output +if not lookup_filepaths: + print('error: no lookup table selected') +else: + print('importing Vm_eff profiles from lookup tables') + nfiles = len(lookup_filepaths) + + # Initialize coefficients matrices + amps = np.empty(nfiles) + Vm_eff = np.empty((nfiles, nQ)) + + for i in range(nfiles): + + # Load lookup table + lookup_filename = ntpath.basename(lookup_filepaths[i]) + mo = rgxp.fullmatch(lookup_filename) + if not mo: + print('Error: lookup file does not match regular expression pattern') + else: + # Retrieve stimulus parameters + Fdrive = float(mo.group(2)) * 1e3 + Adrive = float(mo.group(3)) * 1e3 + dQ = float(mo.group(4)) * 1e-2 + amps[i] = Adrive + if Adrive == 0: + baseline_ind = i + + # Retrieve coefficients data + with open(lookup_filepaths[i], 'rb') as fh: + lookup = pickle.load(fh) + Qm = lookup['Q'] + Vm_eff[i, :] = lookup['V_eff'] + + if baseline_ind == -1: + print('Error: no baseline profile selected') + else: + + Amin = np.amin(amps) + Amax = np.amax(amps) + Qmin = np.amin(Qm) + Qmax = np.amax(Qm) + namps = nfiles + + i_trueQ_lb, trueQ_lb = find_nearest(Qm, -0.8) + i_trueQ_ub, trueQ_ub = find_nearest(Qm, 0.4) + + + # Baseline subtraction + print('subtracting baseline (Adrive = 0) from profiles') + Vm_eff_sub = (Vm_eff - Vm_eff[baseline_ind, :]) + + # Symmetrization + print('dividing by Qm to get even profiles') + Vm_eff_sub_even = Vm_eff_sub / Qm + + # Peaks fitting on even profiles + print('fitting power law to peaks of even profiles') + Vm_eff_sub_even_peaks = np.amax(Vm_eff_sub_even, axis=1) + Vm_eff_sub_even_peaks[0] = 0. 
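+        # initial guess (x0, a, b, c) for the hybridPowSigmoid fit of the even-profile
+        # peaks as a function of drive amplitude (values presumably hand-tuned)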
+ pguess_peaks = (1e4, 1.6, 3.5, 0.4) + popt, _ = curve_fit(hybridPowSigmoid, amps, Vm_eff_sub_even_peaks, p0=pguess_peaks) + Vm_eff_sub_even_peaks_fit = hybridPowSigmoid(amps, *popt) + + # Normalization + print('normalizing even profiles') + Vm_eff_sub_even_norm = Vm_eff_sub_even[1:, :]\ + / Vm_eff_sub_even_peaks[1:].reshape(namps - 1, 1) + + # Normalized profiles fitting + print('fitting hybrid gaussian-power law to normalized Vm_eff') + Vm_eff_sub_even_norm_fit = np.empty((namps - 1, nQ)) + params = np.empty((namps - 1, 4)) + for i in range(namps - 1): + popt, _ = curve_fit(hybridPowGauss, Qm, Vm_eff_sub_even_norm[i, :], + bounds=([0., 0., -1e5, 0.], + [1e5, 1e5, 0., 1e5])) + Vm_eff_sub_even_norm_fit[i, :] = hybridPowGauss(Qm, *popt) + params[i, :] = np.asarray(popt) + + + # Predict Vm_eff profiles + print('predicting Vm_eff by reconstructing from fits') + Vm_eff_sub_even_predict = np.vstack((np.zeros(nQ), Vm_eff_sub_even_norm_fit))\ + * Vm_eff_sub_even_peaks_fit.reshape(namps, 1) + Vm_eff_sub_predict = Vm_eff_sub_even_predict * Qm + Vm_eff_predict = Vm_eff_sub_predict + Vm_eff[baseline_ind, :] + + # Analyze prediction accuracy, in wide and realistic charge ranges + Vm_eff_trueQ = Vm_eff[:, i_trueQ_lb:i_trueQ_ub] + Vm_eff_predict_trueQ = Vm_eff_predict[:, i_trueQ_lb:i_trueQ_ub] + Vm_eff_diff = Vm_eff_predict - Vm_eff + Vm_eff_diff_trueQ = Vm_eff_diff[:, i_trueQ_lb:i_trueQ_ub] + Vm_eff_maxdiff = np.amax(np.abs(Vm_eff_diff), axis=1) + Vm_eff_maxdiff_trueQ = np.amax(np.abs(Vm_eff_diff_trueQ), axis=1) + Vm_eff_rmse = np.empty(namps) + Vm_eff_rmse_trueQ = np.empty(namps) + for i in range(namps): + Vm_eff_rmse[i] = rmse(Vm_eff[i, :], Vm_eff_predict[i, :]) + Vm_eff_rmse_trueQ[i] = rmse(Vm_eff_trueQ[i, :], Vm_eff_predict_trueQ[i, :]) + + + if plot_bool == 1: + + # Plotting + print('plotting') + + mymap = cm.get_cmap('jet') + sm_amp = plt.cm.ScalarMappable(cmap=mymap, norm=plt.Normalize(Amin * 1e-3, Amax * 1e-3)) + sm_amp._A = [] + + # 1: Vm_eff + fig, ax = plt.subplots(figsize=(15, 7)) + ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) + ax.set_ylabel('$V_{m,\ eff}\ (mV)$', fontsize=28) + ax.set_xlim(Qmin * 1e5, Qmax * 1e5) + for i in range(namps): + ax.plot(Qm * 1e5, Vm_eff[i, :], c=mymap(rescale(amps[i], Amin, Amax))) + cbar = plt.colorbar(sm_amp) + cbar.ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=28) + plt.tight_layout() + + # 2: Vm_eff_sub + fig, ax = plt.subplots(figsize=(15, 7)) + ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) + ax.set_ylabel('$V_{m,\ eff-sub}\ (mV)$', fontsize=28) + ax.set_xlim(Qmin * 1e5, Qmax * 1e5) + for i in range(namps): + ax.plot(Qm * 1e5, Vm_eff_sub[i, :], c=mymap(rescale(amps[i], Amin, Amax))) + cbar = plt.colorbar(sm_amp) + cbar.ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=28) + plt.tight_layout() + + # 3: Vm_eff_sub_even + fig, ax = plt.subplots(figsize=(15, 7)) + ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) + ax.set_ylabel('$V_{m,\ eff-sub-even}\ (mV\ cm^2/nC)$', fontsize=28) + ax.set_xlim(Qmin * 1e5, Qmax * 1e5) + for i in range(namps): + ax.plot(Qm * 1e5, Vm_eff_sub_even[i, :], c=mymap(rescale(amps[i], Amin, Amax))) + cbar = plt.colorbar(sm_amp) + cbar.ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=28) + plt.tight_layout() + + # 4: Vm_eff_sub_even_peaks + fig, ax = plt.subplots(figsize=(15, 7)) + ax.set_xlabel('$A_{drive}\ (kPa)$', fontsize=28) + ax.set_ylabel('$V_{m,\ eff-sub-even-peaks}\ (mV\ cm^2/nC)$', fontsize=28) + ax.scatter(amps * 1e-3, Vm_eff_sub_even_peaks, s=30, c='C0', label='data') + ax.plot(amps * 1e-3, Vm_eff_sub_even_peaks_fit, c='C1', 
label='fit') + ax.legend(fontsize=28) + plt.tight_layout() + + # 5: Vm_eff_sub_even_norm + fig, ax = plt.subplots(figsize=(15, 7)) + ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) + ax.set_ylabel('$V_{m,\ eff-sub-even-norm}\ (-)$', fontsize=28) + ax.set_xlim(Qmin * 1e5, Qmax * 1e5) + for i in range(namps - 1): + ax.plot(Qm * 1e5, Vm_eff_sub_even_norm[i, :], + c=mymap(rescale(amps[i], Amin, Amax))) + for i in range(0, namps - 1): + ax.plot(Qm * 1e5, Vm_eff_sub_even_norm_fit[i, :], '--', + c=mymap(rescale(amps[i], Amin, Amax))) + cbar = plt.colorbar(sm_amp) + cbar.ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=28) + plt.tight_layout() + + # fig, ax = plt.subplots(figsize=(15, 7)) + # ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) + # ax.set_ylabel('$V_{m,\ eff-sub-even-norm-diff}\ (-)$', fontsize=28) + # ax.set_xlim(Qmin * 1e5, Qmax * 1e5) + # for i in range(namps - 1): + # ax.plot(Qm * 1e5, Vm_eff_sub_even_norm[i, :] - Vm_eff_sub_even_norm_fit[i, :], + # c=mymap(rescale(amps[i], Amin, Amax))) + # cbar = plt.colorbar(sm_amp) + # cbar.ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=28) + # plt.tight_layout() + + # 6: hybrid gaussian-power law parameters + fig, ax = plt.subplots(figsize=(15, 7)) + ax.set_xlabel('$A_{drive}\ (kPa)$', fontsize=28) + ax.set_ylabel('$V_{m,\ eff-sub-even-norm}\ params$', fontsize=28) + ax.plot(amps[1:] * 1e-3, params[:, 0], label='a') + ax.plot(amps[1:] * 1e-3, params[:, 1], label='b') + ax.plot(amps[1:] * 1e-3, params[:, 2], label='c') + ax.plot(amps[1:] * 1e-3, params[:, 3], label='d') + ax.grid() + ax.legend(fontsize=28) + + + # 7: Vm_eff_predict + fig, ax = plt.subplots(figsize=(15, 7)) + ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) + ax.set_ylabel('$V_{m,\ eff}\ prediction\ (mV)$', fontsize=28) + ax.set_xlim(Qmin * 1e5, Qmax * 1e5) + for i in range(namps): + ax.plot(Qm * 1e5, Vm_eff_predict[i, :], linewidth=2, + c=mymap(rescale(amps[i], Amin, Amax))) + cbar = plt.colorbar(sm_amp) + cbar.ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=28) + plt.tight_layout() + + + # 8: Vm_eff_predict - Vm_eff + fig, ax = plt.subplots(figsize=(15, 7)) + ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) + ax.set_ylabel('$V_{m,\ eff}\ difference\ (mV)$', fontsize=28) + ax.set_xlim(Qmin * 1e5, Qmax * 1e5) + for i in range(namps): + ax.plot(Qm * 1e5, Vm_eff_diff[i, :], linewidth=2, + c=mymap(rescale(amps[i], Amin, Amax))) + cbar = plt.colorbar(sm_amp) + cbar.ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=28) + plt.tight_layout() + + # # 9: RMSE & max absolute error + # fig, ax = plt.subplots(figsize=(15, 7)) + # ax.set_xlabel('$A_{drive} \ (kPa)$', fontsize=28) + # ax.set_ylabel('$RMSE\ (mV)$', fontsize=28) + # ax.plot(amps * 1e-3, Vm_eff_rmse, linewidth=2, c='C0', + # label='$RMSE\ -\ entire\ Q_m\ range$') + # ax.plot(amps * 1e-3, Vm_eff_rmse_trueQ, linewidth=2, c='C1', + # label='$RMSE\ -\ realistic\ Q_m\ range$') + # ax.plot(amps * 1e-3, Vm_eff_maxdiff, '--', linewidth=2, c='C0', + # label='$MAE\ -\ entire\ Q_m\ range$') + # ax.plot(amps * 1e-3, Vm_eff_maxdiff_trueQ, '--', linewidth=2, c='C1', + # label='$MAE\ -\ realistic\ Q_m\ range$') + # ax.legend(fontsize=28) + # plt.tight_layout() + + + plt.show() + diff --git a/deprecated/curve fitting/fit_alphaheff.py b/deprecated/curve fitting/fit_alphaheff.py new file mode 100644 index 0000000..8b5c4b5 --- /dev/null +++ b/deprecated/curve fitting/fit_alphaheff.py @@ -0,0 +1,270 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# @Author: Theo Lemaire +# @Date: 2017-01-17 11:41:53 +# @Email: theo.lemaire@epfl.ch +# @Last Modified by: 
Theo Lemaire +# @Last Modified time: 2017-02-14 15:48:21 + +''' Detailed fitting strategy of the alpha_h_eff profiles ''' + +import os +import ntpath +import re +import pickle +import matplotlib.pyplot as plt +import matplotlib.cm as cm +import numpy as np +from scipy.optimize import curve_fit +import scipy.special as sp +from utils import OpenFilesDialog, rescale, rmse, find_nearest + + +def skewed_gaussian(x, mu=0, sigma=1, alpha=0, a=1, c=0): + normpdf = (1 / (sigma * np.sqrt(2 * np.pi))) * np.exp(-(np.power((x - mu), 2) / (2 * np.power(sigma, 2)))) + normcdf = (0.5 * (1 + sp.erf((alpha * ((x - mu) / sigma)) / (np.sqrt(2))))) + return 2 * a * normpdf * normcdf + c + + +def gaussian(x, mu, sigma, a): + return a * np.exp(-((x - mu) / (2 * sigma))**2) + + +def Exponential(x, x0, b, c): + return b * np.exp(c * (x - x0)) + + +def Exp0(x, b, c): + return Exponential(x, 0.0, b, c) + + +def hybridExpGauss(x, mu, sigma, a, b, c): + return gaussian(x, mu, sigma, a) + Exponential(x, 0.0, b, -c) + + +def dualGauss(x, mu1, mu2, sigma1, sigma2, a1, a2): + return gaussian(x, mu1, sigma1, a1) + gaussian(x, mu2, sigma2, a2) + + + + +# Select data files (PKL) +lookup_root = '../Output/lookups extended 0.35MHz/' +lookup_absroot = os.path.abspath(lookup_root) +lookup_filepaths = OpenFilesDialog(lookup_absroot, 'pkl') +rgxp = re.compile('lookups_a(\d*.\d*)nm_f(\d*.\d*)kHz_A(\d*.\d*)kPa_dQ(\d*.\d*)nC_cm2.pkl') +plot_bool = 1 + +nQ = 300 +baseline_ind = -1 + +# Check dialog output +if not lookup_filepaths: + print('error: no lookup table selected') +else: + print('importing alphah_eff profiles from lookup tables') + nfiles = len(lookup_filepaths) + + # Initialize coefficients matrices + amps = np.empty(nfiles) + alphah_eff = np.empty((nfiles, nQ)) + + for i in range(nfiles): + + # Load lookup table + lookup_filename = ntpath.basename(lookup_filepaths[i]) + mo = rgxp.fullmatch(lookup_filename) + if not mo: + print('Error: lookup file does not match regular expression pattern') + else: + # Retrieve stimulus parameters + Fdrive = float(mo.group(2)) * 1e3 + Adrive = float(mo.group(3)) * 1e3 + dQ = float(mo.group(4)) * 1e-2 + amps[i] = Adrive + if Adrive == 0: + baseline_ind = i + + # Retrieve coefficients data + with open(lookup_filepaths[i], 'rb') as fh: + lookup = pickle.load(fh) + Qm = lookup['Q'] + alphah_eff[i, :] = lookup['alpha_h_eff'] + + if baseline_ind == -1: + print('Error: no baseline profile selected') + else: + + Amin = np.amin(amps) + Amax = np.amax(amps) + Qmin = np.amin(Qm) + Qmax = np.amax(Qm) + namps = nfiles + + i_trueQ_lb, trueQ_lb = find_nearest(Qm, -0.8) + i_trueQ_ub, trueQ_ub = find_nearest(Qm, 0.4) + + # Baseline subtraction + print('subtracting baseline (Adrive = 0) from profiles') + alphah_eff_sub = (alphah_eff - alphah_eff[baseline_ind, :]) + + # Peaks fitting on even profiles + print('fitting exponential law to profiles peaks') + alphah_eff_sub_peaks = np.amax(alphah_eff_sub, axis=1) + popt, _ = curve_fit(Exp0, amps, alphah_eff_sub_peaks, p0=(1.8e14, 3e-5)) + alphah_eff_sub_peaks_fit = Exp0(amps, *popt) + + # Normalization + print('normalizing subtracted profiles') + alphah_eff_sub_norm = alphah_eff_sub[1:, :]\ + / alphah_eff_sub_peaks[1:].reshape(namps - 1, 1) + + # Normalized profiles fitting + print('fitting hybrid gaussian-exp law to normalized alphaheff-sub') + alphah_eff_sub_norm_fit = np.empty((namps - 1, nQ)) + params = np.empty((namps - 1, 6)) + for i in range(namps - 1): + print(i) + popt, _ = curve_fit(dualGauss, Qm, alphah_eff_sub_norm[i], + bounds=([-np.infty, -np.infty, 
0., 0., 0., 0.], + [0., 0., np.infty, np.infty, np.infty, np.infty]), + max_nfev=100000) + alphah_eff_sub_norm_fit[i, :] = dualGauss(Qm, *popt) + params[i, :] = np.asarray(popt) + + + + # Predict alphah_eff profiles + print('predicting alphah_eff by reconstructing from fits') + alphah_eff_sub_predict = np.vstack((np.zeros(nQ), alphah_eff_sub_norm_fit))\ + * alphah_eff_sub_peaks_fit.reshape(namps, 1) + alphah_eff_predict = alphah_eff_sub_predict + alphah_eff[baseline_ind, :] + + # Analyze prediction accuracy, in wide and realistic charge ranges + alphah_eff_trueQ = alphah_eff[:, i_trueQ_lb:i_trueQ_ub] + alphah_eff_predict_trueQ = alphah_eff_predict[:, i_trueQ_lb:i_trueQ_ub] + alphah_eff_diff = alphah_eff_predict - alphah_eff + alphah_eff_diff_trueQ = alphah_eff_diff[:, i_trueQ_lb:i_trueQ_ub] + alphah_eff_maxdiff = np.amax(np.abs(alphah_eff_diff), axis=1) + alphah_eff_maxdiff_trueQ = np.amax(np.abs(alphah_eff_diff_trueQ), axis=1) + alphah_eff_rmse = np.empty(namps) + alphah_eff_rmse_trueQ = np.empty(namps) + for i in range(namps): + alphah_eff_rmse[i] = rmse(alphah_eff[i, :], alphah_eff_predict[i, :]) + alphah_eff_rmse_trueQ[i] = rmse(alphah_eff_trueQ[i, :], alphah_eff_predict_trueQ[i, :]) + + + if plot_bool == 1: + + # Plotting + print('plotting') + + mymap = cm.get_cmap('jet') + sm_amp = plt.cm.ScalarMappable(cmap=mymap, norm=plt.Normalize(Amin * 1e-3, Amax * 1e-3)) + sm_amp._A = [] + + # 1: alphah_eff + fig, ax = plt.subplots(figsize=(21, 7)) + ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) + ax.set_ylabel('$\\alpha_{h,\ eff}\ (ms^{-1})$', fontsize=28) + ax.set_xlim(Qmin * 1e2, Qmax * 1e2) + for i in range(namps): + ax.plot(Qm * 1e2, alphah_eff[i, :] * 1e-3, c=mymap(rescale(amps[i], Amin, Amax))) + cbar = plt.colorbar(sm_amp) + cbar.ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=28) + plt.tight_layout() + + # 2: alphah_eff_sub + fig, ax = plt.subplots(figsize=(21, 7)) + ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) + ax.set_ylabel('$\\alpha_{h,\ eff-sub}\ (ms^{-1})$', fontsize=28) + ax.set_xlim(Qmin * 1e2, Qmax * 1e2) + for i in range(namps): + ax.plot(Qm * 1e2, alphah_eff_sub[i, :] * 1e-3, + c=mymap(rescale(amps[i], Amin, Amax))) + cbar = plt.colorbar(sm_amp) + cbar.ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=28) + plt.tight_layout() + + # 3: alphah_eff_sub_peaks + fig, ax = plt.subplots(figsize=(21, 7)) + ax.set_xlabel('$A_{drive}\ (kPa)$', fontsize=28) + ax.set_ylabel('$\\alpha_{h,\ eff-sub-peaks}\ (ms^{-1})$', fontsize=28) + ax.scatter(amps * 1e-3, alphah_eff_sub_peaks * 1e-3, s=30, c='C0', label='data') + ax.plot(amps * 1e-3, alphah_eff_sub_peaks_fit * 1e-3, c='C1', label='fit') + ax.legend(fontsize=28) + plt.tight_layout() + + # 5: alphah_eff_sub_norm + fig, ax = plt.subplots(figsize=(21, 7)) + ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) + ax.set_ylabel('$\\alpha_{h,\ eff-sub-norm}\ (-)$', fontsize=28) + ax.set_xlim(Qmin * 1e2, Qmax * 1e2) + ax.grid() + for i in range(namps - 1): + ax.plot(Qm * 1e2, alphah_eff_sub_norm[i, :], + c=mymap(rescale(amps[i], Amin, Amax))) + for i in range(namps - 1): + ax.plot(Qm * 1e2, alphah_eff_sub_norm_fit[i, :], '--', + c=mymap(rescale(amps[i], Amin, Amax))) + cbar = plt.colorbar(sm_amp) + cbar.ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=28) + plt.tight_layout() + + + # 6: parameters + fig, ax = plt.subplots(figsize=(21, 7)) + ax.set_xlabel('$A_{drive}\ (kPa)$', fontsize=28) + ax.set_ylabel('$\\alpha_{h,\ eff-sub-norm}\ fit\ params$', fontsize=28) + ax.plot(amps[1:] * 1e-3, params[:, 0], label='mu1') + ax.plot(amps[1:] * 1e-3, params[:, 1], 
label='mu2') + ax.plot(amps[1:] * 1e-3, params[:, 2], label='sigma1') + ax.plot(amps[1:] * 1e-3, params[:, 3], label='sigma2') + ax.plot(amps[1:] * 1e-3, params[:, 4], label='a1') + ax.plot(amps[1:] * 1e-3, params[:, 5], label='a2') + ax.grid() + ax.legend(fontsize=28) + + + # 7: alphah_eff_predict + fig, ax = plt.subplots(figsize=(21, 7)) + ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) + ax.set_ylabel('$\\alpha_{h,\ eff}\ prediction\ (ms^{-1})$', fontsize=28) + ax.set_xlim(Qmin * 1e2, Qmax * 1e2) + for i in range(namps): + ax.plot(Qm * 1e2, alphah_eff_predict[i, :] * 1e-3, + c=mymap(rescale(amps[i], Amin, Amax))) + cbar = plt.colorbar(sm_amp) + cbar.ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=28) + plt.tight_layout() + + + # 8: alphah_eff_predict - alphah_eff + fig, ax = plt.subplots(figsize=(21, 7)) + ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) + ax.set_ylabel('$\\alpha_{h,\ eff}\ difference\ (ms^{-1})$', fontsize=28) + ax.set_xlim(Qmin * 1e2, Qmax * 1e2) + for i in range(namps): + ax.plot(Qm * 1e2, alphah_eff_diff[i, :] * 1e-3, + c=mymap(rescale(amps[i], Amin, Amax))) + cbar = plt.colorbar(sm_amp) + cbar.ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=28) + plt.tight_layout() + + # 9: RMSE & max absolute error + fig, ax = plt.subplots(figsize=(21, 7)) + ax.set_xlabel('$A_{drive} \ (kPa)$', fontsize=28) + ax.set_ylabel('$Error\ (ms^{-1})$', fontsize=28) + ax.plot(amps * 1e-3, alphah_eff_rmse * 1e-3, c='C0', + label='$RMSE\ -\ entire\ Q_m\ range$') + ax.plot(amps * 1e-3, alphah_eff_rmse_trueQ * 1e-3, c='C1', + label='$RMSE\ -\ realistic\ Q_m\ range$') + ax.plot(amps * 1e-3, alphah_eff_maxdiff * 1e-3, '--', c='C0', + label='$MAE\ -\ entire\ Q_m\ range$') + ax.plot(amps * 1e-3, alphah_eff_maxdiff_trueQ * 1e-3, '--', c='C1', + label='$MAE\ -\ realistic\ Q_m\ range$') + ax.legend(fontsize=28) + plt.tight_layout() + + plt.show() + diff --git a/deprecated/curve fitting/fit_alphameff.py b/deprecated/curve fitting/fit_alphameff.py new file mode 100644 index 0000000..b34a1c7 --- /dev/null +++ b/deprecated/curve fitting/fit_alphameff.py @@ -0,0 +1,287 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# @Author: Theo Lemaire +# @Date: 2017-01-17 11:41:53 +# @Email: theo.lemaire@epfl.ch +# @Last Modified by: Theo Lemaire +# @Last Modified time: 2017-02-15 15:44:31 + +''' Detailed fitting strategy of the alpha_m_eff profiles ''' + +import os +import ntpath +import re +import pickle +import matplotlib.pyplot as plt +import matplotlib.cm as cm +import numpy as np +from scipy.optimize import curve_fit +from utils import OpenFilesDialog, rescale, rmse, find_nearest + + +def supraGauss(x, x0, a, b): + return 2 / (np.exp(a * np.abs(x - x0)**b) + np.exp(-a * np.abs(x - x0)**b)) + + +def absPow(x, x0, a, b, c): + return a * np.abs(x - x0)**b + c + + +def sigmoid(x, x0, a, b): + return 1 - 1 / (1 + np.abs((x - x0) / a)**b) + + +def hybridPowGauss(x, a, b, c, d): + return supraGauss(x, 0.0, a, b) * absPow(x, 0.0, c, d, 1.0) + + +def hybridPowSigmoid(x, a, b, c, d): + return sigmoid(x, 0.0, a, b) * absPow(x, 0.0, c, d, 0.0) + + +def piecewiseSigPowGauss(x, x0, a, b, c, d, e, f): + y = np.empty(x.size) + y[x < 0.] = sigmoid(x[x < 0.], x0, a, b) + y[x >= 0.] 
= hybridPowGauss(x[x >= 0.], c, d, e, f) + return y + + + +# Select data files (PKL) +lookup_root = '../Output/lookups extended 0.35MHz/' +lookup_absroot = os.path.abspath(lookup_root) +lookup_filepaths = OpenFilesDialog(lookup_absroot, 'pkl') +rgxp = re.compile('lookups_a(\d*.\d*)nm_f(\d*.\d*)kHz_A(\d*.\d*)kPa_dQ(\d*.\d*)nC_cm2.pkl') +plot_bool = 1 + +nQ = 300 +baseline_ind = -1 + +# Check dialog output +if not lookup_filepaths: + print('error: no lookup table selected') +else: + print('importing alpham_eff profiles from lookup tables') + nfiles = len(lookup_filepaths) + + # Initialize coefficients matrices + amps = np.empty(nfiles) + alpham_eff = np.empty((nfiles, nQ)) + Vm_eff = np.empty((nfiles, nQ)) + + for i in range(nfiles): + + # Load lookup table + lookup_filename = ntpath.basename(lookup_filepaths[i]) + mo = rgxp.fullmatch(lookup_filename) + if not mo: + print('Error: lookup file does not match regular expression pattern') + else: + # Retrieve stimulus parameters + Fdrive = float(mo.group(2)) * 1e3 + Adrive = float(mo.group(3)) * 1e3 + dQ = float(mo.group(4)) * 1e-2 + amps[i] = Adrive + if Adrive == 0: + baseline_ind = i + + # Retrieve coefficients data + with open(lookup_filepaths[i], 'rb') as fh: + lookup = pickle.load(fh) + Qm = lookup['Q'] + alpham_eff[i, :] = lookup['alpha_m_eff'] + Vm_eff[i, :] = lookup['V_eff'] + + if baseline_ind == -1: + print('Error: no baseline profile selected') + else: + + Amin = np.amin(amps) + Amax = np.amax(amps) + Qmin = np.amin(Qm) + Qmax = np.amax(Qm) + namps = nfiles + + i_trueQ_lb, trueQ_lb = find_nearest(Qm, -0.8) + i_trueQ_ub, trueQ_ub = find_nearest(Qm, 0.4) + + # Baseline subtraction + print('subtracting baseline (Adrive = 0) from profiles') + alpham_eff_sub = (alpham_eff - alpham_eff[baseline_ind, :]) + Vm_eff_sub = (Vm_eff - Vm_eff[baseline_ind, :]) + + # Suppressing Qm component + print('dividing by Qm') + alpham_eff_sub_even = alpham_eff_sub / Qm + + # Peaks fitting on even profiles + print('fitting power law to peaks of even profiles') + alpham_eff_sub_even_peaks = np.amax(alpham_eff_sub_even, axis=1) + # pguess_peaks = (1e4, 1.6, 3.5, 0.4) + # popt, _ = curve_fit(hybridPowSigmoid, amps, alpham_eff_sub_even_peaks, p0=pguess_peaks) + # alpham_eff_sub_even_peaks_fit = hybridPowSigmoid(amps, *popt) + + # Normalization + print('normalizing even profiles') + alpham_eff_sub_even_norm = alpham_eff_sub_even[1:, :]\ + / alpham_eff_sub_even_peaks[1:].reshape(namps - 1, 1) + + # Normalized profiles fitting + # print('fitting hybrid gaussian-power law to normalized alphameff-sub-even') + # alpham_eff_sub_even_norm_fit = np.empty((namps - 1, nQ)) + # params = np.empty((namps - 1, 7)) + # for i in range(namps - 1): + # popt, _ = curve_fit(piecewiseSigPowGauss, Qm, alpham_eff_sub_even_norm[i, :], + # bounds=([-np.infty, -1., -np.infty, 0., 0., -np.infty, 0.], + # [np.infty, 0., 0., np.infty, np.infty, 0., np.infty])) + # alpham_eff_sub_even_norm_fit[i, :] = piecewiseSigPowGauss(Qm, *popt) + # params[i, :] = np.asarray(popt) + + # Predict alpham_eff profiles + # print('predicting alpham_eff by reconstructing from fits') + # alpham_eff_sub_even_predict = np.vstack((np.zeros(nQ), alpham_eff_sub_even_norm_fit))\ + # * alpham_eff_sub_even_peaks_fit.reshape(namps, 1) + # alpham_eff_sub_predict = alpham_eff_sub_even_predict * Qm + # alpham_eff_predict = alpham_eff_sub_predict + alpham_eff[baseline_ind, :] + + # # Analyze prediction accuracy, in wide and realistic charge ranges + # alpham_eff_trueQ = alpham_eff[:, i_trueQ_lb:i_trueQ_ub] + # 
alpham_eff_predict_trueQ = alpham_eff_predict[:, i_trueQ_lb:i_trueQ_ub] + # alpham_eff_diff = alpham_eff_predict - alpham_eff + # alpham_eff_diff_trueQ = alpham_eff_diff[:, i_trueQ_lb:i_trueQ_ub] + # alpham_eff_maxdiff = np.amax(np.abs(alpham_eff_diff), axis=1) + # alpham_eff_maxdiff_trueQ = np.amax(np.abs(alpham_eff_diff_trueQ), axis=1) + # alpham_eff_rmse = np.empty(namps) + # alpham_eff_rmse_trueQ = np.empty(namps) + # for i in range(namps): + # alpham_eff_rmse[i] = rmse(alpham_eff[i, :], alpham_eff_predict[i, :]) + # alpham_eff_rmse_trueQ[i] = rmse(alpham_eff_trueQ[i, :], alpham_eff_predict_trueQ[i, :]) + + + if plot_bool == 1: + + # Plotting + print('plotting') + + mymap = cm.get_cmap('jet') + sm_amp = plt.cm.ScalarMappable(cmap=mymap, norm=plt.Normalize(Amin * 1e-3, Amax * 1e-3)) + sm_amp._A = [] + + # 1: alpham_eff + fig, ax = plt.subplots(figsize=(21, 7)) + ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) + ax.set_ylabel('$\\alpha_{m,\ eff}\ (ms^{-1})$', fontsize=28) + ax.set_xlim(Qmin * 1e5, Qmax * 1e5) + for i in range(namps): + ax.plot(Qm * 1e5, alpham_eff[i, :] * 1e-3, c=mymap(rescale(amps[i], Amin, Amax))) + cbar = plt.colorbar(sm_amp) + cbar.ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=28) + plt.tight_layout() + + # 2: alpham_eff_sub + fig, ax = plt.subplots(figsize=(21, 7)) + ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) + ax.set_ylabel('$\\alpha_{m,\ eff-sub}\ (ms^{-1})$', fontsize=28) + ax.set_xlim(Qmin * 1e5, Qmax * 1e5) + for i in range(namps): + ax.plot(Qm * 1e5, alpham_eff_sub[i, :] * 1e-3, + c=mymap(rescale(amps[i], Amin, Amax))) + cbar = plt.colorbar(sm_amp) + cbar.ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=28) + plt.tight_layout() + + # # 3: alpham_eff_sub_even + # fig, ax = plt.subplots(figsize=(21, 7)) + # ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) + # ax.set_ylabel('$\\alpha_{m,\ eff-sub-even}\ (ms^{-1}\ cm^2/nC)$', fontsize=28) + # ax.set_xlim(Qmin * 1e2, Qmax * 1e2) + # for i in range(namps): + # ax.plot(Qm * 1e2, alpham_eff_sub_even[i, :] * 1e-3, + # c=mymap(rescale(amps[i], Amin, Amax))) + # cbar = plt.colorbar(sm_amp) + # cbar.ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=28) + # plt.tight_layout() + + # # 4: alpham_eff_sub_even_peaks + # fig, ax = plt.subplots(figsize=(21, 7)) + # ax.set_xlabel('$A_{drive}\ (kPa)$', fontsize=28) + # ax.set_ylabel('$\\alpha_{m,\ eff-sub-even-peaks}\ (ms^{-1}\ cm^2/nC)$', fontsize=28) + # ax.scatter(amps * 1e-3, alpham_eff_sub_even_peaks * 1e-3, s=30, c='C0', label='data') + # ax.plot(amps * 1e-3, alpham_eff_sub_even_peaks_fit * 1e-3, c='C1', label='fit') + # ax.legend(fontsize=28) + # plt.tight_layout() + + # # 5: alpham_eff_sub_even_norm + # fig, ax = plt.subplots(figsize=(21, 7)) + # ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) + # ax.set_ylabel('$\\alpha_{m,\ eff-sub-even-norm}\ (-)$', fontsize=28) + # ax.set_xlim(Qmin * 1e2, Qmax * 1e2) + # ax.grid() + # for i in range(namps - 1): + # ax.plot(Qm * 1e2, alpham_eff_sub_even_norm[i, :], + # c=mymap(rescale(amps[i], Amin, Amax))) + # for i in range(namps - 1): + # ax.plot(Qm * 1e2, alpham_eff_sub_even_norm_fit[i, :], '--', + # c=mymap(rescale(amps[i], Amin, Amax))) + # cbar = plt.colorbar(sm_amp) + # cbar.ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=28) + # plt.tight_layout() + + + # # 6: piecewise function parameters + # fig, ax = plt.subplots(figsize=(21, 7)) + # ax.set_xlabel('$A_{drive}\ (kPa)$', fontsize=28) + # ax.set_ylabel('$\\alpha_{m,\ eff-sub-even-norm}\ fit\ params$', fontsize=28) + # ax.plot(amps[1:] * 1e-3, params[:, 0], label='x0') + # 
ax.plot(amps[1:] * 1e-3, params[:, 1], label='a') + # ax.plot(amps[1:] * 1e-3, params[:, 2], label='b') + # ax.plot(amps[1:] * 1e-3, params[:, 3], label='c') + # ax.plot(amps[1:] * 1e-3, params[:, 4], label='d') + # ax.plot(amps[1:] * 1e-3, params[:, 5], label='e') + # ax.plot(amps[1:] * 1e-3, params[:, 6], label='f') + # ax.grid() + # ax.legend(fontsize=28) + + + # # 7: alpham_eff_predict + # fig, ax = plt.subplots(figsize=(21, 7)) + # ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) + # ax.set_ylabel('$\\alpha_{m,\ eff}\ prediction\ (ms^{-1})$', fontsize=28) + # ax.set_xlim(Qmin * 1e2, Qmax * 1e2) + # for i in range(namps): + # ax.plot(Qm * 1e2, alpham_eff_predict[i, :] * 1e-3, linewidth=2, + # c=mymap(rescale(amps[i], Amin, Amax))) + # cbar = plt.colorbar(sm_amp) + # cbar.ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=28) + # plt.tight_layout() + + + # # 8: alpham_eff_predict - alpham_eff + # # fig, ax = plt.subplots(figsize=(21, 7)) + # # ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) + # # ax.set_ylabel('$\\alpha_{m,\ eff}\ difference\ (ms^{-1})$', fontsize=28) + # # ax.set_xlim(Qmin * 1e2, Qmax * 1e2) + # # for i in range(namps): + # # ax.plot(Qm * 1e2, alpham_eff_diff[i, :] * 1e-3, linewidth=2, + # # c=mymap(rescale(amps[i], Amin, Amax))) + # # cbar = plt.colorbar(sm_amp) + # # cbar.ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=28) + # # plt.tight_layout() + + # # 9: RMSE & max absolute error + # fig, ax = plt.subplots(figsize=(21, 7)) + # ax.set_xlabel('$A_{drive} \ (kPa)$', fontsize=28) + # ax.set_ylabel('$RMSE\ (ms^{-1})$', fontsize=28) + # ax.plot(amps * 1e-3, alpham_eff_rmse * 1e-3, linewidth=2, c='C0', + # label='$RMSE\ -\ entire\ Q_m\ range$') + # ax.plot(amps * 1e-3, alpham_eff_rmse_trueQ * 1e-3, linewidth=2, c='C1', + # label='$RMSE\ -\ realistic\ Q_m\ range$') + # ax.plot(amps * 1e-3, alpham_eff_maxdiff * 1e-3, '--', linewidth=2, c='C0', + # label='$MAE\ -\ entire\ Q_m\ range$') + # ax.plot(amps * 1e-3, alpham_eff_maxdiff_trueQ * 1e-3, '--', linewidth=2, c='C1', + # label='$MAE\ -\ realistic\ Q_m\ range$') + # ax.legend(fontsize=28) + # plt.tight_layout() + + plt.show() + diff --git a/deprecated/curve fitting/fit_alphaneff.py b/deprecated/curve fitting/fit_alphaneff.py new file mode 100644 index 0000000..03f451a --- /dev/null +++ b/deprecated/curve fitting/fit_alphaneff.py @@ -0,0 +1,284 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# @Author: Theo Lemaire +# @Date: 2017-02-07 18:52:13 +# @Email: theo.lemaire@epfl.ch +# @Last Modified by: Theo Lemaire +# @Last Modified time: 2017-02-14 15:48:31 + +''' Detailed fitting strategy of the alpha_n_eff profiles ''' + +import os +import ntpath +import re +import pickle +import matplotlib.pyplot as plt +import matplotlib.cm as cm +import numpy as np +from scipy.optimize import curve_fit +from utils import OpenFilesDialog, rescale, rmse, find_nearest + + +def supraGauss(x, x0, a, b): + return 2 / (np.exp(a * np.abs(x - x0)**b) + np.exp(-a * np.abs(x - x0)**b)) + + +def absPow(x, x0, a, b, c): + return a * np.abs(x - x0)**b + c + + +def sigmoid(x, x0, a, b): + return 1 - 1 / (1 + np.abs((x - x0) / a)**b) + + +def hybridPowGauss(x, a, b, c, d): + return supraGauss(x, 0.0, a, b) * absPow(x, 0.0, c, d, 1.0) + + +def hybridPowSigmoid(x, a, b, c, d): + return sigmoid(x, 0.0, a, b) * absPow(x, 0.0, c, d, 0.0) + + +def piecewiseSigPowGauss(x, x0, a, b, c, d, e, f): + y = np.empty(x.size) + y[x < 0.] = sigmoid(x[x < 0.], x0, a, b) + y[x >= 0.] 
= hybridPowGauss(x[x >= 0.], c, d, e, f) + return y + + + +# Select data files (PKL) +lookup_root = '../Output/lookups extended 0.35MHz/' +lookup_absroot = os.path.abspath(lookup_root) +lookup_filepaths = OpenFilesDialog(lookup_absroot, 'pkl') +rgxp = re.compile('lookups_a(\d*.\d*)nm_f(\d*.\d*)kHz_A(\d*.\d*)kPa_dQ(\d*.\d*)nC_cm2.pkl') +plot_bool = 1 + +nQ = 300 +baseline_ind = -1 + +# Check dialog output +if not lookup_filepaths: + print('error: no lookup table selected') +else: + print('importing alphan_eff profiles from lookup tables') + nfiles = len(lookup_filepaths) + + # Initialize coefficients matrices + amps = np.empty(nfiles) + alphan_eff = np.empty((nfiles, nQ)) + + for i in range(nfiles): + + # Load lookup table + lookup_filename = ntpath.basename(lookup_filepaths[i]) + mo = rgxp.fullmatch(lookup_filename) + if not mo: + print('Error: lookup file does not match regular expression pattern') + else: + # Retrieve stimulus parameters + Fdrive = float(mo.group(2)) * 1e3 + Adrive = float(mo.group(3)) * 1e3 + dQ = float(mo.group(4)) * 1e-2 + amps[i] = Adrive + if Adrive == 0: + baseline_ind = i + + # Retrieve coefficients data + with open(lookup_filepaths[i], 'rb') as fh: + lookup = pickle.load(fh) + Qm = lookup['Q'] + alphan_eff[i, :] = lookup['alpha_n_eff'] + + if baseline_ind == -1: + print('Error: no baseline profile selected') + else: + + Amin = np.amin(amps) + Amax = np.amax(amps) + Qmin = np.amin(Qm) + Qmax = np.amax(Qm) + namps = nfiles + + i_trueQ_lb, trueQ_lb = find_nearest(Qm, -0.8) + i_trueQ_ub, trueQ_ub = find_nearest(Qm, 0.4) + + # Baseline subtraction + print('subtracting baseline (Adrive = 0) from profiles') + alphan_eff_sub = (alphan_eff - alphan_eff[baseline_ind, :]) + + # Suppressing Qm component + print('dividing by Qm') + alphan_eff_sub_even = alphan_eff_sub / Qm + + # Peaks fitting on even profiles + print('fitting power law to peaks of even profiles') + alphan_eff_sub_even_peaks = np.amax(alphan_eff_sub_even, axis=1) + pguess_peaks = (1e4, 1.6, 3.5, 0.4) + popt, _ = curve_fit(hybridPowSigmoid, amps, alphan_eff_sub_even_peaks, p0=pguess_peaks) + alphan_eff_sub_even_peaks_fit = hybridPowSigmoid(amps, *popt) + + # Normalization + print('normalizing even profiles') + alphan_eff_sub_even_norm = alphan_eff_sub_even[1:, :]\ + / alphan_eff_sub_even_peaks[1:].reshape(namps - 1, 1) + + # Normalized profiles fitting + print('fitting hybrid gaussian-power law to normalized alphaneff-sub-even') + alphan_eff_sub_even_norm_fit = np.empty((namps - 1, nQ)) + params = np.empty((namps - 1, 7)) + for i in range(namps - 1): + popt, _ = curve_fit(piecewiseSigPowGauss, Qm, alphan_eff_sub_even_norm[i, :], + bounds=([-np.infty, -1., -np.infty, 0., 0., -np.infty, 0.], + [np.infty, 0., 0., np.infty, np.infty, 0., np.infty])) + alphan_eff_sub_even_norm_fit[i, :] = piecewiseSigPowGauss(Qm, *popt) + params[i, :] = np.asarray(popt) + + # Predict alphan_eff profiles + print('predicting alphan_eff by reconstructing from fits') + alphan_eff_sub_even_predict = np.vstack((np.zeros(nQ), alphan_eff_sub_even_norm_fit))\ + * alphan_eff_sub_even_peaks_fit.reshape(namps, 1) + alphan_eff_sub_predict = alphan_eff_sub_even_predict * Qm + alphan_eff_predict = alphan_eff_sub_predict + alphan_eff[baseline_ind, :] + + # Analyze prediction accuracy, in wide and realistic charge ranges + alphan_eff_trueQ = alphan_eff[:, i_trueQ_lb:i_trueQ_ub] + alphan_eff_predict_trueQ = alphan_eff_predict[:, i_trueQ_lb:i_trueQ_ub] + alphan_eff_diff = alphan_eff_predict - alphan_eff + alphan_eff_diff_trueQ = alphan_eff_diff[:, 
i_trueQ_lb:i_trueQ_ub] + alphan_eff_maxdiff = np.amax(np.abs(alphan_eff_diff), axis=1) + alphan_eff_maxdiff_trueQ = np.amax(np.abs(alphan_eff_diff_trueQ), axis=1) + alphan_eff_rmse = np.empty(namps) + alphan_eff_rmse_trueQ = np.empty(namps) + for i in range(namps): + alphan_eff_rmse[i] = rmse(alphan_eff[i, :], alphan_eff_predict[i, :]) + alphan_eff_rmse_trueQ[i] = rmse(alphan_eff_trueQ[i, :], alphan_eff_predict_trueQ[i, :]) + + + if plot_bool == 1: + + # Plotting + print('plotting') + + mymap = cm.get_cmap('jet') + sm_amp = plt.cm.ScalarMappable(cmap=mymap, norm=plt.Normalize(Amin * 1e-3, Amax * 1e-3)) + sm_amp._A = [] + + # 1: alphan_eff + fig, ax = plt.subplots(figsize=(21, 7)) + ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) + ax.set_ylabel('$\\alpha_{n,\ eff}\ (ms^{-1})$', fontsize=28) + ax.set_xlim(Qmin * 1e2, Qmax * 1e2) + for i in range(namps): + ax.plot(Qm * 1e2, alphan_eff[i, :] * 1e-3, c=mymap(rescale(amps[i], Amin, Amax))) + cbar = plt.colorbar(sm_amp) + cbar.ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=28) + plt.tight_layout() + + # 2: alphan_eff_sub + fig, ax = plt.subplots(figsize=(21, 7)) + ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) + ax.set_ylabel('$\\alpha_{n,\ eff-sub}\ (ms^{-1})$', fontsize=28) + ax.set_xlim(Qmin * 1e2, Qmax * 1e2) + for i in range(namps): + ax.plot(Qm * 1e2, alphan_eff_sub[i, :] * 1e-3, + c=mymap(rescale(amps[i], Amin, Amax))) + cbar = plt.colorbar(sm_amp) + cbar.ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=28) + plt.tight_layout() + + # 3: alphan_eff_sub_even + fig, ax = plt.subplots(figsize=(21, 7)) + ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) + ax.set_ylabel('$\\alpha_{n,\ eff-sub-even}\ (ms^{-1}\ cm^2/nC)$', fontsize=28) + ax.set_xlim(Qmin * 1e2, Qmax * 1e2) + for i in range(namps): + ax.plot(Qm * 1e2, alphan_eff_sub_even[i, :] * 1e-3, + c=mymap(rescale(amps[i], Amin, Amax))) + cbar = plt.colorbar(sm_amp) + cbar.ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=28) + plt.tight_layout() + + # 4: alphan_eff_sub_even_peaks + fig, ax = plt.subplots(figsize=(21, 7)) + ax.set_xlabel('$A_{drive}\ (kPa)$', fontsize=28) + ax.set_ylabel('$\\alpha_{n,\ eff-sub-even-peaks}\ (ms^{-1}\ cm^2/nC)$', fontsize=28) + ax.scatter(amps * 1e-3, alphan_eff_sub_even_peaks * 1e-3, s=30, c='C0', label='data') + ax.plot(amps * 1e-3, alphan_eff_sub_even_peaks_fit * 1e-3, c='C1', label='fit') + ax.legend(fontsize=28) + plt.tight_layout() + + # 5: alphan_eff_sub_even_norm + fig, ax = plt.subplots(figsize=(21, 7)) + ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) + ax.set_ylabel('$\\alpha_{n,\ eff-sub-even-norm}\ (-)$', fontsize=28) + ax.set_xlim(Qmin * 1e2, Qmax * 1e2) + ax.grid() + for i in range(namps - 1): + ax.plot(Qm * 1e2, alphan_eff_sub_even_norm[i, :], + c=mymap(rescale(amps[i], Amin, Amax))) + for i in range(namps - 1): + ax.plot(Qm * 1e2, alphan_eff_sub_even_norm_fit[i, :], '--', + c=mymap(rescale(amps[i], Amin, Amax))) + cbar = plt.colorbar(sm_amp) + cbar.ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=28) + plt.tight_layout() + + + # 6: piecewise function parameters + fig, ax = plt.subplots(figsize=(21, 7)) + ax.set_xlabel('$A_{drive}\ (kPa)$', fontsize=28) + ax.set_ylabel('$\\alpha_{n,\ eff-sub-even-norm}\ fit\ params$', fontsize=28) + ax.plot(amps[1:] * 1e-3, params[:, 0], label='x0') + ax.plot(amps[1:] * 1e-3, params[:, 1], label='a') + ax.plot(amps[1:] * 1e-3, params[:, 2], label='b') + ax.plot(amps[1:] * 1e-3, params[:, 3], label='c') + ax.plot(amps[1:] * 1e-3, params[:, 4], label='d') + ax.plot(amps[1:] * 1e-3, params[:, 5], label='e') + ax.plot(amps[1:] 
* 1e-3, params[:, 6], label='f') + ax.grid() + ax.legend(fontsize=28) + + + # 7: alphan_eff_predict + fig, ax = plt.subplots(figsize=(21, 7)) + ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) + ax.set_ylabel('$\\alpha_{n,\ eff}\ prediction\ (ms^{-1})$', fontsize=28) + ax.set_xlim(Qmin * 1e2, Qmax * 1e2) + for i in range(namps): + ax.plot(Qm * 1e2, alphan_eff_predict[i, :] * 1e-3, linewidth=2, + c=mymap(rescale(amps[i], Amin, Amax))) + cbar = plt.colorbar(sm_amp) + cbar.ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=28) + plt.tight_layout() + + + # 8: alphan_eff_predict - alphan_eff + # fig, ax = plt.subplots(figsize=(21, 7)) + # ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) + # ax.set_ylabel('$\\alpha_{n,\ eff}\ difference\ (ms^{-1})$', fontsize=28) + # ax.set_xlim(Qmin * 1e2, Qmax * 1e2) + # for i in range(namps): + # ax.plot(Qm * 1e2, alphan_eff_diff[i, :] * 1e-3, linewidth=2, + # c=mymap(rescale(amps[i], Amin, Amax))) + # cbar = plt.colorbar(sm_amp) + # cbar.ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=28) + # plt.tight_layout() + + # 9: RMSE & max absolute error + fig, ax = plt.subplots(figsize=(21, 7)) + ax.set_xlabel('$A_{drive} \ (kPa)$', fontsize=28) + ax.set_ylabel('$RMSE\ (ms^{-1})$', fontsize=28) + ax.plot(amps * 1e-3, alphan_eff_rmse * 1e-3, linewidth=2, c='C0', + label='$RMSE\ -\ entire\ Q_m\ range$') + ax.plot(amps * 1e-3, alphan_eff_rmse_trueQ * 1e-3, linewidth=2, c='C1', + label='$RMSE\ -\ realistic\ Q_m\ range$') + ax.plot(amps * 1e-3, alphan_eff_maxdiff * 1e-3, '--', linewidth=2, c='C0', + label='$MAE\ -\ entire\ Q_m\ range$') + ax.plot(amps * 1e-3, alphan_eff_maxdiff_trueQ * 1e-3, '--', linewidth=2, c='C1', + label='$MAE\ -\ realistic\ Q_m\ range$') + ax.legend(fontsize=28) + plt.tight_layout() + + plt.show() + diff --git a/deprecated/curve fitting/fit_betaheff.py b/deprecated/curve fitting/fit_betaheff.py new file mode 100644 index 0000000..6f47e4c --- /dev/null +++ b/deprecated/curve fitting/fit_betaheff.py @@ -0,0 +1,251 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# @Author: Theo Lemaire +# @Date: 2017-02-07 15:15:11 +# @Email: theo.lemaire@epfl.ch +# @Last Modified by: Theo Lemaire +# @Last Modified time: 2017-02-14 15:48:36 + +''' Detailed fitting strategy of the beta_h_eff profiles ''' + +import os +import ntpath +import re +import pickle +import matplotlib.pyplot as plt +import matplotlib.cm as cm +import numpy as np +from scipy.optimize import curve_fit +from utils import OpenFilesDialog, rescale, rmse, find_nearest + + +def gaussian(x, mu, sigma, a): + return a * np.exp(-((x - mu) / (2 * sigma))**2) + + +def gauss3(x, a1, mu1, sig1, a2, mu2, sig2, a3, mu3, sig3): + return gaussian(x, mu1, sig1, a1) + gaussian(x, mu2, sig2, a2) + gaussian(x, mu3, sig3, a3) + + +def sigmoid(x, x0, a, b): + return 1 - 1 / (1 + np.abs((x - x0) / a)**b) + + +# Select data files (PKL) +lookup_root = '../Output/lookups extended 0.35MHz/' +lookup_absroot = os.path.abspath(lookup_root) +lookup_filepaths = OpenFilesDialog(lookup_absroot, 'pkl') +rgxp = re.compile('lookups_a(\d*.\d*)nm_f(\d*.\d*)kHz_A(\d*.\d*)kPa_dQ(\d*.\d*)nC_cm2.pkl') +plot_bool = 1 + +nQ = 300 +baseline_ind = -1 + +# Check dialog output +if not lookup_filepaths: + print('error: no lookup table selected') +else: + print('importing betah_eff profiles from lookup tables') + nfiles = len(lookup_filepaths) + + # Initialize coefficients matrices + amps = np.empty(nfiles) + betah_eff = np.empty((nfiles, nQ)) + + for i in range(nfiles): + + # Load lookup table + lookup_filename = ntpath.basename(lookup_filepaths[i]) + 
mo = rgxp.fullmatch(lookup_filename) + if not mo: + print('Error: lookup file does not match regular expression pattern') + else: + # Retrieve stimulus parameters + Fdrive = float(mo.group(2)) * 1e3 + Adrive = float(mo.group(3)) * 1e3 + dQ = float(mo.group(4)) * 1e-2 + amps[i] = Adrive + if Adrive == 0: + baseline_ind = i + + # Retrieve coefficients data + with open(lookup_filepaths[i], 'rb') as fh: + lookup = pickle.load(fh) + Qm = lookup['Q'] + betah_eff[i, :] = lookup['beta_h_eff'] + + if baseline_ind == -1: + print('Error: no baseline profile selected') + else: + + Amin = np.amin(amps) + Amax = np.amax(amps) + Qmin = np.amin(Qm) + Qmax = np.amax(Qm) + namps = nfiles + + i_trueQ_lb, trueQ_lb = find_nearest(Qm, -0.8) + i_trueQ_ub, trueQ_ub = find_nearest(Qm, 0.4) + + # Baseline subtraction + print('subtracting baseline (Adrive = 0) from profiles') + betah_eff_sub = (betah_eff - betah_eff[baseline_ind, :]) + + # Peaks detection on subtracted profiles + print('finding peaks on subtracted profiles') + betah_eff_sub_peaks = np.amax(np.abs(betah_eff_sub), axis=1) + + # Normalization + print('normalizing subtracted profiles') + betah_eff_sub_norm = betah_eff_sub[1:, :]\ + / betah_eff_sub_peaks[1:].reshape(namps - 1, 1) + + # Normalized profiles fitting + print('fitting "mexican hat" to normalized betaheff-sub') + betah_eff_sub_norm_fit = np.empty((namps - 1, nQ)) + params = np.empty((namps - 1, 9)) + for i in range(namps - 1): + popt, _ = curve_fit(gauss3, Qm, betah_eff_sub_norm[i], + bounds=([0.0, -0.5, 0.0, -1.2, -0.2, 0., 0.0, 0.0, 0.0], + [0.3, -0.2, np.inf, -0.8, 0.0, np.inf, 0.1, 0.1, np.inf]), + max_nfev=100000) + betah_eff_sub_norm_fit[i, :] = gauss3(Qm, *popt) + params[i, :] = np.asarray(popt) + + # Predict betah_eff profiles + print('predicting betah_eff by reconstructing from fits') + betah_eff_sub_predict = np.vstack((np.zeros(nQ), betah_eff_sub_norm_fit))\ + * betah_eff_sub_peaks.reshape(namps, 1) + betah_eff_predict = betah_eff_sub_predict + betah_eff[baseline_ind, :] + + # Analyze prediction accuracy, in wide and realistic charge ranges + betah_eff_trueQ = betah_eff[:, i_trueQ_lb:i_trueQ_ub] + betah_eff_predict_trueQ = betah_eff_predict[:, i_trueQ_lb:i_trueQ_ub] + betah_eff_diff = betah_eff_predict - betah_eff + betah_eff_diff_trueQ = betah_eff_diff[:, i_trueQ_lb:i_trueQ_ub] + betah_eff_maxdiff = np.amax(np.abs(betah_eff_diff), axis=1) + betah_eff_maxdiff_trueQ = np.amax(np.abs(betah_eff_diff_trueQ), axis=1) + betah_eff_rmse = np.empty(namps) + betah_eff_rmse_trueQ = np.empty(namps) + for i in range(namps): + betah_eff_rmse[i] = rmse(betah_eff[i, :], betah_eff_predict[i, :]) + betah_eff_rmse_trueQ[i] = rmse(betah_eff_trueQ[i, :], betah_eff_predict_trueQ[i, :]) + + + if plot_bool == 1: + + # Plotting + print('plotting') + + mymap = cm.get_cmap('jet') + sm_amp = plt.cm.ScalarMappable(cmap=mymap, norm=plt.Normalize(Amin * 1e-3, Amax * 1e-3)) + sm_amp._A = [] + + # 1: betah_eff + fig, ax = plt.subplots(figsize=(21, 7)) + ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) + ax.set_ylabel('$\\beta_{h,\ eff}\ (ms^{-1})$', fontsize=28) + ax.set_xlim(Qmin * 1e2, Qmax * 1e2) + for i in range(namps): + ax.plot(Qm * 1e2, betah_eff[i, :] * 1e-3, c=mymap(rescale(amps[i], Amin, Amax))) + cbar = plt.colorbar(sm_amp) + cbar.ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=28) + plt.tight_layout() + + # 2: betah_eff_sub + fig, ax = plt.subplots(figsize=(21, 7)) + ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) + ax.set_ylabel('$\\beta_{h,\ eff-sub}\ (ms^{-1})$', fontsize=28) + ax.set_xlim(Qmin * 1e2, 
Qmax * 1e2) + for i in range(namps): + ax.plot(Qm * 1e2, betah_eff_sub[i, :] * 1e-3, + c=mymap(rescale(amps[i], Amin, Amax))) + cbar = plt.colorbar(sm_amp) + cbar.ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=28) + plt.tight_layout() + + # 3: betah_eff_sub_peaks + fig, ax = plt.subplots(figsize=(21, 7)) + ax.set_xlabel('$A_{drive}\ (kPa)$', fontsize=28) + ax.set_ylabel('$\\beta_{h,\ eff-sub-peaks}\ (ms^{-1})$', fontsize=28) + ax.scatter(amps * 1e-3, betah_eff_sub_peaks * 1e-3, s=30, c='C0', label='data') + # ax.plot(amps * 1e-3, betah_eff_sub_peaks_fit * 1e-3, c='C1', label='fit') + ax.legend(fontsize=28) + plt.tight_layout() + + # 5: betah_eff_sub_norm + fig, ax = plt.subplots(figsize=(21, 7)) + ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) + ax.set_ylabel('$\\beta_{h,\ eff-sub-norm}\ (-)$', fontsize=28) + ax.set_xlim(Qmin * 1e2, Qmax * 1e2) + ax.grid() + for i in range(namps - 1): + ax.plot(Qm * 1e2, betah_eff_sub_norm[i, :], + c=mymap(rescale(amps[i], Amin, Amax))) + for i in range(namps - 1): + ax.plot(Qm * 1e2, betah_eff_sub_norm_fit[i, :], '--', + c=mymap(rescale(amps[i], Amin, Amax))) + cbar = plt.colorbar(sm_amp) + cbar.ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=28) + plt.tight_layout() + + + # 6: parameters + fig, ax = plt.subplots(figsize=(21, 7)) + ax.set_xlabel('$A_{drive}\ (kPa)$', fontsize=28) + ax.set_ylabel('$\\beta_{h,\ eff-sub-norm}\ fit\ params$', fontsize=28) + ax.plot(amps[1:] * 1e-3, params[:, 0], label='a1') + ax.plot(amps[1:] * 1e-3, params[:, 1], label='mu1') + ax.plot(amps[1:] * 1e-3, params[:, 2], label='sigma1') + ax.plot(amps[1:] * 1e-3, params[:, 3], label='a2') + ax.plot(amps[1:] * 1e-3, params[:, 4], label='mu2') + ax.plot(amps[1:] * 1e-3, params[:, 5], label='sigma2') + ax.plot(amps[1:] * 1e-3, params[:, 6], label='a3') + ax.plot(amps[1:] * 1e-3, params[:, 7], label='mu3') + ax.plot(amps[1:] * 1e-3, params[:, 8], label='sigma3') + ax.grid() + ax.legend(fontsize=28) + + + # 7: betah_eff_predict + fig, ax = plt.subplots(figsize=(21, 7)) + ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) + ax.set_ylabel('$\\beta_{h,\ eff}\ prediction\ (ms^{-1})$', fontsize=28) + ax.set_xlim(Qmin * 1e2, Qmax * 1e2) + for i in range(namps): + ax.plot(Qm * 1e2, betah_eff_predict[i, :] * 1e-3, + c=mymap(rescale(amps[i], Amin, Amax))) + cbar = plt.colorbar(sm_amp) + cbar.ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=28) + plt.tight_layout() + + + # 8: betah_eff_predict - betah_eff + fig, ax = plt.subplots(figsize=(21, 7)) + ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) + ax.set_ylabel('$\\beta_{h,\ eff}\ difference\ (ms^{-1})$', fontsize=28) + ax.set_xlim(Qmin * 1e2, Qmax * 1e2) + for i in range(namps): + ax.plot(Qm * 1e2, betah_eff_diff[i, :] * 1e-3, + c=mymap(rescale(amps[i], Amin, Amax))) + cbar = plt.colorbar(sm_amp) + cbar.ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=28) + plt.tight_layout() + + # 9: RMSE & max absolute error + fig, ax = plt.subplots(figsize=(21, 7)) + ax.set_xlabel('$A_{drive} \ (kPa)$', fontsize=28) + ax.set_ylabel('$Error\ (ms^{-1})$', fontsize=28) + ax.plot(amps * 1e-3, betah_eff_rmse * 1e-3, c='C0', + label='$RMSE\ -\ entire\ Q_m\ range$') + ax.plot(amps * 1e-3, betah_eff_rmse_trueQ * 1e-3, c='C1', + label='$RMSE\ -\ realistic\ Q_m\ range$') + ax.plot(amps * 1e-3, betah_eff_maxdiff * 1e-3, '--', c='C0', + label='$MAE\ -\ entire\ Q_m\ range$') + ax.plot(amps * 1e-3, betah_eff_maxdiff_trueQ * 1e-3, '--', c='C1', + label='$MAE\ -\ realistic\ Q_m\ range$') + ax.legend(fontsize=28) + plt.tight_layout() + + plt.show() + diff --git a/deprecated/curve 
fitting/fit_betameff.py b/deprecated/curve fitting/fit_betameff.py new file mode 100644 index 0000000..8ce86db --- /dev/null +++ b/deprecated/curve fitting/fit_betameff.py @@ -0,0 +1,285 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# @Author: Theo Lemaire +# @Date: 2017-02-06 14:20:03 +# @Email: theo.lemaire@epfl.ch +# @Last Modified by: Theo Lemaire +# @Last Modified time: 2017-02-14 15:48:41 + +''' Detailed fitting strategy of the beta_m_eff profiles ''' + +import os +import ntpath +import re +import pickle +import matplotlib.pyplot as plt +import matplotlib.cm as cm +import numpy as np +from scipy.optimize import curve_fit +from utils import OpenFilesDialog, rescale, rmse, find_nearest + + +def supraGauss(x, x0, a, b): + return 2 / (np.exp(a * np.abs(x - x0)**b) + np.exp(-a * np.abs(x - x0)**b)) + + +def absPow(x, x0, a, b, c): + return a * np.abs(x - x0)**b + c + + +def sigmoid(x, x0, a, b): + return 1 - 1 / (1 + np.abs((x - x0) / a)**b) + + +def hybridPowGauss(x, a, b, c, d): + return supraGauss(x, 0.0, a, b) * absPow(x, 0.0, c, d, 1.0) + + +def hybridPowSigmoid(x, a, b, c, d): + return sigmoid(x, 0.0, a, b) * absPow(x, 0.0, c, d, 0.0) + + +def piecewiseSigPowGauss(x, thr, x0, a, b, c, d, e, f): + y = np.empty(x.size) + y[x < thr] = hybridPowGauss(x[x < thr], c, d, e, f) + y[x >= thr] = sigmoid(x[x >= thr], x0, a, b) + return y + + + +# Select data files (PKL) +lookup_root = '../Output/lookups extended 0.35MHz/' +lookup_absroot = os.path.abspath(lookup_root) +lookup_filepaths = OpenFilesDialog(lookup_absroot, 'pkl') +rgxp = re.compile('lookups_a(\d*.\d*)nm_f(\d*.\d*)kHz_A(\d*.\d*)kPa_dQ(\d*.\d*)nC_cm2.pkl') +plot_bool = 1 + +nQ = 300 +baseline_ind = -1 + +# Check dialog output +if not lookup_filepaths: + print('error: no lookup table selected') +else: + print('importing betam_eff profiles from lookup tables') + nfiles = len(lookup_filepaths) + + # Initialize coefficients matrices + amps = np.empty(nfiles) + betam_eff = np.empty((nfiles, nQ)) + + for i in range(nfiles): + + # Load lookup table + lookup_filename = ntpath.basename(lookup_filepaths[i]) + mo = rgxp.fullmatch(lookup_filename) + if not mo: + print('Error: lookup file does not match regular expression pattern') + else: + # Retrieve stimulus parameters + Fdrive = float(mo.group(2)) * 1e3 + Adrive = float(mo.group(3)) * 1e3 + dQ = float(mo.group(4)) * 1e-2 + amps[i] = Adrive + if Adrive == 0: + baseline_ind = i + + # Retrieve coefficients data + with open(lookup_filepaths[i], 'rb') as fh: + lookup = pickle.load(fh) + Qm = lookup['Q'] + betam_eff[i, :] = lookup['beta_m_eff'] + + if baseline_ind == -1: + print('Error: no baseline profile selected') + else: + + Amin = np.amin(amps) + Amax = np.amax(amps) + Qmin = np.amin(Qm) + Qmax = np.amax(Qm) + namps = nfiles + + i_trueQ_lb, trueQ_lb = find_nearest(Qm, -0.8) + i_trueQ_ub, trueQ_ub = find_nearest(Qm, 0.4) + + # Baseline subtraction + print('subtracting baseline (Adrive = 0) from profiles') + betam_eff_sub = (betam_eff - betam_eff[baseline_ind, :]) + + # Suppressing Qm component + print('dividing by -Qm') + betam_eff_sub_even = - betam_eff_sub / Qm + + # Peaks fitting on even profiles + print('fitting power law to peaks of even profiles') + betam_eff_sub_even_peaks = np.amax(betam_eff_sub_even, axis=1) + pguess_peaks = (1e4, 1.6, 3.5, 0.4) + popt, _ = curve_fit(hybridPowSigmoid, amps, betam_eff_sub_even_peaks, p0=pguess_peaks) + betam_eff_sub_even_peaks_fit = hybridPowSigmoid(amps, *popt) + + # Normalization + print('normalizing even profiles') + betam_eff_sub_even_norm = 
betam_eff_sub_even[1:, :]\ + / betam_eff_sub_even_peaks[1:].reshape(namps - 1, 1) + + # Normalized profiles fitting + print('fitting hybrid gaussian-power law to normalized betameff-sub-even') + betam_eff_sub_even_norm_fit = np.empty((namps - 1, nQ)) + params = np.empty((namps - 1, 8)) + for i in range(namps - 1): + popt, _ = curve_fit(piecewiseSigPowGauss, Qm, betam_eff_sub_even_norm[i, :], + bounds=([-0.5, -0.5, -1., -np.infty, 0., 0., -np.infty, 0.], + [0, 0.5, 0., 0., np.infty, np.infty, 0., np.infty])) + betam_eff_sub_even_norm_fit[i, :] = piecewiseSigPowGauss(Qm, *popt) + params[i, :] = np.asarray(popt) + + # Predict betam_eff profiles + print('predicting betam_eff by reconstructing from fits') + betam_eff_sub_even_predict = np.vstack((np.zeros(nQ), betam_eff_sub_even_norm_fit))\ + * betam_eff_sub_even_peaks_fit.reshape(namps, 1) + betam_eff_sub_predict = - betam_eff_sub_even_predict * Qm + betam_eff_predict = betam_eff_sub_predict + betam_eff[baseline_ind, :] + + # Analyze prediction accuracy, in wide and realistic charge ranges + betam_eff_trueQ = betam_eff[:, i_trueQ_lb:i_trueQ_ub] + betam_eff_predict_trueQ = betam_eff_predict[:, i_trueQ_lb:i_trueQ_ub] + betam_eff_diff = betam_eff_predict - betam_eff + betam_eff_diff_trueQ = betam_eff_diff[:, i_trueQ_lb:i_trueQ_ub] + betam_eff_maxdiff = np.amax(np.abs(betam_eff_diff), axis=1) + betam_eff_maxdiff_trueQ = np.amax(np.abs(betam_eff_diff_trueQ), axis=1) + betam_eff_rmse = np.empty(namps) + betam_eff_rmse_trueQ = np.empty(namps) + for i in range(namps): + betam_eff_rmse[i] = rmse(betam_eff[i, :], betam_eff_predict[i, :]) + betam_eff_rmse_trueQ[i] = rmse(betam_eff_trueQ[i, :], betam_eff_predict_trueQ[i, :]) + + + if plot_bool == 1: + + # Plotting + print('plotting') + + mymap = cm.get_cmap('jet') + sm_amp = plt.cm.ScalarMappable(cmap=mymap, norm=plt.Normalize(Amin * 1e-3, Amax * 1e-3)) + sm_amp._A = [] + + # 1: betam_eff + fig, ax = plt.subplots(figsize=(21, 7)) + ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) + ax.set_ylabel('$\\beta_{m,\ eff}\ (ms^{-1})$', fontsize=28) + ax.set_xlim(Qmin * 1e2, Qmax * 1e2) + for i in range(namps): + ax.plot(Qm * 1e2, betam_eff[i, :] * 1e-3, c=mymap(rescale(amps[i], Amin, Amax))) + cbar = plt.colorbar(sm_amp) + cbar.ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=28) + plt.tight_layout() + + # 2: betam_eff_sub + fig, ax = plt.subplots(figsize=(21, 7)) + ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) + ax.set_ylabel('$\\beta_{m,\ eff-sub}\ (ms^{-1})$', fontsize=28) + ax.set_xlim(Qmin * 1e2, Qmax * 1e2) + for i in range(namps): + ax.plot(Qm * 1e2, betam_eff_sub[i, :] * 1e-3, + c=mymap(rescale(amps[i], Amin, Amax))) + cbar = plt.colorbar(sm_amp) + cbar.ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=28) + plt.tight_layout() + + # 3: betam_eff_sub_even + fig, ax = plt.subplots(figsize=(21, 7)) + ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) + ax.set_ylabel('$\\beta_{m,\ eff-sub-even}\ (ms^{-1}\ cm^2/nC)$', fontsize=28) + ax.set_xlim(Qmin * 1e2, Qmax * 1e2) + for i in range(namps): + ax.plot(Qm * 1e2, betam_eff_sub_even[i, :] * 1e-3, + c=mymap(rescale(amps[i], Amin, Amax))) + cbar = plt.colorbar(sm_amp) + cbar.ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=28) + plt.tight_layout() + + # 4: betam_eff_sub_even_peaks + fig, ax = plt.subplots(figsize=(21, 7)) + ax.set_xlabel('$A_{drive}\ (kPa)$', fontsize=28) + ax.set_ylabel('$\\beta_{m,\ eff-sub-even-peaks}\ (ms^{-1}\ cm^2/nC)$', fontsize=28) + ax.scatter(amps * 1e-3, betam_eff_sub_even_peaks * 1e-3, s=30, c='C0', label='data') + ax.plot(amps * 1e-3, 
betam_eff_sub_even_peaks_fit * 1e-3, c='C1', label='fit') + ax.legend(fontsize=28) + plt.tight_layout() + + # 5: betam_eff_sub_even_norm + fig, ax = plt.subplots(figsize=(21, 7)) + ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) + ax.set_ylabel('$\\beta_{m,\ eff-sub-even-norm}\ (-)$', fontsize=28) + ax.set_xlim(Qmin * 1e2, Qmax * 1e2) + ax.grid() + for i in range(namps - 1): + ax.plot(Qm * 1e2, betam_eff_sub_even_norm[i, :], + c=mymap(rescale(amps[i], Amin, Amax))) + for i in range(namps - 1): + ax.plot(Qm * 1e2, betam_eff_sub_even_norm_fit[i, :], '--', + c=mymap(rescale(amps[i], Amin, Amax))) + cbar = plt.colorbar(sm_amp) + cbar.ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=28) + plt.tight_layout() + + + # 6: piecewise function parameters + fig, ax = plt.subplots(figsize=(21, 7)) + ax.set_xlabel('$A_{drive}\ (kPa)$', fontsize=28) + ax.set_ylabel('$\\beta_{m,\ eff-sub-even-norm}\ fit\ params$', fontsize=28) + ax.plot(amps[1:] * 1e-3, params[:, 0], label='thr') + ax.plot(amps[1:] * 1e-3, params[:, 1], label='x0') + ax.plot(amps[1:] * 1e-3, params[:, 2], label='a') + ax.plot(amps[1:] * 1e-3, params[:, 3], label='b') + ax.plot(amps[1:] * 1e-3, params[:, 4], label='c') + ax.plot(amps[1:] * 1e-3, params[:, 5], label='d') + ax.plot(amps[1:] * 1e-3, params[:, 6], label='e') + ax.plot(amps[1:] * 1e-3, params[:, 7], label='f') + ax.grid() + ax.legend(fontsize=28) + + + # 7: betam_eff_predict + fig, ax = plt.subplots(figsize=(21, 7)) + ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) + ax.set_ylabel('$\\beta_{m,\ eff}\ prediction\ (ms^{-1})$', fontsize=28) + ax.set_xlim(Qmin * 1e2, Qmax * 1e2) + for i in range(namps): + ax.plot(Qm * 1e2, betam_eff_predict[i, :] * 1e-3, linewidth=2, + c=mymap(rescale(amps[i], Amin, Amax))) + cbar = plt.colorbar(sm_amp) + cbar.ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=28) + plt.tight_layout() + + + # 8: betam_eff_predict - betam_eff + # fig, ax = plt.subplots(figsize=(21, 7)) + # ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) + # ax.set_ylabel('$\\beta_{m,\ eff}\ difference\ (ms^{-1})$', fontsize=28) + # ax.set_xlim(Qmin * 1e2, Qmax * 1e2) + # for i in range(namps): + # ax.plot(Qm * 1e2, betam_eff_diff[i, :] * 1e-3, linewidth=2, + # c=mymap(rescale(amps[i], Amin, Amax))) + # cbar = plt.colorbar(sm_amp) + # cbar.ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=28) + # plt.tight_layout() + + # 9: RMSE & max absolute error + fig, ax = plt.subplots(figsize=(21, 7)) + ax.set_xlabel('$A_{drive} \ (kPa)$', fontsize=28) + ax.set_ylabel('$RMSE\ (ms^{-1})$', fontsize=28) + ax.plot(amps * 1e-3, betam_eff_rmse * 1e-3, linewidth=2, c='C0', + label='$RMSE\ -\ entire\ Q_m\ range$') + ax.plot(amps * 1e-3, betam_eff_rmse_trueQ * 1e-3, linewidth=2, c='C1', + label='$RMSE\ -\ realistic\ Q_m\ range$') + ax.plot(amps * 1e-3, betam_eff_maxdiff * 1e-3, '--', linewidth=2, c='C0', + label='$MAE\ -\ entire\ Q_m\ range$') + ax.plot(amps * 1e-3, betam_eff_maxdiff_trueQ * 1e-3, '--', linewidth=2, c='C1', + label='$MAE\ -\ realistic\ Q_m\ range$') + ax.legend(fontsize=28) + plt.tight_layout() + + plt.show() + diff --git a/deprecated/curve fitting/fit_betaneff.py b/deprecated/curve fitting/fit_betaneff.py new file mode 100644 index 0000000..8df40cd --- /dev/null +++ b/deprecated/curve fitting/fit_betaneff.py @@ -0,0 +1,271 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# @Author: Theo Lemaire +# @Date: 2017-02-07 18:55:49 +# @Email: theo.lemaire@epfl.ch +# @Last Modified by: Theo Lemaire +# @Last Modified time: 2017-02-14 15:48:50 + +''' Detailed fitting strategy of the beta_n_eff profiles 
''' + +import os +import ntpath +import re +import pickle +import matplotlib.pyplot as plt +import matplotlib.cm as cm +import numpy as np +from scipy.optimize import curve_fit +import scipy.special as sp +from utils import OpenFilesDialog, rescale, rmse, find_nearest + + +def skewed_gaussian(x, mu=0, sigma=1, alpha=0, a=1, c=0): + normpdf = (1 / (sigma * np.sqrt(2 * np.pi))) * np.exp(-(np.power((x - mu), 2) / (2 * np.power(sigma, 2)))) + normcdf = (0.5 * (1 + sp.erf((alpha * ((x - mu) / sigma)) / (np.sqrt(2))))) + return 2 * a * normpdf * normcdf + c + + +def gaussian(x, mu, sigma, a): + return a * np.exp(-((x - mu) / (2 * sigma))**2) + + +def Exponential(x, x0, b, c): + return b * np.exp(c * (x - x0)) + + +def Exp0(x, b, c): + return Exponential(x, 0.0, b, c) + + +def hybridExpGauss(x, mu, sigma, a, b, c): + return gaussian(x, mu, sigma, a) + Exponential(x, 0.0, b, -c) + + +def dualGauss(x, mu1, mu2, sigma1, sigma2, a1, a2): + return gaussian(x, mu1, sigma1, a1) + gaussian(x, mu2, sigma2, a2) + + + + +# Select data files (PKL) +lookup_root = '../Output/lookups extended 0.35MHz/' +lookup_absroot = os.path.abspath(lookup_root) +lookup_filepaths = OpenFilesDialog(lookup_absroot, 'pkl') +rgxp = re.compile('lookups_a(\d*.\d*)nm_f(\d*.\d*)kHz_A(\d*.\d*)kPa_dQ(\d*.\d*)nC_cm2.pkl') +plot_bool = 1 + +nQ = 300 +baseline_ind = -1 + +# Check dialog output +if not lookup_filepaths: + print('error: no lookup table selected') +else: + print('importing betan_eff profiles from lookup tables') + nfiles = len(lookup_filepaths) + + # Initialize coefficients matrices + amps = np.empty(nfiles) + betan_eff = np.empty((nfiles, nQ)) + + for i in range(nfiles): + + # Load lookup table + lookup_filename = ntpath.basename(lookup_filepaths[i]) + mo = rgxp.fullmatch(lookup_filename) + if not mo: + print('Error: lookup file does not match regular expression pattern') + else: + # Retrieve stimulus parameters + Fdrive = float(mo.group(2)) * 1e3 + Adrive = float(mo.group(3)) * 1e3 + dQ = float(mo.group(4)) * 1e-2 + amps[i] = Adrive + if Adrive == 0: + baseline_ind = i + + # Retrieve coefficients data + with open(lookup_filepaths[i], 'rb') as fh: + lookup = pickle.load(fh) + Qm = lookup['Q'] + betan_eff[i, :] = lookup['beta_n_eff'] + + if baseline_ind == -1: + print('Error: no baseline profile selected') + else: + + Amin = np.amin(amps) + Amax = np.amax(amps) + Qmin = np.amin(Qm) + Qmax = np.amax(Qm) + namps = nfiles + + i_trueQ_lb, trueQ_lb = find_nearest(Qm, -0.8) + i_trueQ_ub, trueQ_ub = find_nearest(Qm, 0.4) + + + # Baseline subtraction + print('subtracting baseline (Adrive = 0) from profiles') + betan_eff_sub = (betan_eff - betan_eff[baseline_ind, :]) + + # Peaks fitting on even profiles + print('fitting exponential law to profiles peaks') + betan_eff_sub_peaks = np.amax(betan_eff_sub, axis=1) + popt, _ = curve_fit(Exp0, amps, betan_eff_sub_peaks, p0=(1.8e14, 3e-5)) + betan_eff_sub_peaks_fit = Exp0(amps, *popt) + + # Normalization + print('normalizing subtracted profiles') + betan_eff_sub_norm = betan_eff_sub[1:, :]\ + / betan_eff_sub_peaks[1:].reshape(namps - 1, 1) + + # Normalized profiles fitting + print('fitting hybrid gaussian-exp law to normalized betaneff-sub') + betan_eff_sub_norm_fit = np.empty((namps - 1, nQ)) + params = np.empty((namps - 1, 6)) + for i in range(namps - 1): + print(i) + popt, _ = curve_fit(dualGauss, Qm, betan_eff_sub_norm[i], + bounds=([-np.infty, -np.infty, 0., 0., 0., 0.], + [0., 0., np.infty, np.infty, np.infty, np.infty]), + max_nfev=100000) + betan_eff_sub_norm_fit[i, :] = 
dualGauss(Qm, *popt) + params[i, :] = np.asarray(popt) + + + + # Predict betan_eff profiles + print('predicting betan_eff by reconstructing from fits') + betan_eff_sub_predict = np.vstack((np.zeros(nQ), betan_eff_sub_norm_fit))\ + * betan_eff_sub_peaks_fit.reshape(namps, 1) + betan_eff_predict = betan_eff_sub_predict + betan_eff[baseline_ind, :] + + # Analyze prediction accuracy, in wide and realistic charge ranges + betan_eff_trueQ = betan_eff[:, i_trueQ_lb:i_trueQ_ub] + betan_eff_predict_trueQ = betan_eff_predict[:, i_trueQ_lb:i_trueQ_ub] + betan_eff_diff = betan_eff_predict - betan_eff + betan_eff_diff_trueQ = betan_eff_diff[:, i_trueQ_lb:i_trueQ_ub] + betan_eff_maxdiff = np.amax(np.abs(betan_eff_diff), axis=1) + betan_eff_maxdiff_trueQ = np.amax(np.abs(betan_eff_diff_trueQ), axis=1) + betan_eff_rmse = np.empty(namps) + betan_eff_rmse_trueQ = np.empty(namps) + for i in range(namps): + betan_eff_rmse[i] = rmse(betan_eff[i, :], betan_eff_predict[i, :]) + betan_eff_rmse_trueQ[i] = rmse(betan_eff_trueQ[i, :], betan_eff_predict_trueQ[i, :]) + + + if plot_bool == 1: + + # Plotting + print('plotting') + + mymap = cm.get_cmap('jet') + sm_amp = plt.cm.ScalarMappable(cmap=mymap, norm=plt.Normalize(Amin * 1e-3, Amax * 1e-3)) + sm_amp._A = [] + + # 1: betan_eff + fig, ax = plt.subplots(figsize=(21, 7)) + ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) + ax.set_ylabel('$\\beta_{n,\ eff}\ (ms^{-1})$', fontsize=28) + ax.set_xlim(Qmin * 1e2, Qmax * 1e2) + for i in range(namps): + ax.plot(Qm * 1e2, betan_eff[i, :] * 1e-3, c=mymap(rescale(amps[i], Amin, Amax))) + cbar = plt.colorbar(sm_amp) + cbar.ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=28) + plt.tight_layout() + + # 2: betan_eff_sub + fig, ax = plt.subplots(figsize=(21, 7)) + ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) + ax.set_ylabel('$\\beta_{n,\ eff-sub}\ (ms^{-1})$', fontsize=28) + ax.set_xlim(Qmin * 1e2, Qmax * 1e2) + for i in range(namps): + ax.plot(Qm * 1e2, betan_eff_sub[i, :] * 1e-3, + c=mymap(rescale(amps[i], Amin, Amax))) + cbar = plt.colorbar(sm_amp) + cbar.ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=28) + plt.tight_layout() + + # 3: betan_eff_sub_peaks + fig, ax = plt.subplots(figsize=(21, 7)) + ax.set_xlabel('$A_{drive}\ (kPa)$', fontsize=28) + ax.set_ylabel('$\\beta_{n,\ eff-sub-peaks}\ (ms^{-1})$', fontsize=28) + ax.scatter(amps * 1e-3, betan_eff_sub_peaks * 1e-3, s=30, c='C0', label='data') + ax.plot(amps * 1e-3, betan_eff_sub_peaks_fit * 1e-3, c='C1', label='fit') + ax.legend(fontsize=28) + plt.tight_layout() + + # 5: betan_eff_sub_norm + fig, ax = plt.subplots(figsize=(21, 7)) + ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) + ax.set_ylabel('$\\beta_{n,\ eff-sub-norm}\ (-)$', fontsize=28) + ax.set_xlim(Qmin * 1e2, Qmax * 1e2) + ax.grid() + for i in range(namps - 1): + ax.plot(Qm * 1e2, betan_eff_sub_norm[i, :], + c=mymap(rescale(amps[i], Amin, Amax))) + for i in range(namps - 1): + ax.plot(Qm * 1e2, betan_eff_sub_norm_fit[i, :], '--', + c=mymap(rescale(amps[i], Amin, Amax))) + cbar = plt.colorbar(sm_amp) + cbar.ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=28) + plt.tight_layout() + + + # 6: parameters + fig, ax = plt.subplots(figsize=(21, 7)) + ax.set_xlabel('$A_{drive}\ (kPa)$', fontsize=28) + ax.set_ylabel('$\\beta_{n,\ eff-sub-norm}\ fit\ params$', fontsize=28) + ax.plot(amps[1:] * 1e-3, params[:, 0], label='mu1') + ax.plot(amps[1:] * 1e-3, params[:, 1], label='mu2') + ax.plot(amps[1:] * 1e-3, params[:, 2], label='sigma1') + ax.plot(amps[1:] * 1e-3, params[:, 3], label='sigma2') + ax.plot(amps[1:] * 1e-3, params[:, 
4], label='a1') + ax.plot(amps[1:] * 1e-3, params[:, 5], label='a2') + ax.grid() + ax.legend(fontsize=28) + + + # 7: betan_eff_predict + fig, ax = plt.subplots(figsize=(21, 7)) + ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) + ax.set_ylabel('$\\beta_{n,\ eff}\ prediction\ (ms^{-1})$', fontsize=28) + ax.set_xlim(Qmin * 1e2, Qmax * 1e2) + for i in range(namps): + ax.plot(Qm * 1e2, betan_eff_predict[i, :] * 1e-3, + c=mymap(rescale(amps[i], Amin, Amax))) + cbar = plt.colorbar(sm_amp) + cbar.ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=28) + plt.tight_layout() + + + # 8: betan_eff_predict - betan_eff + fig, ax = plt.subplots(figsize=(21, 7)) + ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) + ax.set_ylabel('$\\beta_{n,\ eff}\ difference\ (ms^{-1})$', fontsize=28) + ax.set_xlim(Qmin * 1e2, Qmax * 1e2) + for i in range(namps): + ax.plot(Qm * 1e2, betan_eff_diff[i, :] * 1e-3, + c=mymap(rescale(amps[i], Amin, Amax))) + cbar = plt.colorbar(sm_amp) + cbar.ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=28) + plt.tight_layout() + + # 9: RMSE & max absolute error + fig, ax = plt.subplots(figsize=(21, 7)) + ax.set_xlabel('$A_{drive} \ (kPa)$', fontsize=28) + ax.set_ylabel('$Error\ (ms^{-1})$', fontsize=28) + ax.plot(amps * 1e-3, betan_eff_rmse * 1e-3, c='C0', + label='$RMSE\ -\ entire\ Q_m\ range$') + ax.plot(amps * 1e-3, betan_eff_rmse_trueQ * 1e-3, c='C1', + label='$RMSE\ -\ realistic\ Q_m\ range$') + ax.plot(amps * 1e-3, betan_eff_maxdiff * 1e-3, '--', c='C0', + label='$MAE\ -\ entire\ Q_m\ range$') + ax.plot(amps * 1e-3, betan_eff_maxdiff_trueQ * 1e-3, '--', c='C1', + label='$MAE\ -\ realistic\ Q_m\ range$') + ax.legend(fontsize=28) + plt.tight_layout() + + plt.show() + diff --git a/deprecated/curve fitting/fit_eff_coeffs.py b/deprecated/curve fitting/fit_eff_coeffs.py new file mode 100644 index 0000000..b7ab4f4 --- /dev/null +++ b/deprecated/curve fitting/fit_eff_coeffs.py @@ -0,0 +1,215 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# @Author: Theo Lemaire +# @Date: 2017-01-15 18:08:06 +# @Email: theo.lemaire@epfl.ch +# @Last Modified by: Theo Lemaire +# @Last Modified time: 2017-01-20 10:18:50 + +''' Plot the profiles of the 9 charge-dependent "effective" HH coefficients, +along with a fitted mathematical expression ''' + + +import os +import ntpath +import pickle +import matplotlib.pyplot as plt +import numpy as np +# import math as math +import scipy.special as sp +from scipy.optimize import curve_fit +from utils import OpenFilesDialog, rsquared + + + + + +def fit_amn(x, a, b, c, d): + # return a * c * (x - c - b) * np.exp((x - b) / c) - x + d + return a * c**2 * sp.spence(1 - (-np.exp(-b / c) * (np.exp(x / c) - np.exp(b / c)))) + d + + +# -------------------------------------------------------------------- + +def gaus(x, a, x0, sigma): + return a * np.exp(- (x - x0)**2 / (2 * sigma**2)) + + +def compexp(x, a, b, c, d, e, f): + return (a * x + b) / (c * np.exp(d * x + e) + f) + + +def expgrowth(x, x0, a): + return np.exp(a * (x - x0)) + + +def expdecay(x, x0, a): + return np.exp(-a * (x - x0)) + + +def sigmoid(x, x0, a, b): + return a / (1 + np.exp(- b * (x - x0))) + + +def dualexp(x, x1, x2, a, b): + return np.exp(a * (x - x1)) + np.exp(- b * (x - x2)) + + +def dualregime(x, x0, a, b): + return a * (x - x0) / (np.exp(- b * (x - x0)) - 1) + + +def skewed_gaussian(x, mu=0, sigma=1, alpha=0, a=1, c=0): + normpdf = (1 / (sigma * np.sqrt(2 * np.pi))) * np.exp(-(np.power((x - mu), 2) / (2 * np.power(sigma, 2)))) + normcdf = (0.5 * (1 + sp.erf((alpha * ((x - mu) / sigma)) / (np.sqrt(2))))) + 
return 2 * a * normpdf * normcdf + c + + +# Select data files (PKL) +lookup_root = '../Output/lookups 0.35MHz linear amplitude/' +lookup_absroot = os.path.abspath(lookup_root) +lookup_filepath = OpenFilesDialog(lookup_absroot, 'pkl') + +# Check dialog output +if not lookup_filepath: + print('error: no lookup table selected') +elif len(lookup_filepath) > 1: + print('error multiple lookup tables selected') +else: + + # Load lookup table + lookup_filename = ntpath.basename(lookup_filepath[0]) + print('loading lookup table') + with open(lookup_filepath[0], 'rb') as fh: + lookup = pickle.load(fh) + + print('finding best fits with analytical expressions') + + # Vm_eff + print('Vm_eff') + z = np.polyfit(lookup['Q'], lookup['V_eff'], 3) + p = np.poly1d(z) + Veff_fit = p(lookup['Q']) + r2 = rsquared(lookup['V_eff'], Veff_fit) + fig, ax = plt.subplots(figsize=(21, 7)) + ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) + ax.set_ylabel('$V_{m,\ eff}\ (mV)$', fontsize=28) + ax.plot(lookup['Q'] * 1e2, lookup['V_eff'], linewidth=2, label='data') + ax.plot(lookup['Q'] * 1e2, Veff_fit, linewidth=2, label='fit') + ax.text(0.45, 0.9, '$R^2 = {:.5f}$'.format(r2), transform=ax.transAxes, fontsize=28) + ax.legend() + + # alpha_m_eff + print('alpha_m_eff') + # z = np.polyfit(lookup['Q'], lookup['alpha_m_eff'], 5) + # p = np.poly1d(z) + # alpha_m_eff_fit = p(lookup['Q']) + popt, _ = curve_fit(fit_amn, lookup['Q'], lookup['alpha_m_eff'], maxfev=100000) + alpha_m_eff_fit = fit_amn(lookup['Q'], *popt) + r2 = rsquared(lookup['alpha_m_eff'], alpha_m_eff_fit) + fig, ax = plt.subplots(figsize=(21, 7)) + ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) + ax.set_ylabel('$\\alpha_{m,\ eff}\ (ms^{-1})$', fontsize=28) + ax.plot(lookup['Q'] * 1e2, lookup['alpha_m_eff'] * 1e-3, linewidth=2, label='data') + ax.plot(lookup['Q'] * 1e2, alpha_m_eff_fit * 1e-3, linewidth=2, label='fit') + ax.text(0.45, 0.9, '$R^2 = {:.5f}$'.format(r2), transform=ax.transAxes, fontsize=28) + ax.legend() + + # beta_m_eff + print('beta_m_eff') + pguess = (-0.7, 0.2, 3, 5000) + beta_m_eff_guess = skewed_gaussian(lookup['Q'], *pguess) + popt, _ = curve_fit(skewed_gaussian, lookup['Q'], lookup['beta_m_eff'], p0=pguess) + beta_m_eff_fit = skewed_gaussian(lookup['Q'], *popt) + r2 = rsquared(lookup['beta_m_eff'], beta_m_eff_fit) + fig, ax = plt.subplots(figsize=(21, 7)) + ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) + ax.set_ylabel('$\\beta_{m,\ eff}\ (ms^{-1})$', fontsize=28) + ax.plot(lookup['Q'] * 1e2, lookup['beta_m_eff'] * 1e-3, linewidth=2, label='data') + ax.plot(lookup['Q'] * 1e2, beta_m_eff_fit * 1e-3, linewidth=2, label='fit') + ax.plot(lookup['Q'] * 1e2, beta_m_eff_guess * 1e-3, linewidth=2, label='guess') + ax.text(0.45, 0.9, '$R^2 = {:.5f}$'.format(r2), transform=ax.transAxes, fontsize=28) + ax.legend() + + # alpha_h_eff + print('alpha_h_eff') + pguess = (-0.7, 0.2, 3, 20000) + alpha_h_eff_guess = skewed_gaussian(lookup['Q'], *pguess) + popt, _ = curve_fit(skewed_gaussian, lookup['Q'], lookup['alpha_h_eff'], p0=pguess) + alpha_h_eff_fit = skewed_gaussian(lookup['Q'], *popt) + r2 = rsquared(lookup['alpha_h_eff'], alpha_h_eff_fit) + fig, ax = plt.subplots(figsize=(21, 7)) + ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) + ax.set_ylabel('$\\alpha_{h,\ eff}\ (ms^{-1})$', fontsize=28) + ax.plot(lookup['Q'] * 1e2, lookup['alpha_h_eff'] * 1e-3, linewidth=2, label='data') + ax.plot(lookup['Q'] * 1e2, alpha_h_eff_fit * 1e-3, linewidth=2, label='fit') + ax.plot(lookup['Q'] * 1e2, alpha_h_eff_guess * 1e-3, label='guess') + ax.text(0.45, 0.9, '$R^2 = 
{:.5f}$'.format(r2), transform=ax.transAxes, fontsize=28) + ax.legend() + + # beta_h_eff + print('beta_h_eff') + popt, _ = curve_fit(sigmoid, lookup['Q'], lookup['beta_h_eff'], p0=(-0.1, 4000, 20)) + beta_h_eff_fit = sigmoid(lookup['Q'], *popt) + r2 = rsquared(lookup['beta_h_eff'], beta_h_eff_fit) + fig, ax = plt.subplots(figsize=(21, 7)) + ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) + ax.set_ylabel('$\\beta_{h,\ eff}\ (ms^{-1})$', fontsize=28) + ax.plot(lookup['Q'] * 1e2, lookup['beta_h_eff'] * 1e-3, linewidth=2, label='data') + ax.plot(lookup['Q'] * 1e2, beta_h_eff_fit * 1e-3, linewidth=2, label='fit') + ax.text(0.45, 0.9, '$R^2 = {:.5f}$'.format(r2), transform=ax.transAxes, fontsize=28) + ax.legend() + + # alpha_n_eff + print('alpha_n_eff') + popt, _ = curve_fit(gaus, lookup['Q'], lookup['alpha_n_eff']) + alpha_n_eff_fit = gaus(lookup['Q'], *popt) + r2 = rsquared(lookup['alpha_n_eff'], alpha_n_eff_fit) + fig, ax = plt.subplots(figsize=(21, 7)) + ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) + ax.set_ylabel('$\\alpha_{n,\ eff}\ (ms^{-1})$', fontsize=28) + ax.plot(lookup['Q'] * 1e2, lookup['alpha_n_eff'] * 1e-3, linewidth=2, label='data') + ax.plot(lookup['Q'] * 1e2, alpha_n_eff_fit * 1e-3, linewidth=2, label='fit') + ax.text(0.45, 0.9, '$R^2 = {:.5f}$'.format(r2), transform=ax.transAxes, fontsize=28) + ax.legend() + + # beta_n_eff + print('beta_n_eff') + popt, _ = curve_fit(expdecay, lookup['Q'], lookup['beta_n_eff']) + beta_n_eff_fit = expdecay(lookup['Q'], *popt) + r2 = rsquared(lookup['beta_n_eff'], beta_n_eff_fit) + fig, ax = plt.subplots(figsize=(21, 7)) + ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) + ax.set_ylabel('$\\beta_{n,\ eff}\ (ms^{-1})$', fontsize=28) + ax.plot(lookup['Q'] * 1e2, lookup['beta_n_eff'] * 1e-3, linewidth=2, label='data') + ax.plot(lookup['Q'] * 1e2, beta_n_eff_fit * 1e-3, linewidth=2, label='fit') + ax.text(0.45, 0.9, '$R^2 = {:.5f}$'.format(r2), transform=ax.transAxes, fontsize=28) + ax.legend() + + # pinf_over_taup_eff + print('pinf_over_taup_eff') + popt, _ = curve_fit(expgrowth, lookup['Q'], lookup['pinf_over_taup_eff']) + pinf_over_taup_eff_fit = expgrowth(lookup['Q'], *popt) + r2 = rsquared(lookup['pinf_over_taup_eff'], pinf_over_taup_eff_fit) + fig, ax = plt.subplots(figsize=(21, 7)) + ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) + ax.set_ylabel('$p_{\\infty} / \\tau_p\ (ms^{-1})$', fontsize=28) + ax.plot(lookup['Q'] * 1e2, lookup['pinf_over_taup_eff'] * 1e-3, linewidth=2, label='data') + ax.plot(lookup['Q'] * 1e2, pinf_over_taup_eff_fit * 1e-3, linewidth=2, label='fit') + ax.text(0.45, 0.9, '$R^2 = {:.5f}$'.format(r2), transform=ax.transAxes, fontsize=28) + ax.legend() + + # inv_taup_eff + print('inv_taup_eff') + popt, _ = curve_fit(dualexp, lookup['Q'], lookup['inv_taup_eff'], p0=(-0.2, -0.04, 15, 15)) + inv_taup_eff_fit = dualexp(lookup['Q'], *popt) + r2 = rsquared(lookup['inv_taup_eff'], inv_taup_eff_fit) + fig, ax = plt.subplots(figsize=(21, 7)) + ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=28) + ax.set_ylabel('$1 / \\tau_p\ (ms^{-1})$', fontsize=28) + ax.plot(lookup['Q'] * 1e2, lookup['inv_taup_eff'] * 1e-3, linewidth=2, label='data') + ax.plot(lookup['Q'] * 1e2, inv_taup_eff_fit * 1e-3, linewidth=2, label='fit') + ax.text(0.45, 0.9, '$R^2 = {:.5f}$'.format(r2), transform=ax.transAxes, fontsize=28) + ax.legend() + + plt.show() diff --git a/deprecated/file management/create_data_folders.py b/deprecated/file management/create_data_folders.py new file mode 100644 index 0000000..15fbfd5 --- /dev/null +++ b/deprecated/file 
management/create_data_folders.py @@ -0,0 +1,24 @@ +import os +import tkinter as tk +from tkinter import filedialog + +# Get root directory from user +root = tk.Tk() +root.withdraw() +root = filedialog.askdirectory() +assert root, 'No root directory chosen' + +freqs = [50 * x for x in range(2, 21)] +subdirs = ['CW', 'PRF0.10kHz', 'PRF1.50kHz'] + +for f in freqs: + dirpath = '{}/{}kHz'.format(root, f) + print(dirpath) + if not os.path.exists(dirpath): + os.makedirs(dirpath) + for sd in subdirs: + subdirpath = '{}/{}'.format(dirpath, sd) + print('-->', subdirpath) + if not os.path.exists(subdirpath): + os.makedirs(subdirpath) + diff --git a/deprecated/file management/datasort.py b/deprecated/file management/datasort.py new file mode 100644 index 0000000..3f96076 --- /dev/null +++ b/deprecated/file management/datasort.py @@ -0,0 +1,55 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# @Author: Theo Lemaire +# @Date: 2017-07-18 11:54:28 +# @Email: theo.lemaire@epfl.ch +# @Last Modified by: Theo Lemaire +# @Last Modified time: 2017-08-07 11:27:58 + +''' Re-organize data files ''' + +import os +import numpy as np + +data_root_in = 'C:/Users/admin/Desktop/DATA2/TC/PRF1.50kHz' +data_root_out = 'C:/Users/admin/Desktop/DATA' + +neurons = ['TC'] +freqs = np.arange(850, 1001, 50) +tmpdir = 'tmp6' + +amps = np.arange(50, 601, 50) +PRFs = [1.5] +DFs = np.arange(0.1, 0.91, 0.1) +durs = np.arange(10, 101, 10) + +nfiles = len(neurons) * len(freqs) * len(amps) * len(PRFs) * len(durs) * len(DFs) +ifile = 0 +for neuron in neurons: + for f in freqs: + filedir_in = '{}/{}'.format(data_root_in, tmpdir) + filedir_out = '{}/{}/{:.0f}kHz'.format(data_root_out, neuron, f) + # print(filedir_in) + # print(filedir_out) + for A in amps: + for PRF in PRFs: + for t in durs: + # ifile += 1 + # CWcode = 'sim_{}_CW_32nm_{:.0f}kHz_{:.0f}kPa_{:.0f}ms'.format(neuron, f, A, t) + # filepath_in = '{}/{}_effective.pkl'.format(filedir_in, CWcode) + # filepath_out = '{}/CW/{}_effective.pkl'.format(filedir_out, CWcode) + # print('renaming file {}/{}'.format(ifile, nfiles)) + # print(filepath_in) + # print(filepath_out) + # if os.path.isfile(filepath_in): + # os.rename(filepath_in, filepath_out) + for DF in DFs: + ifile += 1 + PWcode = 'sim_{}_PW_32nm_{:.0f}kHz_{:.0f}kPa_{:.0f}ms_PRF{:.2f}kHz_DF{:.2f}'.format(neuron, f, A, t, PRF, DF) + filepath_in = '{}/{}_effective.pkl'.format(filedir_in, PWcode) + filepath_out = '{}/PRF{:.2f}kHz/{}_effective.pkl'.format(filedir_out, PRF, PWcode) + print('renaming file {}/{}'.format(ifile, nfiles)) + print(filepath_in) + print(filepath_out) + if os.path.isfile(filepath_in): + os.rename(filepath_in, filepath_out) diff --git a/deprecated/kriging/lhs.py b/deprecated/kriging/lhs.py new file mode 100644 index 0000000..6d6b221 --- /dev/null +++ b/deprecated/kriging/lhs.py @@ -0,0 +1,21 @@ +import numpy as np +from pyDOE import lhs + + +def lh2DWithCorners(n, x1_range, x2_range, crtrn): + ''' This function generates a 2D Latin Hypercube distribution vector, scaled up + to the input domain range, and containing the 4 corners of the domain. 
+ + :param n: number of samples to generate (including the 4 corners) + :param x1_range: range of the 1st input variable + :param x2_range: range of the 2nd input variable + :param crtrn: criterion for Latin Hypercube sampling + :return: 2xn array of generated samples + ''' + + lh = lhs(2, samples=(n - 4), criterion=crtrn) + corners = np.array([[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]]) + lhc = np.vstack((lh, corners)) + lhc[:, 0] = lhc[:, 0] * (x1_range[1] - x1_range[0]) + x1_range[0] + lhc[:, 1] = lhc[:, 1] * (x2_range[1] - x2_range[0]) + x2_range[0] + return lhc diff --git a/deprecated/kriging/test_pykrige_Vmeff.py b/deprecated/kriging/test_pykrige_Vmeff.py new file mode 100644 index 0000000..77f9751 --- /dev/null +++ b/deprecated/kriging/test_pykrige_Vmeff.py @@ -0,0 +1,329 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# @Author: Theo Lemaire +# @Date: 2017-02-15 15:59:37 +# @Email: theo.lemaire@epfl.ch +# @Last Modified by: Theo Lemaire +# @Last Modified time: 2017-04-26 12:08:41 + +''' Fit a kriging model to a discrete 2D map of effective potentials + for various charges and acoustic amplitudes, and use kriging predictor + to generate a new 2D map of effective potentials within the original input range. ''' + +import os +import re +import ntpath +import pickle +import matplotlib.pyplot as plt +import matplotlib.cm as cm +import numpy as np +from scipy.spatial.distance import pdist, squareform +from utils import OpenFilesDialog, rescale, rmse +from pykrige.ok import OrdinaryKriging +import pykrige.kriging_tools as kt + + +class Variable: + ''' dummy class to contain information about the variable ''' + + name = '' + unit = '' + lookup = '' + factor = 1. + max_error = 0. + + def __init__(self, var_name, var_unit, var_lookup, var_factor, var_max_error): + self.name = var_name + self.unit = var_unit + self.factor = var_factor + self.lookup = var_lookup + self.max_error = var_max_error + + +# Select data files (PKL) +lookup_root = '../Output/lookups 0.35MHz charge extended/' +lookup_absroot = os.path.abspath(lookup_root) +lookup_filepaths = OpenFilesDialog(lookup_absroot, 'pkl') +rgxp = re.compile('lookups_a(\d*.\d*)nm_f(\d*.\d*)kHz_A(\d*.\d*)kPa_dQ(\d*.\d*)nC_cm2.pkl') + +# Set data variable and Kriging parameters +# varinf = Variable('\\alpha_{m, eff}', 'ms^{-1}', 'alpha_m_eff', 1e-3, 1e-10) +varinf = Variable('V_{m, eff}', 'mV', 'V_eff', 1., 1e-8) +nQ_sparse_target = 30 +namps_sparse_target = 10 + +plot_all = True + +# Check dialog output +if not lookup_filepaths: + print('error: no lookup table selected') +else: + print('importing lookup tables') + nfiles = len(lookup_filepaths) + amps = np.empty(nfiles) + + for i in range(nfiles): + + # Load lookup table + lookup_filename = ntpath.basename(lookup_filepaths[i]) + mo = rgxp.fullmatch(lookup_filename) + if not mo: + print('Error: lookup file does not match regular expression pattern') + else: + # Retrieve stimulus parameters + Fdrive = float(mo.group(2)) * 1e3 + Adrive = float(mo.group(3)) * 1e3 + dQ = float(mo.group(4)) * 1e-2 + amps[i] = Adrive + if Adrive == 0: + baseline_ind = i + + # Retrieve coefficients data + with open(lookup_filepaths[i], 'rb') as fh: + lookup = pickle.load(fh) + if i == 0: + Qm = lookup['Q'] + nQ = np.size(Qm) + var = np.empty((nfiles, nQ)) + var[i, :] = lookup[varinf.lookup] + else: + if np.array_equal(Qm, lookup['Q']): + var[i, :] = lookup[varinf.lookup] + else: + print('Error: charge vector not consistent') + + # Compute data metrics + namps = amps.size + Amin = np.amin(amps) + Amax = 
np.amax(amps) + Qmin = np.amin(Qm) + Qmax = np.amax(Qm) + varmin = np.amin(var) + varmax = np.amax(var) + print('Initial data:', nQ, 'charges,', namps, 'amplitudes') + + # Resample arrays + print('resampling arrays') + assert nQ_sparse_target <= nQ and namps_sparse_target <= namps + Qm_sampling_factor = int(nQ / nQ_sparse_target) + amps_sampling_factor = int(namps / namps_sparse_target) + Qm_sparse = Qm[::Qm_sampling_factor] + amps_sparse = amps[::amps_sampling_factor] + nQ_sparse = Qm_sparse.size + namps_sparse = amps_sparse.size + var_sparse = var[::amps_sampling_factor, ::Qm_sampling_factor] + Qmin_sparse = np.amin(Qm_sparse) + Qmax_sparse = np.amax(Qm_sparse) + Amin_sparse = np.amin(amps_sparse) + Amax_sparse = np.amax(amps_sparse) + print('Sparse data:', nQ_sparse, 'charges,', namps_sparse, 'amplitudes') + + # Normalize and serialize + print('normalizing and serializing sparse data') + Qm_sparse_norm = rescale(Qm_sparse, Qmin_sparse, Qmax_sparse) + amps_sparse_norm = rescale(amps_sparse, Amin_sparse, Amax_sparse) + Qm_sparse_norm_grid, amps_sparse_norm_grid = np.meshgrid(Qm_sparse_norm, amps_sparse_norm) + Qm_sparse_norm_ser = np.reshape(Qm_sparse_norm_grid, nQ_sparse * namps_sparse) + amps_sparse_norm_ser = np.reshape(amps_sparse_norm_grid, nQ_sparse * namps_sparse) + var_sparse_ser = np.reshape(var_sparse, nQ_sparse * namps_sparse) + + # Compute normalized distance matrix and data semivariogram + # print('computing normalized distance matrix and data semi-variogram') + # norm_dist = squareform(pdist(np.array([amps_sparse_norm_ser, Qm_sparse_norm_ser]).transpose())) + # N = norm_dist.shape[0] + # norm_dist_ub = 1.6 + # assert np.amax(norm_dist) < norm_dist_ub,\ + # 'Error: max normalized distance greater than semi-variogram upper bound' + # bw = 0.1 # lag bandwidth + # lags = np.arange(0, 1.6, bw) # lag array + # nlags = lags.size + # sv = np.empty(nlags) + # for k in range(nlags): + # # print('lag = ', lags[k]) + # Z = list() + # for i in range(N): + # for j in range(i + 1, N): + # if norm_dist[i, j] >= lags[k] - bw and norm_dist[i, j] <= lags[k] + bw: + # Z.append((var_sparse_ser[i] - var_sparse_ser[j])**2.0) + # sv[k] = np.sum(Z) / (2.0 * len(Z)) + + + # Fit kriging model + print('fitting kriging model to sparse data') + OK = OrdinaryKriging(amps_sparse_norm_ser, Qm_sparse_norm_ser, var_sparse_ser, + variogram_model='linear') + + # Proof-of-concept: dummy prediction at known values of charge and amplitude + print('re-computing sparse data from kriging predictor') + var_sparse_krig, _ = OK.execute('grid', rescale(amps_sparse, Amin_sparse, Amax_sparse), + rescale(Qm_sparse, Qmin_sparse, Qmax_sparse)) + var_sparse_krig = var_sparse_krig.transpose() + var_sparse_max_abs_error = np.amax(np.abs(var_sparse - var_sparse_krig)) * varinf.factor + assert var_sparse_max_abs_error < varinf.max_error,\ + 'High Kriging error in training set ({:.2e} {})'.format(var_sparse_max_abs_error, + varinf.unit) + + # Predict data at unknown values + print('re-computing original data from kriging predictor') + var_krig, var_krig_ss = OK.execute('grid', rescale(amps, Amin, Amax), rescale(Qm, Qmin, Qmax)) + var_krig = var_krig.transpose() + var_krig_ss = var_krig_ss.transpose() + var_krig_std = np.sqrt(var_krig_ss) + var_krig_std_min = np.amin(var_krig_std) + var_krig_std_max = np.amax(var_krig_std) + varmin = np.amin([varmin, np.amin(var_krig)]) + varmax = np.amin([varmax, np.amax(var_krig)]) + var_levels = np.linspace(varmin, varmax, 20) * varinf.factor + var_abs_diff = np.abs(var - var_krig) + 
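[Editor's note] For reference, a self-contained minimal sketch of the OrdinaryKriging fit/predict pattern used in this script, run on a synthetic surface rather than the lookup data; the 'linear' variogram, grid-style prediction and final transpose mirror the calls above.

    import numpy as np
    from pykrige.ok import OrdinaryKriging

    rng = np.random.default_rng(0)
    x = rng.uniform(0., 1., 50)            # normalized amplitude coordinate
    y = rng.uniform(0., 1., 50)            # normalized charge coordinate
    z = np.sin(3. * x) * np.cos(2. * y)    # toy response standing in for V_eff

    ok = OrdinaryKriging(x, y, z, variogram_model='linear')
    gridx = np.linspace(0., 1., 25)        # prediction amplitudes (normalized)
    gridy = np.linspace(0., 1., 40)        # prediction charges (normalized)
    zhat, ss = ok.execute('grid', gridx, gridy)   # zhat shape: (len(gridy), len(gridx))
    zhat = zhat.T                          # -> (amplitudes, charges), as in the script above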
var_abs_diff_max = np.amax(var_abs_diff) + var_diff_levels = np.linspace(0., np.amax(var_abs_diff), 20) * varinf.factor + var_std_levels = np.linspace(0., np.amax(var_krig_std_max), 20) * varinf.factor + + # Compare original and predicted profiles + print('comparing original and predicted profiles') + var_rmse = rmse(var, var_krig) * varinf.factor + var_max_abs_error = np.amax(np.abs(var - var_krig)) * varinf.factor + print('RMSE = {:.2f} {}, MAE = {:.2f} {}'.format(var_rmse, varinf.unit, + var_max_abs_error, varinf.unit)) + + # Plotting + print('plotting') + + mymap = cm.get_cmap('viridis') + sm_amp = plt.cm.ScalarMappable(cmap=mymap, norm=plt.Normalize(Amin * 1e-3, Amax * 1e-3)) + sm_amp._A = [] + sm_var = plt.cm.ScalarMappable(cmap=mymap, norm=plt.Normalize(varmin * varinf.factor, + varmax * varinf.factor)) + sm_var._A = [] + sm_var_diff = plt.cm.ScalarMappable(cmap=mymap, + norm=plt.Normalize(0., var_abs_diff_max * varinf.factor)) + sm_var_diff._A = [] + sm_var_std = plt.cm.ScalarMappable(cmap=mymap, + norm=plt.Normalize(var_krig_std_min * varinf.factor, + var_krig_std_max * varinf.factor)) + sm_var_std._A = [] + + if plot_all: + + + # True function map + fig, ax = plt.subplots(figsize=(10, 6)) + ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=20) + ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=20) + ax.set_title('$' + varinf.name + '(Q_m,\ A_{drive})$ map', fontsize=20) + ax.contourf(Qm * 1e5, amps * 1e-3, var * varinf.factor, levels=var_levels, cmap='viridis') + xgrid, ygrid, = np.meshgrid(Qm * 1e5, amps * 1e-3) + ax.scatter(xgrid, ygrid, c='black', s=5) + fig.subplots_adjust(right=0.85) + cbar_ax = fig.add_axes([0.87, 0.1, 0.02, 0.8]) + fig.add_axes() + fig.colorbar(sm_var, cax=cbar_ax) + cbar_ax.set_ylabel('$' + varinf.name + '\ (' + varinf.unit + ')$', fontsize=20) + + # True function profiles + fig, ax = plt.subplots(figsize=(10, 6)) + ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=20) + ax.set_ylabel('$' + varinf.name + '\ (' + varinf.unit + ')$', fontsize=20) + ax.set_title('$' + varinf.name + '(Q_m)$ for different amplitudes', fontsize=20) + for i in range(namps): + ax.plot(Qm * 1e5, var[i, :] * varinf.factor, c=mymap(rescale(amps[i], Amin, Amax))) + fig.subplots_adjust(right=0.85) + cbar_ax = fig.add_axes([0.87, 0.1, 0.02, 0.8]) + fig.add_axes() + fig.colorbar(sm_amp, cax=cbar_ax) + cbar_ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=20) + + # Sparse function profiles + # fig, ax = plt.subplots(figsize=(10, 6)) + # ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=20) + # ax.set_ylabel('$' + varinf.name + '\ (' + varinf.unit + ')$', fontsize=20) + # ax.set_title('sparse $' + varinf.name + '(Q_m)$ for different amplitudes', fontsize=20) + # for i in range(namps_sparse): + # ax.plot(Qm_sparse * 1e5, var_sparse[i, :] * varinf.factor, + # c=mymap(rescale(amps_sparse[i], Amin, Amax))) + # fig.subplots_adjust(right=0.85) + # cbar_ax = fig.add_axes([0.87, 0.1, 0.02, 0.8]) + # fig.add_axes() + # fig.colorbar(sm_amp, cax=cbar_ax) + # cbar_ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=20) + + # 3: sparse var(Qm, Adrive) scattered map + # fig, ax = plt.subplots(figsize=(10, 6)) + # ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=20) + # ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=20) + # ax.set_title('sparse $' + varinf.name + '(Q_m,\ A_{drive})$ scattered map', fontsize=20) + # xgrid, ygrid, = np.meshgrid(Qm_sparse * 1e5, amps_sparse * 1e-3) + # ax.scatter(xgrid, ygrid, c=var_sparse * varinf.factor, cmap='viridis') + # fig.subplots_adjust(right=0.85) + # cbar_ax = fig.add_axes([0.87, 0.1, 0.02, 0.8]) 
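[Editor's note] The recurring sm._A = [] lines above rely on a common matplotlib idiom: a ScalarMappable is built from only a colormap and a norm, then given an empty data array so that fig.colorbar() accepts it, which yields a standalone colorbar for data drawn as individual lines. A minimal sketch of the same idiom with hypothetical amplitude values:

    import numpy as np
    import matplotlib.pyplot as plt

    amps_kPa = np.linspace(0., 600., 7)            # hypothetical amplitudes
    fig, ax = plt.subplots()
    for A in amps_kPa:
        ax.plot([0., 1.], [A, A], c=plt.cm.viridis(A / 600.))   # one line per amplitude
    sm = plt.cm.ScalarMappable(cmap='viridis', norm=plt.Normalize(0., 600.))
    sm._A = []                                     # dummy data array -> standalone colorbar
    fig.colorbar(sm, ax=ax, label='$A_{drive}\\ (kPa)$')
    plt.show()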
+ # fig.add_axes() + # fig.colorbar(sm_var, cax=cbar_ax) + # cbar_ax.set_ylabel('$' + varinf.name + '\ (' + varinf.unit + ')$', fontsize=20) + + # # 4: data semivariogram + # fig, ax = plt.subplots(figsize=(10, 6)) + # ax.set_xlabel('Normalized lag', fontsize=20) + # ax.set_ylabel('Semivariance', fontsize=20) + # ax.set_title('Semivariogram', fontsize=20) + # ax.plot(lags, sv, '.-') + + # Estimate map + fig, ax = plt.subplots(figsize=(10, 6)) + ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=20) + ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=20) + ax.set_title('$' + varinf.name + '(Q_m,\ A_{drive})$ estimate map', fontsize=20) + ax.contourf(Qm * 1e5, amps * 1e-3, var_krig * varinf.factor, levels=var_levels, + cmap='viridis') + xgrid, ygrid, = np.meshgrid(Qm_sparse * 1e5, amps_sparse * 1e-3) + ax.scatter(xgrid, ygrid, c='black', s=5) + fig.subplots_adjust(right=0.85) + cbar_ax = fig.add_axes([0.87, 0.1, 0.02, 0.8]) + fig.add_axes() + fig.colorbar(sm_var, cax=cbar_ax) + cbar_ax.set_ylabel('$' + varinf.name + '\ (' + varinf.unit + ')$', fontsize=20) + + + # 5: Prediction: more dense Vm_krig(Qm) plots for each Adrive + fig, ax = plt.subplots(figsize=(10, 6)) + ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=20) + ax.set_ylabel('$' + varinf.name + '\ (' + varinf.unit + ')$', fontsize=20) + ax.set_title('Kriging: prediction of original $' + varinf.name + '(Q_m)$ profiles', + fontsize=20) + for i in range(namps): + ax.plot(Qm * 1e5, var_krig[i, :] * varinf.factor, c=mymap(rescale(amps[i], Amin, Amax))) + fig.subplots_adjust(right=0.85) + cbar_ax = fig.add_axes([0.87, 0.1, 0.02, 0.8]) + fig.add_axes() + fig.colorbar(sm_amp, cax=cbar_ax) + cbar_ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=20) + + # 6: Vm(Qm, Adrive) kriging error contour map + fig, ax = plt.subplots(figsize=(10, 6)) + ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=20) + ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=20) + ax.set_title('Kriging error: $' + varinf.name + '(Q_m,\ A_{drive})$ contour map', fontsize=20) + ax.contourf(Qm * 1e5, amps * 1e-3, var_abs_diff * varinf.factor, levels=var_diff_levels, + cmap='viridis') + xgrid, ygrid, = np.meshgrid(Qm_sparse * 1e5, amps_sparse * 1e-3) + ax.scatter(xgrid, ygrid, c='black', s=5) + fig.subplots_adjust(right=0.85) + cbar_ax = fig.add_axes([0.87, 0.1, 0.02, 0.8]) + fig.add_axes() + fig.colorbar(sm_var_diff, cax=cbar_ax) + cbar_ax.set_ylabel('$' + varinf.name + '\ abs.\ error\ (' + varinf.unit + ')$', fontsize=20) + + # 6: Vm(Qm, Adrive) kriging predicted error contour map + fig, ax = plt.subplots(figsize=(10, 6)) + ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=20) + ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=20) + ax.set_title('Kriging predicted error: $' + varinf.name + '(Q_m,\ A_{drive})$ contour map', fontsize=20) + ax.contourf(Qm * 1e5, amps * 1e-3, var_krig_std * varinf.factor, cmap='viridis') + xgrid, ygrid, = np.meshgrid(Qm_sparse * 1e5, amps_sparse * 1e-3) + ax.scatter(xgrid, ygrid, c='black', s=5) + fig.subplots_adjust(right=0.85) + cbar_ax = fig.add_axes([0.87, 0.1, 0.02, 0.8]) + fig.add_axes() + fig.colorbar(sm_var_std, cax=cbar_ax) + cbar_ax.set_ylabel('$' + varinf.name + '\ abs.\ error\ (' + varinf.unit + ')$', fontsize=20) + + plt.show() diff --git a/deprecated/kriging/test_pykriging1D.py b/deprecated/kriging/test_pykriging1D.py new file mode 100644 index 0000000..35fe91c --- /dev/null +++ b/deprecated/kriging/test_pykriging1D.py @@ -0,0 +1,116 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# @Author: Theo Lemaire +# @Date: 2017-04-24 11:04:39 +# @Email: 
theo.lemaire@epfl.ch +# @Last Modified by: Theo Lemaire +# @Last Modified time: 2017-05-26 13:44:14 + +''' Predict a 1D Vmeff profile using the PyKriging module. ''' + +import pickle +import numpy as np +import matplotlib.pyplot as plt +from pyKriging.krige import kriging + + +class Variable: + ''' dummy class to contain information about the variable ''' + + name = '' + unit = '' + lookup = '' + factor = 1. + max_error = 0. + + def __init__(self, var_name, var_unit, var_lookup, var_factor, var_max_error): + self.name = var_name + self.unit = var_unit + self.factor = var_factor + self.lookup = var_lookup + self.max_error = var_max_error + + +# Set data variable and Kriging parameters +varinf = Variable('V_{m, eff}', 'mV', 'V_eff', 1., 1e-1) +# varinf = Variable('\\alpha_{m, eff}', 'ms^{-1}', 'alpha_m_eff', 1e-3, 1e1) +# varinf = Variable('\\beta_{m, eff}', 'ms^{-1}', 'beta_m_eff', 1e-3, 5e0) +# varinf = Variable('\\alpha_{h, eff}', 'ms^{-1}', 'alpha_h_eff', 1e-3, 1e1) +# varinf = Variable('\\beta_{h, eff}', 'ms^{-1}', 'beta_h_eff', 1e-3, 1e1) + + +# Define true function by interpolation from specific profile +def f(x): + return np.interp(x, Qm, xvect) + + +# Load coefficient profile +dirpath = 'C:/Users/admin/Google Drive/PhD/NBLS model/Output/lookups 0.35MHz charge extended/' +filepath = dirpath + 'lookups_a32.0nm_f350.0kHz_A100.0kPa_dQ1.0nC_cm2.pkl' +filepath0 = dirpath + 'lookups_a32.0nm_f350.0kHz_A0.0kPa_dQ1.0nC_cm2.pkl' +with open(filepath, 'rb') as fh: + lookup = pickle.load(fh) + Qm = lookup['Q'] + xvect = lookup[varinf.lookup] +with open(filepath0, 'rb') as fh: + lookup = pickle.load(fh) + xvect0 = lookup[varinf.lookup] + +# xvect = xvect - xvect0 + + +print('defining estimation vector') +x = np.atleast_2d(np.linspace(-150., 150., 1000) * 1e-5).T +y = f(x).ravel() + +print('defining prediction vector') +X0 = np.atleast_2d(np.linspace(-150., 150., 10) * 1e-5).T +Y0 = f(X0).ravel() + +print('creating kriging model') +k = kriging(X0, Y0) + +print('training kriging model') +k.train() + +print('predicting') +y_pred0 = np.array([k.predict(xx) for xx in x]) + +X = X0 +Y = Y0 + +numiter = 10 +for i in range(numiter): + print('Infill iteration {0} of {1}....'.format(i + 1, numiter)) + newpoints = k.infill(1, method='error') + for point in newpoints: + newX = k.inversenormX(point) + newY = f(newX)[0] + print('adding point ({:.3f}, {:.3f})'.format(newX[0] * 1e5, newY * varinf.factor)) + X = np.append(X, [newX], axis=0) + Y = np.append(Y, newY) + k.addPoint(newX, newY, norm=True) + k.train() + +y_pred = np.array([k.predict(xx) for xx in x]) + +fig, ax = plt.subplots() +ax.plot(x * 1e5, y * varinf.factor, 'r:', label=u'true function') +ax.plot(X0 * 1e5, Y0 * varinf.factor, 'r.', markersize=10, label=u'Initial observations') +ax.plot(x * 1e5, y_pred0 * varinf.factor, 'b-', label=u'Initial prediction') +ax.set_xlabel('$Q_m\ (nC/cm^2)$') +ax.set_ylabel('$' + varinf.name + '\ (' + varinf.unit + ')$') +ax.legend() + + +fig, ax = plt.subplots() +ax.plot(x * 1e5, y * varinf.factor, 'r:', label=u'true function') +ax.plot(X * 1e5, Y * varinf.factor, 'r.', markersize=10, label=u'Final observations') +ax.plot(x * 1e5, y_pred * varinf.factor, 'b-', label=u'Final prediction') +ax.set_xlabel('$Q_m\ (nC/cm^2)$') +ax.set_ylabel('$' + varinf.name + '\ (' + varinf.unit + ')$') +ax.legend() + + +plt.show() + diff --git a/deprecated/kriging/test_pykriging2D.py b/deprecated/kriging/test_pykriging2D.py new file mode 100644 index 0000000..c98bf1f --- /dev/null +++ b/deprecated/kriging/test_pykriging2D.py @@ -0,0 
+1,272 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# @Author: Theo Lemaire +# @Date: 2017-04-24 11:04:39 +# @Email: theo.lemaire@epfl.ch +# @Last Modified by: Theo Lemaire +# @Last Modified time: 2017-05-26 14:30:02 + +''' Predict a 1D Vmeff profile using the PyKriging module. ''' + +import os, ntpath +import pickle +import re +import numpy as np +from scipy.interpolate import griddata +import matplotlib.pyplot as plt +import matplotlib.cm as cm +from pyKriging.krige import kriging +from utils import OpenFilesDialog, rescale, rmse + + +class Variable: + ''' dummy class to contain information about the variable ''' + + name = '' + unit = '' + lookup = '' + factor = 1. + max_error = 0. + + def __init__(self, var_name, var_unit, var_lookup, var_factor, var_max_error): + self.name = var_name + self.unit = var_unit + self.factor = var_factor + self.lookup = var_lookup + self.max_error = var_max_error + + +# Set data variable and Kriging parameters +varinf = Variable('V_{m, eff}', 'mV', 'V_eff', 1., 1e-1) +# varinf = Variable('\\alpha_{m, eff}', 'ms^{-1}', 'alpha_m_eff', 1e-3, 1e1) +# varinf = Variable('\\beta_{m, eff}', 'ms^{-1}', 'beta_m_eff', 1e-3, 5e0) +# varinf = Variable('\\alpha_{h, eff}', 'ms^{-1}', 'alpha_h_eff', 1e-3, 1e1) +# varinf = Variable('\\beta_{h, eff}', 'ms^{-1}', 'beta_h_eff', 1e-3, 1e1) + + +# Define true function by interpolation from specific profile +def f(x): + return griddata(points, values, x, method='linear', rescale=True) + + +# Select data files (PKL) +lookup_root = '../Output/lookups 0.35MHz charge extended/' +lookup_absroot = os.path.abspath(lookup_root) +lookup_filepaths = OpenFilesDialog(lookup_absroot, 'pkl') +rgxp = re.compile('lookups_a(\d*.\d*)nm_f(\d*.\d*)kHz_A(\d*.\d*)kPa_dQ(\d*.\d*)nC_cm2.pkl') +pltdir = 'C:/Users/admin/Desktop/PyKriging output/' + +# Set data variable and Kriging parameters +varinf = Variable('V_{m, eff}', 'mV', 'V_eff', 1., 1.0) +# varinf = Variable('\\alpha_{m, eff}', 'ms^{-1}', 'alpha_m_eff', 1e-3, 1e4) +# varinf = Variable('\\beta_{m, eff}', 'ms^{-1}', 'beta_m_eff', 1e-3, 5e0) +# varinf = Variable('\\alpha_{h, eff}', 'ms^{-1}', 'alpha_h_eff', 1e-3, 1e1) +# varinf = Variable('\\beta_{h, eff}', 'ms^{-1}', 'beta_h_eff', 1e-3, 1e1) + +# Check dialog output +if not lookup_filepaths: + print('error: no lookup table selected') +else: + print('importing lookup tables') + nfiles = len(lookup_filepaths) + amps = np.empty(nfiles) + + for i in range(nfiles): + + # Load lookup table + lookup_filename = ntpath.basename(lookup_filepaths[i]) + mo = rgxp.fullmatch(lookup_filename) + if not mo: + print('Error: lookup file does not match regular expression pattern') + else: + # Retrieve stimulus parameters + Fdrive = float(mo.group(2)) * 1e3 + Adrive = float(mo.group(3)) * 1e3 + dQ = float(mo.group(4)) * 1e-2 + amps[i] = Adrive + if Adrive == 0: + baseline_ind = i + + # Retrieve coefficients data + with open(lookup_filepaths[i], 'rb') as fh: + lookup = pickle.load(fh) + if i == 0: + Qm = lookup['Q'] + nQ = np.size(Qm) + var = np.empty((nfiles, nQ)) + var[i, :] = lookup[varinf.lookup] + else: + if np.array_equal(Qm, lookup['Q']): + var[i, :] = lookup[varinf.lookup] + else: + print('Error: charge vector not consistent') + + # Compute data metrics + namps = amps.size + Amin = np.amin(amps) + Amax = np.amax(amps) + Qmin = np.amin(Qm) + Qmax = np.amax(Qm) + varmin = np.amin(var) + varmax = np.amax(var) + print('Initial data:', nQ, 'charges,', namps, 'amplitudes') + + # Define points for interpolation function + Q_mesh, A_mesh = np.meshgrid(Qm, amps) 
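[Editor's note] For clarity, a self-contained toy version of the f(x) interpolator defined at the top of this file, which queries scattered (amplitude, charge) samples through scipy's griddata; the surface below is synthetic and only stands in for the lookup data, using the same (A, Q) column ordering as the points array built just below.

    import numpy as np
    from scipy.interpolate import griddata

    A_toy = np.linspace(0., 600e3, 10)                   # Pa
    Q_toy = np.linspace(-80e-5, 40e-5, 50)               # C/m2
    Qg, Ag = np.meshgrid(Q_toy, A_toy)
    vals = np.exp(-Ag / 300e3) * Qg                      # synthetic stand-in for V_eff
    pts = np.column_stack([Ag.flatten(), Qg.flatten()])  # columns: (A, Q)

    xi = np.array([[150e3, -20e-5], [450e3, 10e-5]])     # query points (A, Q)
    print(griddata(pts, vals.flatten(), xi, method='linear', rescale=True))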
+ points = np.column_stack([A_mesh.flatten(), Q_mesh.flatten()]) + values = var.flatten() + + # Define algorithmic parameters + n_iter_min = 10 + n_iter_max = 30 + MAE_pred = [] + MAE_true = [] + RMSE_true = [] + + # Define estimation matrix + nAest = 20 + nQest = 100 + print('Initial estimation matrix:', nQest, 'charges,', nAest, 'amplitudes') + Aest = np.linspace(Amin, Amax, nAest) + Qest = np.linspace(Qmin, Qmax, nQest) + Qest_mesh, Aest_mesh = np.meshgrid(Qest, Aest) + x = np.column_stack([Aest_mesh.flatten(), Qest_mesh.flatten()]) + ytrue = f(x).ravel().reshape((nAest, nQest)) + + # Define initial observation matrix + nAobs = 5 + nQobs = 20 + print('Initial estimation matrix:', nQobs, 'charges,', nAobs, 'amplitudes') + Aobs = np.linspace(Amin, Amax, nAobs) + Qobs = np.linspace(Qmin, Qmax, nQobs) + Qobs_mesh, Aobs_mesh = np.meshgrid(Qobs, Aobs) + X0 = np.column_stack([Aobs_mesh.flatten(), Qobs_mesh.flatten()]) + Y0 = f(X0).ravel() + + print('creating Kriging model') + k = kriging(X0, Y0) + + print('initial training') + k.train() + + print('predicting') + y0 = np.array([k.predict(xx) for xx in x]) + y0 = y0.reshape((nAest, nQest)) + + X = X0 + Y = Y0 + + n_iter = 10 + for i in range(n_iter): + print('Infill iteration {0} of {1}....'.format(i + 1, n_iter)) + newpoints = k.infill(2, method='error') + for point in newpoints: + newX = k.inversenormX(point) + newY = f(newX)[0] + print('adding point (({:.3f}, {:.3f}), {:.3f})'.format( + newX[0] * 1e-3, newX[1] * 1e5, newY * varinf.factor)) + X = np.append(X, [newX], axis=0) + Y = np.append(Y, newY) + k.addPoint(newX, newY, norm=True) + k.train() + + y = np.array([k.predict(xx) for xx in x]) + y = y.reshape((nAest, nQest)) + + # Plotting + mymap = cm.get_cmap('viridis') + sm_amp = plt.cm.ScalarMappable(cmap=mymap, norm=plt.Normalize(Amin * 1e-3, Amax * 1e-3)) + sm_amp._A = [] + var_levels = np.linspace(varmin, varmax, 20) * varinf.factor + sm_var = plt.cm.ScalarMappable(cmap=mymap, norm=plt.Normalize(varmin * varinf.factor, varmax * varinf.factor)) + sm_var._A = [] + + # True function profiles + fig, ax = plt.subplots(figsize=(10, 6)) + ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=20) + ax.set_ylabel('$' + varinf.name + '\ (' + varinf.unit + ')$', fontsize=20) + ax.set_title('True function profiles', fontsize=20) + for i in range(nAest): + ax.plot(Qest * 1e5, ytrue[i, :] * varinf.factor, c=mymap(rescale(Aest[i], Amin, Amax))) + fig.subplots_adjust(right=0.85) + cbar_ax = fig.add_axes([0.87, 0.1, 0.02, 0.8]) + fig.add_axes() + fig.colorbar(sm_amp, cax=cbar_ax) + cbar_ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=20) + fig.savefig(pltdir + 'fig1.png', format='png') + + # True function map + fig, ax = plt.subplots(figsize=(10, 6)) + ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=20) + ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=20) + ax.set_title('True function map', fontsize=20) + ax.contourf(Qest * 1e5, Aest * 1e-3, ytrue * varinf.factor, levels=var_levels, + cmap='viridis') + fig.subplots_adjust(right=0.85) + cbar_ax = fig.add_axes([0.87, 0.1, 0.02, 0.8]) + fig.add_axes() + fig.colorbar(sm_var, cax=cbar_ax) + cbar_ax.set_ylabel('$' + varinf.name + '\ (' + varinf.unit + ')$', fontsize=20) + fig.savefig(pltdir + 'fig2.png', format='png') + + # Initial estimation profiles + fig, ax = plt.subplots(figsize=(10, 6)) + ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=20) + ax.set_ylabel('$' + varinf.name + '\ (' + varinf.unit + ')$', fontsize=20) + ax.set_title('Initial estimation profiles', fontsize=20) + for i in range(nAest): + ax.plot(Qest * 1e5, y0[i, 
:] * varinf.factor, c=mymap(rescale(Aest[i], Amin, Amax))) + fig.subplots_adjust(right=0.85) + cbar_ax = fig.add_axes([0.87, 0.1, 0.02, 0.8]) + fig.add_axes() + fig.colorbar(sm_amp, cax=cbar_ax) + cbar_ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=20) + fig.savefig(pltdir + 'fig3.png', format='png') + + # Initial estimation map + fig, ax = plt.subplots(figsize=(10, 6)) + ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=20) + ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=20) + ax.set_title('Initial estimation map', fontsize=20) + ax.contourf(Qest * 1e5, Aest * 1e-3, y0 * varinf.factor, levels=var_levels, + cmap='viridis') + ax.scatter(X0[:, 1] * 1e5, X0[:, 0] * 1e-3, c='black') + fig.subplots_adjust(right=0.85) + cbar_ax = fig.add_axes([0.87, 0.1, 0.02, 0.8]) + fig.add_axes() + fig.colorbar(sm_var, cax=cbar_ax) + cbar_ax.set_ylabel('$' + varinf.name + '\ (' + varinf.unit + ')$', fontsize=20) + fig.savefig(pltdir + 'fig4.png', format='png') + + # Final estimation profiles + fig, ax = plt.subplots(figsize=(10, 6)) + ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=20) + ax.set_ylabel('$' + varinf.name + '\ (' + varinf.unit + ')$', fontsize=20) + ax.set_title('Final estimation profiles', fontsize=20) + for i in range(nAest): + ax.plot(Qest * 1e5, y[i, :] * varinf.factor, c=mymap(rescale(Aest[i], Amin, Amax))) + fig.subplots_adjust(right=0.85) + cbar_ax = fig.add_axes([0.87, 0.1, 0.02, 0.8]) + fig.add_axes() + fig.colorbar(sm_amp, cax=cbar_ax) + cbar_ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=20) + fig.savefig(pltdir + 'fig7.png', format='png') + + # Final estimation map + fig, ax = plt.subplots(figsize=(10, 6)) + ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=20) + ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=20) + ax.set_title('Final estimation map', fontsize=20) + ax.contourf(Qest * 1e5, Aest * 1e-3, y * varinf.factor, levels=var_levels, + cmap='viridis') + ax.scatter(X[:, 1] * 1e5, X[:, 0] * 1e-3, c='black') + fig.subplots_adjust(right=0.85) + cbar_ax = fig.add_axes([0.87, 0.1, 0.02, 0.8]) + fig.add_axes() + fig.colorbar(sm_var, cax=cbar_ax) + cbar_ax.set_ylabel('$' + varinf.name + '\ (' + varinf.unit + ')$', fontsize=20) + fig.savefig(pltdir + 'fig8.png', format='png') + + +plt.show() + diff --git a/deprecated/miscellaneous/plot_ZeqQS0.py b/deprecated/miscellaneous/plot_ZeqQS0.py new file mode 100644 index 0000000..06c413a --- /dev/null +++ b/deprecated/miscellaneous/plot_ZeqQS0.py @@ -0,0 +1,52 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# @Author: Theo Lemaire +# @Date: 2017-02-13 14:49:35 +# @Email: theo.lemaire@epfl.ch +# @Last Modified by: Theo Lemaire +# @Last Modified time: 2017-06-29 18:43:22 + +""" Plot the balanced quasi-static deflection of the system as a function + of charge and gas content, in the absence of acoustic stimulus. 
""" + +import numpy as np +import matplotlib.pyplot as plt + +import PyNICE +from PyNICE.utils import LoadParams + + +# Initialization: create a NBLS instance +params = LoadParams() +a = 32e-9 # in-plane radius (m) +d = 0.0e-6 # embedding tissue thickness (m) +geom = {"a": a, "d": d} +Fdrive = 0.0 # dummy stimulation frequency +Qm0 = -71.9e-5 +bls = PyNICE.BilayerSonophore(geom, params, Fdrive, Qm0) + + +# Define charge and gas content vectors +nQ = 200 +ngas = 10 +charges = np.linspace(-0.8, 0.4, nQ) * 1e-5 +gas = np.linspace(0.5 * bls.ng0, 2.0 * bls.ng0, ngas) + + +# Compute balance deflections vs charges and gas content +ZeqQS = np.empty((ngas, nQ)) +for i in range(ngas): + for j in range(nQ): + ZeqQS[i, j] = bls.balancedefQS(gas[i], charges[j]) + + +# Plotting +fig, ax = plt.subplots() +fig.canvas.set_window_title("balance deflection vs. charge") +ax.set_xlabel('$Q_m\ (nC/cm^2)$', fontsize=18) +ax.set_ylabel('$Z_{eq}\ (nm)$', fontsize=18) +for i in range(ngas): + ax.plot(charges * 1e5, ZeqQS[i, :] * 1e9, + label='ng = {:.2f}e-22 mole'.format(gas[i] * 1e22)) +ax.legend(fontsize=18) +plt.show() diff --git a/deprecated/miscellaneous/postpro_rmse_charge.py b/deprecated/miscellaneous/postpro_rmse_charge.py new file mode 100644 index 0000000..32077d1 --- /dev/null +++ b/deprecated/miscellaneous/postpro_rmse_charge.py @@ -0,0 +1,116 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# @Author: Theo Lemaire +# @Date: 2016-11-01 16:35:43 +# @Email: theo.lemaire@epfl.ch +# @Last Modified by: Theo Lemaire +# @Last Modified time: 2017-06-07 16:14:54 + +""" Compute RMSE between charge profiles of NBLS output. """ + +import sys +import os +import pickle +import ntpath +import numpy as np + +sys.path.append('C:/Users/admin/Google Drive/PhD/NICE model/NICEPython') +from NICE.utils import OpenFilesDialog, rmse, find_nearest +from NICE.constants import * + +# Define options +pkl_root = "../../Output/test NBLS/" +t_offset = 10e-3 # s + +# Select data files (PKL) +abs_root = os.path.abspath(pkl_root) +pkl_filepaths = OpenFilesDialog(abs_root, 'pkl') + +# Check dialog output +if not pkl_filepaths: + print('error: no input file') +elif len(pkl_filepaths) > 2: + print('error: cannot compare more than 2 methods') +else: + # Load data from file 1 + pkl_filename = ntpath.basename(pkl_filepaths[0]) + print('Loading data from "' + pkl_filename + '"') + pkl_file = open(pkl_filepaths[0], 'rb') + data = pickle.load(pkl_file) + pkl_file.close() + t1 = data['t'] + tstim1 = data['tstim'] + f1 = data['Fdrive'] + A1 = data['Adrive'] + Q1 = data['Qm'] * 1e2 # nC/cm2 + states1 = data['states'] + + # Load data from file 2 + pkl_filename = ntpath.basename(pkl_filepaths[1]) + print('Loading data from "' + pkl_filename + '"') + pkl_file = open(pkl_filepaths[1], 'rb') + data = pickle.load(pkl_file) + pkl_file.close() + t2 = data['t'] + tstim2 = data['tstim'] + f2 = data['Fdrive'] + A2 = data['Adrive'] + Q2 = data['Qm'] * 1e2 # nC/cm2 + states2 = data['states'] + + if tstim1 != tstim2 or f1 != f2 or A1 != A2: + print('error: different stimulation conditions') + else: + print('comparing charge profiles') + + T = 1 / f1 + ttot = tstim1 # + toffset + ncycles = int(ttot / T) + tcycles1 = np.empty(ncycles) + tcycles2 = np.empty(ncycles) + Qcycles1 = np.empty(ncycles) + Qcycles2 = np.empty(ncycles) + i1 = 1 + i2 = 1 + icycle = 0 + while icycle < ncycles: + tcycles1[icycle] = t1[i1] + tcycles2[icycle] = t2[i2] + Qcycles1[icycle] = Q1[i1] + Qcycles2[icycle] = Q2[i2] + if states1[i1 + 1] == 0: + i1 += NPC_HH + else: + i1 += NPC_FULL + 
if states2[i2 + 1] == 0: + i2 += NPC_HH + else: + i2 += NPC_FULL + icycle += 1 + + t_error = rmse(tcycles1, tcycles2) + print('method 1: rmse = %f us' % (t_error * 1e6)) + Q_error = rmse(Qcycles1, Qcycles2) + print('method 1: rmse = %f nC/cm2' % Q_error) + + # determining optimal slices + tslice = 5e-4 + ttot = tstim1 + t_offset + slices = np.arange(0.0, ttot, tslice) + nslices = slices.size + print('%u comparison instants' % nslices) + + # determining real slices + icomp1 = [] + icomp2 = [] + for i in range(nslices): + (islice1, tslice1) = find_nearest(t1, slices[i]) + (islice2, tslice2) = find_nearest(t2, slices[i]) + icomp1.append(islice1) + icomp2.append(islice2) + + # Comparing charge values + Qcomp1 = Q1[icomp1] + Qcomp2 = Q2[icomp2] + Q_error = rmse(Qcomp1, Qcomp2) + print('method 2: rmse = %f nC/cm2' % Q_error) diff --git a/deprecated/miscellaneous/test_downsampling.py b/deprecated/miscellaneous/test_downsampling.py new file mode 100644 index 0000000..1700698 --- /dev/null +++ b/deprecated/miscellaneous/test_downsampling.py @@ -0,0 +1,226 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# @Author: Theo Lemaire +# @Date: 2016-10-08 21:26:06 +# @Email: theo.lemaire@epfl.ch +# @Last Modified by: Theo Lemaire +# @Last Modified time: 2017-06-08 11:29:54 + +""" Test different signal downsampling strategies """ + +import sys +import pickle +import ntpath +import time +from scipy import signal +import numpy as np +import matplotlib.pyplot as plt + +sys.path.append('C:/Users/admin/Google Drive/PhD/NICE model/NICEPython') +import NICE.core as nblscore +from NICE.utils import OpenFilesDialog, DownSample + +# Define options +pkl_root = "../Output/test HH/" +plt_root = "../Output/test HH/" +plt_show = 0 +plt_save = 1 +plt_askbeforesave = 0 + +t_unit = 'us' +t_factor = 1e6 +ind_tmax = -1 + + +# Select data files (PKL) +pkl_filepaths = OpenFilesDialog(pkl_root, 'pkl') + +# Check dialog output +if not pkl_filepaths: + print('error: no input file') +else: + # Loop through data files + for pkl_filepath in pkl_filepaths: + + # Get code from file name + pkl_filename = ntpath.basename(pkl_filepath) + filecode = pkl_filename[0:-4] + + # Load data + print('Loading data from "' + pkl_filename + '"') + pkl_file = open(pkl_filepath, 'rb') + data = pickle.load(pkl_file) + + # Extract variables + print('Extracting variables') + Adrive = data['Adrive'] + Fdrive = data['Fdrive'] + phi = data['phi'] + dt = data['dt'] + t = data['t'] + U = data['U'] + Z = data['Z'] + Vm = data['Vm'] + a = data['a'] + d = data['d'] + params = data['params'] + geom = {"a": a, "d": d} + + npc = int(1 / (Fdrive * dt)) + print(npc, t.size) + + # Create dummy BLS instance to use functions + nbls = nblscore.NeuronalBilayerSonophore(geom, params, Fdrive, False) + + # Compute membrane capacitance density + print("computing capacitance") + # Cm = np.array([nbls.Capct(ZZ) for ZZ in Z]) + Cm = Z + + # Filter 1: N-moving average + t0 = time.time() + N = int(0.03 / (dt * Fdrive)) + if N % 2 == 0: + N += 1 + npad = int((N - 1) / 2) + Cm_begin_padding = Cm[-(npad + 2):-2] + Cm_end_padding = Cm[1:npad + 1] + Cm_ext = np.concatenate((Cm_begin_padding, Cm, Cm_end_padding), axis=0) + mav = np.ones(N) / N + Cm_filtMAV = np.convolve(Cm_ext, mav, mode='valid') + print('Moving-average method: ' + '{:.2e}'.format(time.time() - t0) + ' s') + + # Filter 2: lowpass Butterworth + t0 = time.time() + fc = Fdrive * 20 + nyq = 0.5 / dt # Nyquist frequency + fcn = fc / nyq + btw_order = 5 # Butterworth filter order + filtb, filta = signal.butter(btw_order, 
fcn) + Cm_filtBW = signal.filtfilt(filtb, filta, Cm) + print('Butterworth method: ' + '{:.2e}'.format(time.time() - t0) + ' s') + + # Filter 3: FFT lowpass cutoff + t0 = time.time() + Cm_ft = np.fft.rfft(Cm) + W = np.fft.rfftfreq(Cm.size, d=dt) + cut_Cm_ft = Cm_ft.copy() + cut_Cm_ft[(W > fc)] = 0 + Cm_filtFFT = np.fft.irfft(cut_Cm_ft, n=Cm.size) + print('FFT cutoff method: ' + '{:.2e}'.format(time.time() - t0) + ' s') + Cm_IFFT = np.fft.irfft(Cm_ft, n=Cm.size) + + # Extending for 2 periods + t_ext = np.concatenate((t, t + t[-1]), axis=0) + Cm_ext = np.concatenate((Cm, Cm), axis=0) + Cm_filtBW_ext = np.concatenate((Cm_filtBW, Cm_filtBW), axis=0) + Cm_filtMAV_ext = np.concatenate((Cm_filtMAV, Cm_filtMAV), axis=0) + Cm_filtFFT_ext = np.concatenate((Cm_filtFFT, Cm_filtFFT), axis=0) + Cm_IFFT_ext = np.concatenate((Cm_IFFT, Cm_IFFT), axis=0) + + # Down-sampling + npc_ds = 40 + t_ds = np.linspace(t[0], t[-1], npc_ds) + print(t[0], t_ds[0], t[-1], t_ds[-1]) + Cm_ds = signal.resample(Cm, npc_ds, t=None, axis=0, window=None) + Cm_filtMAV_ds = signal.resample(Cm_filtMAV, npc_ds, t=None, axis=0, window=None) + Cm_filtBW_ds = signal.resample(Cm_filtBW, npc_ds, t=None, axis=0, window=None) + Cm_filtFFT_ds = signal.resample(Cm_filtFFT, npc_ds, t=None, axis=0, window=None) + Cm_IFFT_ds = signal.resample(Cm_IFFT, npc_ds, t=None, axis=0, window=None) + + i_ds_custom = np.round(np.linspace(0, t.size - 1, npc_ds)).astype(int) + Cm_filtMAV_ds_custom = Cm_filtMAV[i_ds_custom] + + (t_ds2, Cm_filtMAV_ds_custom2) = DownSample(t, Cm, 0.025 / Fdrive) + + # Extending DS signals for 2 periods + t_ds_ext = np.concatenate((t_ds, t_ds + t_ds[-1]), axis=0) + + print(t_ext[0], t_ds_ext[0], t_ext[-1], t_ds_ext[-1]) + + Cm_ds_ext = np.concatenate((Cm_ds, Cm_ds), axis=0) + Cm_filtBW_ds_ext = np.concatenate((Cm_filtBW_ds, Cm_filtBW_ds), axis=0) + Cm_filtMAV_ds_ext = np.concatenate((Cm_filtMAV_ds, Cm_filtMAV_ds), axis=0) + Cm_filtFFT_ds_ext = np.concatenate((Cm_filtFFT_ds, Cm_filtFFT_ds), axis=0) + Cm_IFFT_ds_ext = np.concatenate((Cm_IFFT_ds, Cm_IFFT_ds), axis=0) + + Cm_filtMAV_ds_custom_ext = np.concatenate((Cm_filtMAV_ds_custom, Cm_filtMAV_ds_custom), + axis=0) + + t_ds2_ext = np.concatenate((t_ds2, t_ds2 + t_ds2[-1]), axis=0) + Cm_filtMAV_ds_custom2_ext = np.concatenate((Cm_filtMAV_ds_custom2, Cm_filtMAV_ds_custom2), + axis=0) + + # Plots + + # fig, axes = plt.subplots(4,1) + + # ax = axes[0] + # ax.set_title(str(npc) + ' samples per cycle', fontsize=28) + # ax.plot(t_ext*t_factor, Cm_ext*1e2, color='black', linewidth=2, label='original') + # ax.plot(t_ext*t_factor, Cm_IFFT_ext*1e2, color='magenta', linewidth=2, + # label='IFFT reconstructed') + # ax.set_ylabel('$C_m \ (uF/cm^2)$', fontsize=28) + # ax.legend(loc=0, fontsize=28) + + # ax = axes[1] + # ax.plot(t_ext*t_factor, Cm_ext*1e2, color='black', linewidth=2, label='original') + # ax.plot(t_ext*t_factor, Cm_filtMAV_ext*1e2, color='green', linewidth=2, + # label=str(N)+'-moving average') + # ax.set_ylabel('$C_m \ (uF/cm^2)$', fontsize=28) + # ax.legend(loc=0, fontsize=28) + + # ax = axes[2] + # ax.plot(t_ext*t_factor, Cm_ext*1e2, color='black', linewidth=2, label='original') + # ax.plot(t_ext*t_factor, Cm_filtBW_ext*1e2, color='red', linewidth=2, + # label='{:.2f}'.format(fc*1e-6) + 'MHz lowpass BW') + # ax.set_ylabel('$C_m \ (uF/cm^2)$', fontsize=28) + # ax.legend(loc=0, fontsize=28) + + # ax = axes[3] + # ax.plot(t_ext*t_factor, Cm_ext*1e2, color='black', linewidth=2, label='original') + # ax.plot(t_ext*t_factor, Cm_filtFFT_ext*1e2, color='blue', linewidth=2, + # 
label='{:.2f}'.format(fc*1e-6) + 'MHz lowpass FFT') + # ax.set_ylabel('$C_m \ (uF/cm^2)$', fontsize=28) + # ax.set_xlabel('time (' + t_unit + ')') + # ax.legend(loc=0, fontsize=28) + + fig, ax = plt.subplots() + ax.set_title('Downsampled signals (' + str(npc_ds) + ' samples per cycle)', fontsize=28) + ax.plot(t_ext * t_factor, Cm_ext * 1e2, color='gold', linewidth=2, label='original') + ax.plot(t_ds_ext * t_factor, Cm_ds_ext * 1e2, + color='black', linewidth=2, label='original DS') + # ax.plot(t_ds_ext*t_factor, Cm_filtMAV_ds_ext*1e2, color='green', linewidth=2, + # label='MAV DS') + # ax.plot(t_ds_ext*t_factor, Cm_filtBW_ds_ext*1e2, color='red', linewidth=2, label='BW DS') + # ax.plot(t_ds_ext*t_factor, Cm_filtFFT_ds_ext*1e2, color='blue', linewidth=2, + # label='FFT DS') + ax.plot(t_ds_ext * t_factor, Cm_filtMAV_ds_custom_ext * 1e2, color='magenta', linewidth=2, + label='MAV DS custom') + ax.plot(t_ds2_ext * t_factor, Cm_filtMAV_ds_custom2_ext * 1e2, color='cyan', linewidth=2, + label='MAV DS custom 2') + ax.set_xlabel('time (' + t_unit + ')') + ax.set_ylabel('$C_m \ (uF/cm^2)$', fontsize=28) + ax.legend() + + # fig, ax = plt.subplots() + # Cm_ext = np.concatenate((Cm, Cm), axis=0) + # Cm_ifft_ext = np.concatenate((cut_Cm, cut_Cm), axis=0) + # Cm_mav_ext = np.concatenate((Cm_mav, Cm_mav), axis=0) + # Cm_filtbw_ext = np.concatenate((Cm_filtbw, Cm_filtbw), axis=0) + # ax.plot(Cm_ext*1e2, color='black', linewidth=3, label='original repeated') + # ax.plot(Cm_mav_ext*1e2, color='green', linewidth=3, label='MAV repeated') + # ax.plot(Cm_filtbw_ext*1e2, color='red', linewidth=3, label='BW repeated') + # ax.plot(Cm_ifft_ext*1e2, color='blue', linewidth=3, label='FFT repeated') + # ax.legend() + + # Plot FFT of capacitance + # fig, ax = plt.subplots() + # ax.set_xscale('log') + # ax.plot(W, np.absolute(Cm_ft), color='black', linewidth=3, label='FFT mod') + # ax.plot([fc, fc], [0, np.amax(np.absolute(Cm_ft))], color='red', linewidth=3, + # label='cutoff') + # ax.set_xlabel('frequency (Hz)') + # ax.legend() + +plt.show() diff --git a/deprecated/miscellaneous/test_intermolecular_fit.py b/deprecated/miscellaneous/test_intermolecular_fit.py new file mode 100644 index 0000000..eb4bc1e --- /dev/null +++ b/deprecated/miscellaneous/test_intermolecular_fit.py @@ -0,0 +1,144 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# @Author: Theo Lemaire +# @Date: 2016-10-07 16:04:34 +# @Email: theo.lemaire@epfl.ch +# @Last Modified by: Theo Lemaire +# @Last Modified time: 2017-07-08 17:04:26 + +""" Test the Lennard-Jones fitting of the average intermolecular pressure """ + +import time +import timeit +import numpy as np +from scipy.optimize import brentq, curve_fit +import matplotlib.pyplot as plt +import logging + +import PyNICE +from PyNICE.utils import LoadParams, LJfit, rsquared, rmse + +# Set logging options +logging.basicConfig(format='%(asctime)s %(message)s', datefmt='%d/%m/%Y %H:%M:%S:') +logger = logging.getLogger('PyNICE') +logger.setLevel(logging.DEBUG) + + +def Pmpred_loop(x, a, b, c, d, e): + nx = len(x) + y = np.empty(nx) + for i in range(nx): + y[i] = LJfit(x[i], a, b, c, d, e) + return y + + +def Pminterp_loop(xtest, xtrain, ytrain): + nx = len(xtest) + ytest = np.empty(nx) + for i in range(nx): + ytest[i] = np.interp(xtest[i], xtrain, ytrain) + return ytest + + +# Initialization: create a NBLS instance +params = LoadParams() + +a = 32e-9 +d = 0.0e-6 # embedding tissue thickness (m) +Fdrive = 0.0 # dummy stimulation frequency +Cm0 = 1e-2 # F/m2 +Qm0 = -80e-5 # C/m2 +bls = 
PyNICE.BilayerSonophore({"a": a, "d": d}, params, Fdrive, Cm0, Qm0) + +quit() + +# Determine deflection range +Pmmax = 100e6 # Pa +ZMinlb = -0.49 * bls.Delta +ZMinub = 0.0 +f = lambda Z, Pmmax: bls.PMavg(Z, bls.curvrad(Z), bls.surface(Z)) - Pmmax +Zmin = brentq(f, ZMinlb, ZMinub, args=(Pmmax), xtol=1e-16) +print('Zmin fit = %f nm (%.2f Delta)' % (Zmin * 1e9, Zmin / bls.Delta)) + +# Create vectors for geometric variables +print('Generating training samples') +Zmax = 2 * bls.a +Z_train = np.hstack((np.arange(Zmin, bls.a / 3, 1e-11), np.arange(bls.a / 3, Zmax, 5e-10))) +Pmavg_train = np.array([bls.PMavg(ZZ, bls.curvrad(ZZ), bls.surface(ZZ)) for ZZ in Z_train]) +print('') + +# Compute optimal nonlinear fit of custom LJ function +print('Fitting LJ function parameters to training data') +x0_guess = 2e-9 +C_guess = 1e4 +nrep_guess = 5.0 +nattr_guess = 3.0 +pguess = (x0_guess, C_guess, nrep_guess, nattr_guess) +popt, _ = curve_fit(lambda x, x0, C, nrep, nattr: + LJfit(x, bls.Delta, x0, C, nrep, nattr), Z_train, Pmavg_train, + p0=pguess, maxfev=10000) +(x0_opt, C_opt, nrep_opt, nattr_opt) = popt +print('') + +# Compute intermolecular pressure vector +print('generating testing samples') +Z_test = np.linspace(Zmin, Zmax, 10000) +t0 = time.time() +Pmavg_test = np.array([bls.PMavg(ZZ, bls.curvrad(ZZ), bls.surface(ZZ)) for ZZ in Z_test]) +tdirect = time.time() - t0 +print('direct time: {} ms'.format(tdirect * 1e3)) +print('') + +nrep = 100 + +print('evaluating model 1 (LJ fit) on testing set') +t0 = time.time() +Pmavg_fit = Pmpred_loop(Z_test, bls.Delta, x0_opt, C_opt, nrep_opt, nattr_opt) +# Pmavg_fit = LJfit(Z_test, bls.Delta, x0_opt, C_opt, nrep_opt, nattr_opt) +tpred = time.time() - t0 +print('pred time: {} ms'.format(tpred * 1e3)) +tpred = timeit.timeit(lambda: Pmpred_loop(Z_test, bls.Delta, x0_opt, C_opt, nrep_opt, nattr_opt), + number=nrep) +print('pred time: {} ms'.format(tpred * 1e3)) +r2 = rsquared(Pmavg_test, Pmavg_fit) +residuals = Pmavg_test - Pmavg_fit +ss_res = np.sum(residuals**2) +N = residuals.size +std_err = np.sqrt(ss_res / N) +print("R-squared opt = " + '{:.10}'.format(r2)) +print("standard error: sigma_err = " + str(std_err)) +print('') + +print('evaluating model 2 (interpolation) on testing set') +t0 = time.time() +Pmavg_interp = Pminterp_loop(Z_test, Z_train, Pmavg_train) +# Pmavg_interp = np.interp(Z_test, Z_train, Pmavg_train) +tinterp = time.time() - t0 +print('interp time: {} ms'.format(tinterp * 1e3)) +tinterp = timeit.timeit(lambda: Pminterp_loop(Z_test, Z_train, Pmavg_train), number=nrep) +print('interp time: {} ms'.format(tinterp * 1e3)) +r2 = rsquared(Pmavg_test, Pmavg_interp) +residuals = Pmavg_test - Pmavg_interp +ss_res = np.sum(residuals**2) +N = residuals.size +std_err = np.sqrt(ss_res / N) +print("R-squared opt = " + '{:.10}'.format(r2)) +print("standard error: sigma_err = " + str(std_err)) +print('') + + +# Plot Pm data and predictions +print('plotting') +fig, ax = plt.subplots() +fig.canvas.set_window_title('a = ' + '{:.2f}'.format(a * 1e9) + ' nm') +ax.plot(Z_test * 1e9, Pmavg_test * 1e-6, label="direct") +ax.plot(Z_test * 1e9, Pmavg_fit * 1e-6, '-', linewidth=2, label='fit') +ax.plot(Z_test * 1e9, Pmavg_interp * 1e-6, '-', linewidth=2, label='interpolated') +ax.plot(Z_test * 1e9, np.abs(Pmavg_test - Pmavg_fit) * 1e-6, '-', linewidth=2, label='fit error') +ax.plot(Z_test * 1e9, np.abs(Pmavg_test - Pmavg_interp) * 1e-6, '-', linewidth=2, label='interp error') +ax.set_xlabel('deflection (nm)') +ax.set_ylabel('pressure (MPa)') +ax.grid(True) +ax.legend() + 
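[Editor's note] A standalone sketch of the frozen-parameter fit performed above, where the membrane gap Delta stays fixed through a lambda while curve_fit adjusts (x0, C, nrep, nattr); the Lennard-Jones-like expression below is a hypothetical stand-in, not the repository's actual LJfit.

    import numpy as np
    from scipy.optimize import curve_fit

    def lj_toy(x, Delta, x0, C, nrep, nattr):
        # hypothetical LJ-like profile: repulsive minus attractive power law
        r = Delta / (2. * x0 + x)
        return C * (r**nrep - r**nattr)

    Delta_fixed = 1.4e-9                                  # m, held constant during the fit
    Z = np.linspace(-0.4e-9, 60e-9, 500)                  # deflections (m)
    P = lj_toy(Z, Delta_fixed, 2e-9, 1e4, 5., 3.) + 10. * np.random.randn(Z.size)

    popt, _ = curve_fit(lambda x, x0, C, nrep, nattr: lj_toy(x, Delta_fixed, x0, C, nrep, nattr),
                        Z, P, p0=(2e-9, 1e4, 5., 3.), maxfev=10000)
    x0_opt, C_opt, nrep_opt, nattr_opt = popt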
+plt.show() diff --git a/deprecated/miscellaneous/test_interp_speed.py b/deprecated/miscellaneous/test_interp_speed.py new file mode 100644 index 0000000..1a52ae7 --- /dev/null +++ b/deprecated/miscellaneous/test_interp_speed.py @@ -0,0 +1,20 @@ +from scipy.interpolate import interp1d +import numpy as np +import timeit + + +x = np.linspace(1, 10000, 5) +f = lambda x: x**2 +y = f(x) + +xnew = 1234 +u = timeit.timeit(lambda: np.interp(xnew, x, y)) +print(u) + +u = timeit.timeit(lambda: interp1d(x, y)) +print(u) + +h = interp1d(x, y) + +u = timeit.timeit(lambda: h(xnew)) +print(u) \ No newline at end of file diff --git a/deprecated/update_lookups.py b/deprecated/update_lookups.py new file mode 100644 index 0000000..7696cf9 --- /dev/null +++ b/deprecated/update_lookups.py @@ -0,0 +1,57 @@ +import os +import pickle +import numpy as np + +# Define frequencies +# freqs = np.append(np.arange(50, 1001, 50), 690.0) * 1e3 +freqs = np.arange(50, 1001, 50) * 1e3 + +# Locate lookup files +for Fdrive in freqs: + lookup_file_in = 'Tcell_lookups_a32.0nm_f{:.1f}kHz.pkl'.format(Fdrive * 1e-3) + lookup_file_out = 'LeechT_lookups_a32.0nm_f{:.1f}kHz.pkl'.format(Fdrive * 1e-3) + lookup_path_in = 'lookups/LeechT/{}'.format(lookup_file_in) + lookup_path_out = 'lookups/LeechT/{}'.format(lookup_file_out) + + # Load coefficients + assert os.path.isfile(lookup_path_in), 'Error: no lookup file for these stimulation parameters' + print('modifying dict keys in "{}"'.format(lookup_path_in)) + + # Load file + with open(lookup_path_in, 'rb') as fh: + coeffs = pickle.load(fh) + + print('keys:') + print(coeffs.keys()) + + # m-gate + coeffs['alpham'] = coeffs['miTm_Na'] + coeffs['betam'] = coeffs['invTm_Na'] - coeffs['alpham'] + coeffs.pop('miTm_Na') + coeffs.pop('invTm_Na') + + # h-gate + coeffs['alphah'] = coeffs['hiTh_Na'] + coeffs['betah'] = coeffs['invTh_Na'] - coeffs['alpham'] + coeffs.pop('hiTh_Na') + coeffs.pop('invTh_Na') + + # n-gate + coeffs['alphan'] = coeffs['miTm_K'] + coeffs['betan'] = coeffs['invTm_K'] - coeffs['alphan'] + coeffs.pop('miTm_K') + coeffs.pop('invTm_K') + + # s-gate + coeffs['alphas'] = coeffs['miTm_Ca'] + coeffs['betas'] = coeffs['invTm_Ca'] - coeffs['alphas'] + coeffs.pop('miTm_Ca') + coeffs.pop('invTm_Ca') + + + print('new keys:') + print(coeffs.keys()) + + # Save new dict in file + with open(lookup_path_out, 'wb') as fh: + pickle.dump(coeffs, fh) diff --git a/docs/PointNICE.SolverElec.rst b/docs/PointNICE.SolverElec.rst new file mode 100644 index 0000000..da07e6a --- /dev/null +++ b/docs/PointNICE.SolverElec.rst @@ -0,0 +1,10 @@ +Electrical stimulation +------------------------- + +.. automodule:: PointNICE.solvers.SolverElec + +.. autoclass:: SolverElec + :members: + :undoc-members: + :show-inheritance: + diff --git a/docs/PointNICE.SolverUS.rst b/docs/PointNICE.SolverUS.rst new file mode 100644 index 0000000..b5636e0 --- /dev/null +++ b/docs/PointNICE.SolverUS.rst @@ -0,0 +1,10 @@ +US stimulation +------------------------- + +.. automodule:: PointNICE.solvers.SolverUS + +.. autoclass:: SolverUS + :members: + :undoc-members: + :show-inheritance: + diff --git a/docs/PointNICE.base.rst b/docs/PointNICE.base.rst new file mode 100644 index 0000000..4739653 --- /dev/null +++ b/docs/PointNICE.base.rst @@ -0,0 +1,9 @@ +Standard Mechanism API +------------------------- + +.. automodule:: PointNICE.channels.base + +.. 
autoclass:: BaseMech + :members: + :undoc-members: + :show-inheritance: \ No newline at end of file diff --git a/docs/PointNICE.bls.rst b/docs/PointNICE.bls.rst new file mode 100644 index 0000000..1208896 --- /dev/null +++ b/docs/PointNICE.bls.rst @@ -0,0 +1,7 @@ +Bilayer Sonophore +---------------- + +.. automodule:: PointNICE.bls +.. autoclass:: BilayerSonophore + :members: + diff --git a/docs/PointNICE.channels.rst b/docs/PointNICE.channels.rst new file mode 100644 index 0000000..1b5ca5f --- /dev/null +++ b/docs/PointNICE.channels.rst @@ -0,0 +1,10 @@ +Channels +------------------------- + +.. toctree:: + :maxdepth: 2 + + PointNICE.base + PointNICE.cortical + PointNICE.thalamic + PointNICE.leech diff --git a/docs/PointNICE.cortical.rst b/docs/PointNICE.cortical.rst new file mode 100644 index 0000000..3b20cff --- /dev/null +++ b/docs/PointNICE.cortical.rst @@ -0,0 +1,24 @@ +Cortical neurons +------------------------- + +.. automodule:: PointNICE.channels.cortical + +.. autoclass:: Cortical + :members: + :undoc-members: + :show-inheritance: + +.. autoclass:: CorticalRS + :members: + :undoc-members: + :show-inheritance: + +.. autoclass:: CorticalFS + :members: + :undoc-members: + :show-inheritance: + +.. autoclass:: CorticalLTS + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/PointNICE.leech.rst b/docs/PointNICE.leech.rst new file mode 100644 index 0000000..47e79f5 --- /dev/null +++ b/docs/PointNICE.leech.rst @@ -0,0 +1,9 @@ +Leech neurons +------------------------- + +.. automodule:: PointNICE.channels.leech + +.. autoclass:: LeechTouch + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/PointNICE.solvers.rst b/docs/PointNICE.solvers.rst new file mode 100644 index 0000000..9be4200 --- /dev/null +++ b/docs/PointNICE.solvers.rst @@ -0,0 +1,9 @@ +Solvers +------------------------- + +.. toctree:: + :maxdepth: 2 + + PointNICE.SolverElec + PointNICE.SolverUS + diff --git a/docs/PointNICE.thalamic.rst b/docs/PointNICE.thalamic.rst new file mode 100644 index 0000000..7f789c9 --- /dev/null +++ b/docs/PointNICE.thalamic.rst @@ -0,0 +1,14 @@ +Thalamic neurons +------------------------- + +.. automodule:: PointNICE.channels.thalamic + +.. autoclass:: Thalamic + :members: + :undoc-members: + :show-inheritance: + +.. autoclass:: ThalamicRE + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/PointNICE.utils.rst b/docs/PointNICE.utils.rst new file mode 100644 index 0000000..9a87e6f --- /dev/null +++ b/docs/PointNICE.utils.rst @@ -0,0 +1,5 @@ +Utils +---------------- + +.. automodule:: PointNICE.utils + :members: diff --git a/docs/index.rst b/docs/index.rst new file mode 100644 index 0000000..dbe100d --- /dev/null +++ b/docs/index.rst @@ -0,0 +1,26 @@ +***************************** +PointNICE Project +***************************** + +.. include:: ../README.md + +Modules: +========== + +.. 
toctree:: + :maxdepth: 2 + + PointNICE.bls + PointNICE.solvers + PointNICE.channels + PointNICE.utils + + + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` + diff --git a/plot/plot_ESTIM_anim.py b/plot/plot_ESTIM_anim.py new file mode 100644 index 0000000..67d9187 --- /dev/null +++ b/plot/plot_ESTIM_anim.py @@ -0,0 +1,83 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# @Author: Theo Lemaire +# @Date: 2016-10-11 20:35:38 +# @Email: theo.lemaire@epfl.ch +# @Last Modified by: Theo Lemaire +# @Last Modified time: 2017-08-22 17:25:23 + +""" Run simulations of the HH system with injected electric current, +and plot resulting dynamics. """ + +import numpy as np +import matplotlib.pyplot as plt +import matplotlib.cm as cm +import matplotlib.patches as patches + +from PointNICE.solvers import SolverElec +from PointNICE.channels import * + + +# -------------- SIMULATION ----------------- + + +# Create channels mechanism +neuron = ThalamoCortical() +for i in range(len(neuron.states_names)): + print('{}0 = {:.2f}'.format(neuron.states_names[i], neuron.states0[i])) + + +# Set pulse parameters +tstim = 500e-3 # s +toffset = 300e-3 # s +Amin = -20.0 +Amax = 20.0 +amps = np.arange(Amin, Amax + 0.5, 1.0) +nA = len(amps) + + +root = 'C:/Users/admin/Desktop/test anim' +mymap = cm.get_cmap('coolwarm') +sm = plt.cm.ScalarMappable(cmap=mymap, norm=plt.Normalize(Amin, Amax)) +sm._A = [] + + +i = 0 +for Astim in amps: + + i += 1 + + # Run simulation + print('sim {}/{} ({:.2f} mA/m2, {:.0f} ms)'.format(i, nA, Astim, tstim * 1e3)) + solver = SolverElec() + (t, y) = solver.runSim(neuron, Astim, tstim, toffset) + Vm = y[:, 0] + + # Plot membrane potential profile + fs = 12 + fig, ax = plt.subplots(figsize=(10, 3.5)) + ax.set_xlabel('$time\ (ms)$', fontsize=fs) + ax.set_ylabel('$V_m\ (mV)$', fontsize=fs) + ax.set_ylim(-150.0, 60.0) + ax.set_xticks([0.0, 500.0]) + ax.set_yticks([-100, 50.0]) + ax.locator_params(axis='y', nbins=2) + for item in ax.get_yticklabels(): + item.set_fontsize(fs) + for item in ax.get_xticklabels(): + item.set_fontsize(fs) + (ybottom, ytop) = ax.get_ylim() + ax.add_patch(patches.Rectangle((0.0, ybottom), tstim * 1e3, ytop - ybottom, + facecolor='gold', alpha=0.2)) + + ax.plot(t * 1e3, Vm, linewidth=2) + plt.tight_layout() + + fig.subplots_adjust(right=0.80) + cbar_ax = fig.add_axes([0.82, 0.2, 0.02, 0.75]) + fig.add_axes() + fig.colorbar(sm, cax=cbar_ax, ticks=[Astim]) + cbar_ax.set_yticklabels(['{:.2f} mA/m2'.format(Astim)], fontsize=fs) + + fig.savefig('{}/fig{:03d}.png'.format(root, i)) + plt.close(fig) diff --git a/plot/plot_Ih_kinetics.py b/plot/plot_Ih_kinetics.py new file mode 100644 index 0000000..03959b2 --- /dev/null +++ b/plot/plot_Ih_kinetics.py @@ -0,0 +1,68 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# @Author: Theo Lemaire +# @Date: 2016-10-11 20:35:38 +# @Email: theo.lemaire@epfl.ch +# @Last Modified by: Theo Lemaire +# @Last Modified time: 2017-08-17 11:49:41 + +""" Plot the voltage-dependent kinetics of the hyperpolarization-activated + cationic current found in thalamo-cortical neurons. 
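[Editor's note] As a side note on plot_ESTIM_anim.py above, which writes one PNG per stimulation amplitude: if the Vm traces are kept in memory, matplotlib's FuncAnimation can assemble the movie directly. A sketch under that assumption (the trace container and output filename are hypothetical, and saving to MP4 requires ffmpeg on the system PATH):

    import matplotlib.pyplot as plt
    from matplotlib import animation

    def animate_traces(t, traces, amps, filename='ESTIM_anim.mp4'):
        # t: common time vector (s); traces[i]: Vm (mV) simulated at amplitude amps[i] (mA/m2)
        fig, ax = plt.subplots(figsize=(10, 3.5))
        line, = ax.plot([], [], lw=2)
        ax.set_xlim(0., t[-1] * 1e3)
        ax.set_ylim(-150., 60.)
        ax.set_xlabel('time (ms)')
        ax.set_ylabel('Vm (mV)')

        def update(i):
            line.set_data(t * 1e3, traces[i])
            ax.set_title('{:.1f} mA/m2'.format(amps[i]))
            return line,

        anim = animation.FuncAnimation(fig, update, frames=len(amps), interval=100, blit=False)
        anim.save(filename, writer='ffmpeg', dpi=150)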
""" + +import numpy as np +import matplotlib.pyplot as plt +import matplotlib.cm as cm +from matplotlib.colors import LogNorm + +# from PointNICE.solvers import SolverElec +from PointNICE.channels import ThalamoCortical +from PointNICE.utils import rescale + +# -------------- SIMULATION ----------------- + + +# Create channels mechanism +neuron = ThalamoCortical() + + +# Input vectors +nV = 100 +nC = 10 +CCa_min = 0.01 # uM +CCa_max = 10 # um +Vm = np.linspace(-100, 50, nV) # mV +CCa = np.logspace(np.log10(CCa_min), np.log10(CCa_max), nC) # uM + + +# Output matrix: relative activation (0-2) +BA = neuron.betao(Vm) / neuron.alphao(Vm) +P0 = neuron.k2 / (neuron.k2 + neuron.k1 * (CCa * 1e-6)**4) +gH_rel = np.empty((nV, nC)) +for i in range(nC): + O_form = neuron.k4 / (neuron.k3 * (1 - P0[i]) + neuron.k4 * (1 + BA)) + OL_form = (1 - O_form * (1 + BA)) + gH_rel[:, i] = O_form + 2 * OL_form + + +mymap = cm.get_cmap('viridis') +sm = plt.cm.ScalarMappable(cmap=mymap, norm=LogNorm(CCa_min, CCa_max)) +sm._A = [] + +fs = 18 + +fig, ax = plt.subplots(figsize=(8, 5)) +ax.set_title('global activation', fontsize=fs) +ax.set_xlabel('$V_m\ (mV)$', fontsize=fs) +ax.set_ylabel('$(O + 2O_L)_{\infty}$', fontsize=fs) +ax.set_yticks([0, 1, 2]) +for i in range(nC): + ax.plot(Vm, gH_rel[:, i], linewidth=2, + c=mymap(rescale(np.log10(CCa[i]), np.log10(CCa_min), np.log10(CCa_max)))) +fig.subplots_adjust(right=0.85) +cbar_ax = fig.add_axes([0.87, 0.1, 0.02, 0.8]) +fig.add_axes() +fig.colorbar(sm, cax=cbar_ax) +cbar_ax.set_ylabel('$[Ca^{2+}_i]\ (uM)$', fontsize=fs) + + +plt.show() diff --git a/plot/plot_P_vs_I.py b/plot/plot_P_vs_I.py new file mode 100644 index 0000000..2ff6837 --- /dev/null +++ b/plot/plot_P_vs_I.py @@ -0,0 +1,35 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# @Author: Theo Lemaire +# @Date: 2017-07-17 11:47:50 +# @Email: theo.lemaire@epfl.ch +# @Last Modified by: Theo Lemaire +# @Last Modified time: 2017-07-21 18:40:04 + +''' plot profile of acoustic Intensity (in W/cm^2) vs Pressure (in kPa) ''' + +import numpy as np +import matplotlib.pyplot as plt +from PointNICE.utils import Pressure2Intensity + +rho = 1075 # kg/m3 +c = 1515 # m/s + +fig, ax = plt.subplots() +ax.set_xlabel('$Pressure\ (kPa)$') +ax.set_ylabel('$I_{SPPA}\ (W/cm^2)$') +ax.set_xscale('log') +ax.set_yscale('log') + +P = np.logspace(np.log10(1e1), np.log10(1e7), num=500) # Pa +Int = Pressure2Intensity(P, rho, c) # W/m2 +ax.plot(P * 1e-3, Int * 1e-4) + + +Psnaps = np.logspace(1, 7, 7) # Pa +for Psnap in Psnaps: + Isnap = Pressure2Intensity(Psnap, rho, c) # W/m2 + ax.plot(np.array([Psnap, Psnap]) * 1e-3, np.array([0.0, Isnap]) * 1e-4, '--', color='black') + ax.plot(np.array([0, Psnap]) * 1e-3, np.array([Isnap, Isnap]) * 1e-4, '--', color='black') + +plt.show() diff --git a/plot/plot_RS_rate_constants.py b/plot/plot_RS_rate_constants.py new file mode 100644 index 0000000..98da0d2 --- /dev/null +++ b/plot/plot_RS_rate_constants.py @@ -0,0 +1,85 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# @Author: Theo Lemaire +# @Date: 2017-01-11 18:54:00 +# @Email: theo.lemaire@epfl.ch +# @Last Modified by: Theo Lemaire +# @Last Modified time: 2017-07-18 15:00:34 + +""" Plot the rate constants of the sodium and potassium channels +as a function of the membrane potential. 
""" + +import numpy as np +import matplotlib.pyplot as plt + +from PointNICE.channels import CorticalRS + +# Create channels mechanism +rs_mech = CorticalRS() + +# Define potential and charge input vectors +Cm0 = 1e-2 # F/m2 +Vm = np.linspace(-420, 420, 1000) +Qm = Vm * Cm0 * 1e-3 # C/m2 + +# Plot +fig, axes = plt.subplots(nrows=3, ncols=3) +fs = 16 +st = fig.suptitle("Regular Spiking neuron", fontsize=fs) + +# 1: Vm +ax = axes[0, 0] +ax.set_xlabel('$Q_m\ (nC/cm2)$', fontsize=fs) +ax.set_ylabel('$V_m\ (mV)$', fontsize=fs) +ax.plot(Qm * 1e5, Vm) +ax.set_xlim([-150, 150]) + +# 2: alpha_m +ax = axes[0, 1] +ax.set_xlabel('$V_m\ (mV)$', fontsize=fs) +ax.set_ylabel('$\\alpha_m\ (ms^{-1})$', fontsize=fs) +ax.plot(Vm, rs_mech.alpham(Vm) * 1e-3) + +# 3: beta_m +ax = axes[0, 2] +ax.set_xlabel('$V_m\ (mV)$', fontsize=fs) +ax.set_ylabel('$\\beta_m\ (ms^{-1})$', fontsize=fs) +ax.plot(Vm, rs_mech.betam(Vm) * 1e-3) + +# 4: alpha_h +ax = axes[1, 0] +ax.set_xlabel('$V_m\ (mV)$', fontsize=fs) +ax.set_ylabel('$\\alpha_h\ (ms^{-1})$', fontsize=fs) +ax.plot(Vm, rs_mech.alphah(Vm) * 1e-3) + +# 5: beta_h +ax = axes[1, 1] +ax.set_xlabel('$V_m\ (mV)$', fontsize=fs) +ax.set_ylabel('$\\beta_h\ (ms^{-1})$', fontsize=fs) +ax.plot(Vm, rs_mech.betah(Vm) * 1e-3) + +# 6: alpha_n +ax = axes[1, 2] +ax.set_xlabel('$V_m\ (mV)$', fontsize=fs) +ax.set_ylabel('$\\alpha_n\ (ms^{-1})$', fontsize=fs) +ax.plot(Vm, rs_mech.alphan(Vm) * 1e-3) + +# 7: beta_n +ax = axes[2, 0] +ax.set_xlabel('$V_m\ (mV)$', fontsize=fs) +ax.set_ylabel('$\\beta_n\ (ms^{-1})$', fontsize=fs) +ax.plot(Vm, rs_mech.betan(Vm) * 1e-3) + +# 8: pinf_over_taup +ax = axes[2, 1] +ax.set_xlabel('$V_m\ (mV)$', fontsize=fs) +ax.set_ylabel('$p_{\\infty} / \\tau_p\ (ms^{-1})$', fontsize=fs) +ax.plot(Vm, rs_mech.pinf(Vm) / rs_mech.taup(Vm) * 1e-3) + +# 9: inv_taup +ax = axes[2, 2] +ax.set_xlabel('$V_m\ (mV)$', fontsize=fs) +ax.set_ylabel('$1 / \\tau_p\ (ms^{-1})$', fontsize=fs) +ax.plot(Vm, 1 / rs_mech.taup(Vm) * 1e-3) + +plt.show() diff --git a/plot/plot_batch.py b/plot/plot_batch.py new file mode 100644 index 0000000..381f7e1 --- /dev/null +++ b/plot/plot_batch.py @@ -0,0 +1,185 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# @Author: Theo Lemaire +# @Date: 2017-03-20 12:19:55 +# @Email: theo.lemaire@epfl.ch +# @Last Modified by: Theo Lemaire +# @Last Modified time: 2017-08-21 21:12:31 + +""" Batch plot profiles of several specific output variables of NICE simulations. 
""" + +import pickle +import ntpath +import re +import inspect +import numpy as np +import matplotlib.pyplot as plt +import matplotlib.patches as patches + +import PointNICE +from PointNICE.utils import SaveFigDialog, OpenFilesDialog, getPatchesLoc +from PointNICE.pltvars import pltvars + + +# List of variables to plot and positions +tag = 'test' +varlist = ['Vm', 'VL'] +positions = [0, 0] + +nvars = len(varlist) +naxes = np.unique(positions).size + + +# Plotting options +t_unit = 'ms' # us +t_factor = 1e3 +t_onset = 3e-3 +fs = 15 +show_patches = 1 +plt_show = 1 +plt_save = 0 +fig_ext = 'png' +ask_before_save = 1 + + +# Dictionary of neurons +neurons = {} +for classname, obj in inspect.getmembers(PointNICE.channels): + if inspect.isclass(obj) and isinstance(obj.name, str): + neurons[obj.name] = obj + +# Regular expression for input files +rgxp = re.compile('sim_([A-Za-z]*)_(.*).pkl') + +# Select data files +pkl_filepaths, pkl_dir = OpenFilesDialog('pkl') +if not pkl_filepaths: + print('error: no input file') + quit() + +# Loop through data files +for pkl_filepath in pkl_filepaths: + + # Get code from file name + pkl_filename = ntpath.basename(pkl_filepath) + filecode = pkl_filename[0:-4] + + # Retrieve neuron name + mo = rgxp.fullmatch(pkl_filename) + if not mo: + print('Error: PKL file does not match regular expression pattern') + quit() + neuron_name = mo.group(1) + + # Load data + print('Loading data from "' + pkl_filename + '"') + with open(pkl_filepath, 'rb') as pkl_file: + data = pickle.load(pkl_file) + + # Extract variables + print('Extracting variables') + t = data['t'] + states = data['states'] + tstim = data['tstim'] + Fdrive = data['Fdrive'] + Adrive = data['Adrive'] + params = data['params'] + a = data['a'] + d = data['d'] + geom = {"a": a, "d": d} + + # Initialize BLS and channels mechanism + neuron = neurons[neuron_name]() + + # neuron = neurons[neuron_name] + Qm0 = neuron.Cm0 * neuron.Vm0 * 1e-3 + bls = PointNICE.BilayerSonophore(geom, params, Fdrive, neuron.Cm0, Qm0) + + # Get data of variables to plot + vrs = [] + for i in range(nvars): + pltvar = pltvars[varlist[i]] + if 'alias' in pltvar: + var = eval(pltvar['alias']) + elif 'key' in pltvar: + var = data[pltvar['key']] + elif 'constant' in pltvar: + var = eval(pltvar['constant']) * np.ones(t.size) + else: + var = data[varlist[i]] + vrs.append(var) + + # Determine patches location + npatches, tpatch_on, tpatch_off = getPatchesLoc(t, states) + + # Adding onset to all signals + if t_onset > 0.0: + t = np.insert(t + t_onset, 0, 0.0) + for i in range(nvars): + vrs[i] = np.insert(vrs[i], 0, vrs[i][0]) + tpatch_on += t_onset + tpatch_off += t_onset + + # Plotting + if naxes == 1: + _, ax = plt.subplots(figsize=(11, 4)) + axes = [ax] + else: + _, axes = plt.subplots(naxes, 1, figsize=(11, min(3 * naxes, 9))) + + # Axes + for i in range(naxes): + ax = axes[i] + if positions[i] < naxes - 1: + ax.get_xaxis().set_ticklabels([]) + else: + ax.set_xlabel('$time \ (' + t_unit + ')$', fontsize=fs) + for tick in ax.xaxis.get_major_ticks(): + tick.label.set_fontsize(fs) + ax.locator_params(axis='y', nbins=2) + for tick in ax.yaxis.get_major_ticks(): + tick.label.set_fontsize(fs) + + # Time series + icolor = 0 + for i in range(nvars): + pltvar = pltvars[varlist[i]] + ax = axes[positions[i]] + if 'constant' in pltvar: + ax.plot(t * t_factor, vrs[i] * pltvar['factor'], '--', c='black', lw=4) + else: + ax.plot(t * t_factor, vrs[i] * pltvar['factor'], c='C{}'.format(icolor), lw=4) + if 'min' in pltvar and 'max' in pltvar: + 
ax.set_ylim(pltvar['min'], pltvar['max']) + if pltvar['unit']: + ax.set_ylabel('${}\ ({})$'.format(pltvar['label'], pltvar['unit']), + fontsize=fs) + else: + ax.set_ylabel('${}$'.format(pltvar['label']), fontsize=fs) + icolor += 1 + + # Patches + if show_patches == 1: + for ax in axes: + (ybottom, ytop) = ax.get_ylim() + for j in range(npatches): + ax.add_patch(patches.Rectangle((tpatch_on[j] * t_factor, ybottom), + (tpatch_off[j] - tpatch_on[j]) * t_factor, + ytop - ybottom, color='#8A8A8A', alpha=0.1)) + + plt.tight_layout() + + # Save figure if needed (automatic or checked) + if plt_save == 1: + if ask_before_save == 1: + plt_filename = SaveFigDialog(pkl_dir, '{}_{}.{}'.format(filecode, tag, fig_ext)) + else: + plt_filename = '{}/{}_{}.{}'.format(pkl_dir, filecode, tag, fig_ext) + if plt_filename: + plt.savefig(plt_filename) + print('Saving figure as "{}"'.format(plt_filename)) + plt.close() + +# Show all plots if needed +if plt_show == 1: + plt.show() diff --git a/plot/plot_comp.py b/plot/plot_comp.py new file mode 100644 index 0000000..c8b29f5 --- /dev/null +++ b/plot/plot_comp.py @@ -0,0 +1,179 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# @Author: Theo Lemaire +# @Date: 2017-02-13 12:41:26 +# @Email: theo.lemaire@epfl.ch +# @Last Modified by: Theo Lemaire +# @Last Modified time: 2017-08-23 14:18:57 + +""" Compare profiles of several specific output variables of NICE simulations. """ + +import pickle +import ntpath +import re +import inspect +import numpy as np +import matplotlib.pyplot as plt +import matplotlib.patches as patches + +import PointNICE +from PointNICE.utils import OpenFilesDialog, InteractiveLegend, getPatchesLoc +from PointNICE.pltvars import pltvars + + +# List of variables to plot +# varlist = ['Qm'] +yvars = ['Pac', 'Pmavg', 'Telastic', 'Vm', 'iL'] + +# Plotting options +fs = 12 +show_patches = True + +t_plt = pltvars['t'] +y_pltvars = {key: pltvars[key] for key in yvars} + +# Dictionary of neurons +neurons = {} +for classname, obj in inspect.getmembers(PointNICE.channels): + if inspect.isclass(obj) and isinstance(obj.name, str): + neurons[obj.name] = obj + + +# Regular expression for input files +rgxp = re.compile('sim_([A-Za-z]*)_(.*).pkl') + +# Select data files +pkl_filepaths, _ = OpenFilesDialog('pkl') +if not pkl_filepaths: + print('error: no input file') + quit() + +# Initialize figure and axes +# nvars = len(varlist) +nvars = len(yvars) +if nvars == 1: + _, ax = plt.subplots(figsize=(11, 4)) + axes = [ax] +else: + _, axes = plt.subplots(nvars, 1, figsize=(11, min(3 * nvars, 9))) +labels = [ntpath.basename(fp)[4:-4].replace('_', ' ') for fp in pkl_filepaths] +for i in range(nvars): + ax = axes[i] + # pltvar = pltvars[varlist[i]] + pltvar = y_pltvars[i] + if 'min' in pltvar and 'max' in pltvar: + ax.set_ylim(pltvar['min'], pltvar['max']) + if pltvar['unit']: + ax.set_ylabel('${}\ ({})$'.format(pltvar['label'], pltvar['unit']), fontsize=fs) + else: + ax.set_ylabel('${}$'.format(pltvar['label']), fontsize=fs) + if i < nvars - 1: + ax.get_xaxis().set_ticklabels([]) + else: + ax.set_xlabel('${}\ ({})$'.format(t_plt['label'], t_plt['unit']), fontsize=fs) + for tick in ax.xaxis.get_major_ticks(): + tick.label.set_fontsize(fs) + ax.locator_params(axis='y', nbins=2) + for tick in ax.yaxis.get_major_ticks(): + tick.label.set_fontsize(fs) + + + +# Loop through data files +tstim_ref = 0.0 +nstim = 0 +j = 0 +aliases = {} +for pkl_filepath in pkl_filepaths: + + pkl_filename = ntpath.basename(pkl_filepath) + + # Retrieve neuron name + mo = 
rgxp.fullmatch(pkl_filename) + if not mo: + print('Error: PKL file does not match regular expression pattern') + quit() + neuron_name = mo.group(1) + + # Load data + print('Loading data from "' + pkl_filename + '"') + with open(pkl_filepath, 'rb') as pkl_file: + data = pickle.load(pkl_file) + + # Extract useful variables + t = data['t'] + states = data['states'] + tstim = data['tstim'] + Fdrive = data['Fdrive'] + Adrive = data['Adrive'] + params = data['params'] + a = data['a'] + d = data['d'] + geom = {"a": a, "d": d} + + # Initialize BLS and channels mechanism + neuron = neurons[neuron_name]() + Qm0 = neuron.Cm0 * neuron.Vm0 * 1e-3 + bls = PointNICE.BilayerSonophore(geom, params, Fdrive, neuron.Cm0, Qm0) + + # Get data of variables to plot + vrs = [] + for i in range(nvars): + pltvar = y_pltvars[i] + # pltvar = pltvars[varlist[i]] + if 'alias' in pltvar: + var = eval(pltvar['alias']) + elif 'key' in pltvar: + var = data[pltvar['key']] + else: + var = data[varlist[i]] + vrs.append(var) + + # Determine patches location + npatches, tpatch_on, tpatch_off = getPatchesLoc(t, states) + + # Adding onset to all signals + if t_plt['onset'] > 0.0: + t = np.insert(t + t_plt['onset'], 0, 0.0) + for i in range(nvars): + vrs[i] = np.insert(vrs[i], 0, vrs[i][0]) + tpatch_on += t_plt['onset'] + tpatch_off += t_plt['onset'] + + # Plotting + handles = [axes[i].plot(t * t_plt['factor'], vrs[i] * pltvars[varlist[i]]['factor'], + linewidth=2, label=labels[j]) for i in range(nvars)] + plt.tight_layout() + + if show_patches: + k = 0 + # stimulation patches + for ax in axes: + handle = handles[k] + (ybottom, ytop) = ax.get_ylim() + la = [] + for i in range(npatches): + la.append(ax.add_patch(patches.Rectangle((tpatch_on[i] * t_plt['factor'], ybottom), + (tpatch_off[i] - tpatch_on[i]) * t_plt['factor'], + ytop - ybottom, + color=handle[0].get_color(), alpha=0.2))) + + aliases[handle[0]] = la + k += 1 + + if tstim != tstim_ref: + if nstim == 0: + nstim += 1 + tstim_ref = tstim + else: + print('Warning: comparing different stimulation durations') + + j += 1 + + +iLegends = [] +for k in range(nvars): + axes[k].legend(loc='upper left', fontsize=fs) + iLegends.append(InteractiveLegend(axes[k].legend_, aliases)) + +plt.show() diff --git a/plot/plot_eff_coeffs.py b/plot/plot_eff_coeffs.py new file mode 100644 index 0000000..52ba3dc --- /dev/null +++ b/plot/plot_eff_coeffs.py @@ -0,0 +1,115 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# @Author: Theo Lemaire +# @Date: 2017-02-15 15:59:37 +# @Email: theo.lemaire@epfl.ch +# @Last Modified by: Theo Lemaire +# @Last Modified time: 2017-08-21 21:11:54 + +''' Plot the profiles of the 9 charge-dependent "effective" HH coefficients, + as a function of charge density or membrane potential. 
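Coefficients are loaded from pickled lookup files (one per neuron type, sonophore radius and
drive frequency) and plotted against charge density or effective membrane potential, with one
curve per acoustic amplitude.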
''' + +import os +import re +import ntpath +import inspect +import pickle +import matplotlib.pyplot as plt +import matplotlib.cm as cm +import numpy as np + +import PointNICE +from PointNICE.utils import OpenFilesDialog, rescale +from PointNICE.pltvars import pltvars + + +# Dictionary of neurons +neurons = {} +for classname, obj in inspect.getmembers(PointNICE.channels): + if inspect.isclass(obj) and isinstance(obj.name, str): + neurons[obj.name] = obj + +# Select data files (PKL) +lookup_root = '../lookups/' +lookup_absroot = os.path.abspath(lookup_root) +lookup_filepaths, _ = OpenFilesDialog('pkl', lookup_absroot) + +# Quit if no file selected +if not lookup_filepaths: + print('error: no lookup table selected') + quit() + +print('importing lookup tables') + +nfiles = len(lookup_filepaths) +rgxp = re.compile('([A-Za-z]*)_lookups_a(\d*.\d*)nm_f(\d*.\d*)kHz.pkl') +xvar = 'V' # 'Q' (abscissa variable) + +nvars = 9 +fs = 15 + +for i in range(nfiles): + + # Load lookup table + lookup_filename = ntpath.basename(lookup_filepaths[i]) + mo = rgxp.fullmatch(lookup_filename) + if not mo: + print('Error: lookup file does not match regular expression pattern') + else: + # Retrieve stimulus parameters + neuron_name = mo.group(1) + neuron = neurons[neuron_name]() + varlist = neuron.coeff_names + print(varlist) + Fdrive = float(mo.group(3)) * 1e3 + + # Retrieve coefficients data + with open(lookup_filepaths[i], 'rb') as fh: + lookup = pickle.load(fh) + Qm = lookup['Q'] + amps = lookup['A'] + Veff = lookup['V'] + Amin = np.amin(amps) + Amax = np.amax(amps) + Qmin = np.amin(Qm) + Qmax = np.amax(Qm) + namps = amps.size + + # Plotting + print('plotting') + + mymap = cm.get_cmap('jet') + sm_amp = plt.cm.ScalarMappable(cmap=mymap, norm=plt.Normalize(Amin * 1e-3, Amax * 1e-3)) + sm_amp._A = [] + + fig, axes = plt.subplots(nrows=3, ncols=3, figsize=(15, 8)) + + ax = axes[0, 0] + ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=fs) + ax.set_ylabel('$V_m\ (mV)$', fontsize=fs) + for i in range(namps): + ax.plot(Qm * 1e5, Veff[i, :] * 1e0, c=mymap(rescale(amps[i], Amin, Amax))) + + for j in range(nvars - 1): + pltvar = pltvars[varlist[j]] + ax = axes[int((j + 1) / 3), (j + 1) % 3] + ax.set_ylabel('${}\ ({})$'.format(pltvar['label'], pltvar['unit']), fontsize=fs) + if xvar == 'Q': + ax.set_xlabel('$Q_m \ (nC/cm^2)$', fontsize=fs) + for i in range(namps): + ax.plot(Qm * 1e5, lookup[varlist[j]][i, :] * pltvar['factor'], + c=mymap(rescale(amps[i], Amin, Amax))) + elif xvar == 'V': + ax.set_xlabel('$V_m \ (mV)$', fontsize=fs) + for i in range(namps): + ax.plot(Veff[i, :] * 1e0, lookup[varlist[j]][i, :] * pltvar['factor'], + c=mymap(rescale(amps[i], Amin, Amax))) + plt.tight_layout() + + fig.subplots_adjust(right=0.85) + cbar_ax = fig.add_axes([0.87, 0.1, 0.02, 0.8]) + fig.add_axes() + fig.colorbar(sm_amp, cax=cbar_ax) + cbar_ax.set_ylabel('$A_{drive} \ (kPa)$', fontsize=fs) + +plt.show() diff --git a/plot/plot_forces.py b/plot/plot_forces.py new file mode 100644 index 0000000..535b82f --- /dev/null +++ b/plot/plot_forces.py @@ -0,0 +1,232 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# @Author: Theo Lemaire +# @Date: 2016-10-07 10:22:24 +# @Email: theo.lemaire@epfl.ch +# @Last Modified by: Theo Lemaire +# @Last Modified time: 2017-07-18 15:53:50 + +""" Analysis of the system geometric variables and interplaying forces at +stake in a static quasi-steady NICE system. 
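The quasi-static balance evaluated here is Pnet = Pg + Pm + Pel + Pec - P0, i.e. the sum of the
gas, intermolecular, elastic and electric pressures minus the static pressure, computed over
ranges of leaflet deflections, acoustic perturbations and membrane charge densities.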
""" + +import time +import numpy as np +import matplotlib.pyplot as plt + +import PointNICE +from PointNICE.utils import LoadParams, PmCompMethod + + +plt_bool = 1 + +# Initialization: create a BLS instance +params = LoadParams() +a = 32e-9 # in-plane radius (m) +d = 0.0e-6 # embedding tissue thickness (m) +geom = {"a": a, "d": d} +Fdrive = 0.0 # dummy stimulation frequency +Cm0 = 1e-2 # membrane resting capacitance (F/m2) +Qm0 = -71.9e-5 # membrane resting charge density (C/m2) + +bls = PointNICE.BilayerSonophore(geom, params, Fdrive, Cm0, Qm0) + + +# Input 1: leaflet deflections +ZMin = -0.45 * bls.Delta +ZMax = 2 * bls.a +nZ = 3000 +Z = np.linspace(ZMin, ZMax, nZ) +Zlb = -0.5 * bls.Delta +Zub = bls.a + +# Input 2: acoustic perturbations +PacMax = 9.5e4 +nPac1 = 5 +nPac2 = 100 +Pac1 = np.linspace(-PacMax, PacMax, nPac1) +Pac2 = np.linspace(-PacMax, PacMax, nPac2) + +# Input 3: membrane charge densities +QmMin = bls.Qm0 +QmMax = 50.0e-5 +nQm = 7 +Qm = np.linspace(QmMin, QmMax, nQm) + + +# Outputs +R = np.empty(nZ) +Cm = np.empty(nZ) +Pm_apex = np.empty(nZ) +Pm_avg = np.empty(nZ) +Pm_avg_predict = np.empty(nZ) +Pg = np.empty(nZ) +Pec = np.empty(nZ) +Pel = np.empty(nZ) +P0 = np.ones(nZ) * bls.P0 +Pnet = np.empty(nZ) +Pqs = np.empty((nZ, nPac1)) +Pecdense = np.empty((nZ, nQm)) +Pnetdense = np.empty((nZ, nQm)) +Zeq = np.empty(nPac1) +Zeq_dense = np.empty(nPac2) + +t0 = time.time() + +# Check net QS pressure at Z = 0 +Peq0 = bls.PtotQS(0.0, bls.ng0, bls.Qm0, 0.0, PmCompMethod.direct) +print('Net QS pressure at Z = 0.0 without perturbation: ' + '{:.2e}'.format(Peq0) + ' Pa') + +# Loop through the deflection vector +for i in range(nZ): + + # 1-dimensional output vectors + R[i] = bls.curvrad(Z[i]) + Cm[i] = bls.Capct(Z[i]) + Pm_apex[i] = bls.PMlocal(0.0, Z[i], R[i]) + Pm_avg[i] = bls.PMavg(Z[i], R[i], bls.surface(Z[i])) + Pm_avg_predict[i] = bls.PMavgpred(Z[i]) + Pel[i] = bls.PEtot(Z[i], R[i]) + Pg[i] = bls.gasmol2Pa(bls.ng0, bls.volume(Z[i])) + Pec[i] = bls.Pelec(Z[i], bls.Qm0) + Pnet[i] = bls.PtotQS(Z[i], bls.ng0, bls.Qm0, 0.0, PmCompMethod.direct) + + # loop through the acoustic perturbation vector an compute 2-dimensional + # balance pressure output vector + for j in range(nPac1): + Pqs[i, j] = bls.PtotQS(Z[i], bls.ng0, bls.Qm0, Pac1[j], PmCompMethod.direct) + + for j in range(nQm): + Pecdense[i, j] = bls.Pelec(Z[i], Qm[j]) + Pnetdense[i, j] = bls.PtotQS(Z[i], bls.ng0, Qm[j], 0.0, PmCompMethod.direct) + +# Compute min local intermolecular pressure +Pm_apex_min = np.amin(Pm_apex) +iPm_apex_min = np.argmin(Pm_apex) +print("min local intermolecular resultant pressure = %.2e Pa for z = %.2f nm" % + (Pm_apex_min, Z[iPm_apex_min] * 1e9)) + +for j in range(nPac1): + Zeq[j] = bls.balancedefQS(bls.ng0, bls.Qm0, Pac1[j], PmCompMethod.direct) +for j in range(nPac2): + Zeq_dense[j] = bls.balancedefQS(bls.ng0, bls.Qm0, Pac2[j], PmCompMethod.direct) + + +t1 = time.time() +print("computation completed in " + '{:.2f}'.format(t1 - t0) + " s") + + +if plt_bool == 1: + + # 1: Intermolecular pressures + fig1, ax = plt.subplots() + fig1.canvas.set_window_title("1: integrated vs. 
predicted average intermolecular pressure") + ax.set_xlabel('Z $(nm)$', fontsize=18) + ax.set_ylabel('Pressures $(MPa)$', fontsize=18) + ax.grid(True) + ax.plot([Zlb * 1e9, Zlb * 1e9], [np.amin(Pm_avg) * 1e-6, np.amax(Pm_avg) * 1e-6], '--', + color="blue", label="$-\Delta /2$") + ax.plot([Zub * 1e9, Zub * 1e9], [np.amin(Pm_avg) * 1e-6, np.amax(Pm_avg) * 1e-6], '--', + color="red", label="$a$") + ax.plot(Z * 1e9, Pm_avg * 1e-6, '-', label="$P_{M, avg}$", color="green", linewidth=2.0) + ax.plot(Z * 1e9, Pm_avg_predict * 1e-6, '-', label="$P_{M, avg-predict}$", color="red", + linewidth=2.0) + ax.set_xlim(ZMin * 1e9 - 5, ZMax * 1e9) + ax.legend(fontsize=24) + + + # 2: Capacitance and electric pressure + fig2, ax = plt.subplots() + fig2.canvas.set_window_title("2: Capacitance and electric equivalent pressure") + ax.set_xlabel('Z $(nm)$', fontsize=18) + ax.set_ylabel('$C_m \ (uF/cm^2)$', fontsize=18) + ax.plot(Z * 1e9, Cm * 1e2, '-', label="$C_{m}$", color="black", linewidth=2.0) + ax.set_xlim(ZMin * 1e9 - 5, ZMax * 1e9) + ax2 = ax.twinx() + ax2.set_ylabel('$P_{EC}\ (MPa)$', fontsize=18, color='magenta') + ax2.plot(Z * 1e9, Pec * 1e-6, '-', label="$P_{EC}$", color="magenta", linewidth=2.0) + + # tmp: electric pressure for varying membrane charge densities + figtmp, ax = plt.subplots() + figtmp.canvas.set_window_title("electric pressure for varying membrane charges") + ax.set_xlabel('Z $(nm)$', fontsize=18) + ax.set_ylabel('$P_{EC} \ (MPa)$', fontsize=18) + for j in range(nQm): + lbl = "$Q_m$ = " + '{:.2f}'.format(Qm[j] * 1e5) + " nC/cm2" + ax.plot(Z * 1e9, Pecdense[:, j] * 1e-6, '-', label=lbl, linewidth=2.0) + ax.set_xlim(ZMin * 1e9 - 5, ZMax * 1e9) + ax.legend() + + + # tmp: net pressure for varying membrane potentials + figtmp, ax = plt.subplots() + figtmp.canvas.set_window_title("net pressure for varying membrane charges") + ax.set_xlabel('Z $(nm)$', fontsize=18) + ax.set_ylabel('$P_{net} \ (MPa)$', fontsize=18) + for j in range(nQm): + lbl = "$Q_m$ = " + '{:.2f}'.format(Qm[j] * 1e5) + " nC/cm2" + ax.plot(Z * 1e9, Pnetdense[:, j] * 1e-6, '-', label=lbl, linewidth=2.0) + ax.set_xlim(ZMin * 1e9 - 5, ZMax * 1e9) + ax.legend() + + + # 3: Net pressure without perturbation + fig3, ax = plt.subplots() + fig3.canvas.set_window_title("3: Net QS pressure without perturbation") + ax.set_xlabel('Z $(nm)$', fontsize=18) + ax.set_ylabel('Pressures $(kPa)$', fontsize=18) + # ax.grid(True) + # ax.plot([Zlb * 1e9, Zlb * 1e9], [np.amin(Pec) * 1e-3, np.amax(Pm_avg) * 1e-3], '--', + # color="blue", label="$-\Delta / 2$") + # ax.plot([Zub * 1e9, Zub * 1e9], [np.amin(Pec) * 1e-3, np.amax(Pm_avg) * 1e-3], '--', + # color="red", label="$a$") + ax.plot(Z * 1e9, Pg * 1e-3, '-', label="$P_{gas}$", linewidth=3.0, color='C0') + ax.plot(Z * 1e9, -P0 * 1e-3, '-', label="$-P_{0}$", linewidth=3.0, color='C1') + ax.plot(Z * 1e9, Pm_avg * 1e-3, '-', label="$P_{mol}$", linewidth=3.0, color='C2') + ax.plot(Z * 1e9, Pec * 1e-3, '-', label="$P_{elec}$", linewidth=3.0, color='C3') + ax.plot(Z * 1e9, Pel * 1e-3, '-', label="$P_{elastic}$", linewidth=3.0, color='C4') + # ax.plot(Z * 1e9, (Pg - P0 + Pm_avg + Pec + Pel) * 1e-3, '--', label="$P_{net}$", linewidth=2.0, + # color='black') + # ax.plot(Z * 1e9, (Pg - P0 + Pm_avg + Pec - Pnet) * 1e-6, '--', label="$P_{net} diff$", + # linewidth=2.0, color="blue") + ax.set_xlim(ZMin * 1e9 - 5, 30) + ax.set_ylim(-1500, 2000) + ax.legend(fontsize=24) + # ax.grid(True) + + + # 4: QS pressure for different perturbations + fig4, ax = plt.subplots() + fig4.canvas.set_window_title("4: Net QS 
pressure for different acoustic perturbations") + ax.set_xlabel('Z $(nm)$', fontsize=18) + ax.set_ylabel('Pressures $(MPa)$', fontsize=18) + ax.grid(True) + ax.plot([Zlb * 1e9, Zlb * 1e9], [np.amin(Pqs[:, 0]) * 1e-6, np.amax(Pqs[:, nPac1 - 1]) * 1e-6], + '--', color="blue", label="$-\Delta/2$") + ax.plot([Zub * 1e9, Zub * 1e9], [np.amin(Pqs[:, 0]) * 1e-6, np.amax(Pqs[:, nPac1 - 1]) * 1e-6], + '--', color="red", label="$a$") + ax.set_xlim(ZMin * 1e9 - 5, ZMax * 1e9) + for j in range(nPac1): + lbl = "$P_{A}$ = %.2f MPa" % (Pac1[j] * 1e-6) + ax.plot(Z * 1e9, Pqs[:, j] * 1e-6, '-', label=lbl, linewidth=2.0) + ax.plot([Zeq[j] * 1e9, Zeq[j] * 1e9], [np.amin(Pqs[:, nPac1 - 1]) * 1e-6, + np.amax(Pqs[:, 0]) * 1e-6], '--', color="black") + ax.legend(fontsize=24) + + # 5: QS balance deflection for different acoustic perturbations + fig5, ax = plt.subplots() + fig5.canvas.set_window_title("5: QS balance deflection for different acoustic perturbations ") + ax.set_xlabel('Perturbation $(MPa)$', fontsize=18) + ax.set_ylabel('Z $(nm)$', fontsize=18) + ax.plot([np.amin(Pac2) * 1e-6, np.amax(Pac2) * 1e-6], [Zlb * 1e9, Zlb * 1e9], '--', + color="blue", label="$-\Delta / 2$") + ax.plot([np.amin(Pac2) * 1e-6, np.amax(Pac2) * 1e-6], [Zub * 1e9, Zub * 1e9], '--', + color="red", label="$a$") + ax.plot([-bls.P0 * 1e-6, -bls.P0 * 1e-6], + [np.amin(Zeq_dense) * 1e9, np.amax(Zeq_dense) * 1e9], '--', color="black", + label="$-P_0$") + ax.plot(Pac2 * 1e-6, Zeq_dense * 1e9, '-', label="$Z_{eq}$", linewidth=2.0) + ax.set_xlim(-0.12, 0.12) + ax.set_ylim(ZMin * 1e9 - 5, bls.a * 1e9 + 5) + ax.legend(fontsize=24) + + plt.show() diff --git a/plot/plot_gating_kinetics.py b/plot/plot_gating_kinetics.py new file mode 100644 index 0000000..fd8e552 --- /dev/null +++ b/plot/plot_gating_kinetics.py @@ -0,0 +1,118 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# @Author: Theo Lemaire +# @Date: 2016-10-11 20:35:38 +# @Email: theo.lemaire@epfl.ch +# @Last Modified by: Theo Lemaire +# @Last Modified time: 2017-08-17 13:19:26 + +""" Plot the voltage-dependent steady-states and time constants of activation and inactivation + gates of the different ionic currents involved in the neuron's membrane. 
""" + +import numpy as np +import matplotlib.pyplot as plt + +from PointNICE.channels import * + +# Create channels mechanism +neuron = LeechTouch() + +# Input membrane potential vector +Vm = np.linspace(-100, 50, 300) + +# iNa gating dynamics +if neuron.name in ['RS', 'FS', 'LTS', 'RE', 'TC']: + am = neuron.alpham(Vm) + bm = neuron.betam(Vm) + tm = 1 / (am + bm) + minf = am * tm + ah = neuron.alphah(Vm) + bh = neuron.betah(Vm) + th = 1 / (ah + bh) + hinf = ah * th +elif neuron.name == 'LeechT': + minf = neuron.minf(Vm) + tm = np.ones(len(Vm)) * neuron.taum + hinf = neuron.hinf(Vm) + th = neuron.tauh(Vm) + +# iK gating dynamics +if neuron.name in ['RS', 'FS', 'LTS', 'RE', 'TC']: + an = neuron.alphan(Vm) + bn = neuron.betan(Vm) + tn = 1 / (an + bn) + ninf = an * tn +elif neuron.name == 'LeechT': + ninf = neuron.ninf(Vm) + tn = neuron.taun(Vm) + +# iM gating dynamics +if neuron.name in ['RS', 'FS', 'LTS']: + tp = neuron.taup(Vm) + pinf = neuron.pinf(Vm) + +# iT gating dynamics +if neuron.name in ['LTS', 'RE', 'TC']: + ts = neuron.taus(Vm) + sinf = neuron.sinf(Vm) + tu = np.array([neuron.tauu(v) for v in Vm]) + uinf = neuron.uinf(Vm) +elif neuron.name == 'LeechT': + sinf = neuron.sinf(Vm) + ts = np.ones(len(Vm)) * neuron.taus + +# iH gating dynamics +if neuron.name in ['TC']: + to = neuron.tauo(Vm) + oinf = neuron.oinf(Vm) + + +# -------------- PLOTTING ----------------- + +fs = 12 +fig, axes = plt.subplots(2) + +fig.suptitle('Gating dynamics') + +ax = axes[0] +ax.get_xaxis().set_ticklabels([]) +# ax.set_xlabel('$V_m\ (mV)$', fontsize=fs) +ax.set_ylabel('$X_{\infty}\ (mV)$', fontsize=fs) +ax.plot(Vm, minf, lw=2, c='C1', label='$m_{\infty}$') +ax.plot(Vm, hinf, '--', lw=2, c='C1', label='$h_{\infty}$') +ax.plot(Vm, ninf, lw=2, c='C0', label='$n_{\infty}$') +if neuron.name in ['RS', 'FS', 'LTS']: + ax.plot(Vm, pinf, lw=2, color='C2', label='$p_{\infty}$') +if neuron.name in ['LTS', 'TC']: + ax.plot(Vm, sinf, lw=2, color='r', label='$s_{\infty}$') + ax.plot(Vm, uinf, '--', lw=2, color='r', label='$u_{\infty}$') +if neuron.name in ['RE']: + ax.plot(Vm, sinf, lw=2, color='C5', label='$s_{\infty}$') + ax.plot(Vm, uinf, '--', lw=2, color='C5', label='$u_{\infty}$') +if neuron.name in ['TC']: + ax.plot(Vm, oinf, lw=2, color='#08457E', label='$o_{\infty}$') +if neuron.name in ['LeechT']: + ax.plot(Vm, sinf, lw=2, color='r', label='$s_{\infty}$') +ax.legend(fontsize=fs, loc=7) + +ax = axes[1] +ax.set_xlabel('$V_m\ (mV)$', fontsize=fs) +ax.set_ylabel('$\\tau_X\ (ms)$', fontsize=fs) +ax.plot(Vm, tm * 1e3, lw=2, c='C1', label='$\\tau_m$') +ax.plot(Vm, th * 1e3, '--', lw=2, c='C1', label='$\\tau_h$') +ax.plot(Vm, tn * 1e3, lw=2, c='C0', label='$\\tau_n$') +if neuron.name in ['RS', 'FS', 'LTS']: + ax.plot(Vm, tp * 1e3, lw=2, color='C2', label='$\\tau_p$') +if neuron.name in ['LTS', 'TC']: + ax.plot(Vm, ts * 1e3, lw=2, color='r', label='$\\tau_s$') + ax.plot(Vm, tu * 1e3, '--', lw=2, color='r', label='$\\tau_u$') +if neuron.name in ['RE']: + ax.plot(Vm, ts * 1e3, lw=2, color='C5', label='$\\tau_s$') + ax.plot(Vm, tu * 1e3, '--', lw=2, color='C5', label='$\\tau_u$') +if neuron.name in ['TC']: + ax.plot(Vm, to * 1e3, lw=2, color='#08457E', label='$\\tau_o$') +if neuron.name in ['LeechT']: + ax.plot(Vm, ts * 1e3, lw=2, color='r', label='$\\tau_s$') +ax.legend(fontsize=fs, loc=7) + +plt.show() diff --git a/postpro/postpro_latency.py b/postpro/postpro_latency.py new file mode 100644 index 0000000..caa879e --- /dev/null +++ b/postpro/postpro_latency.py @@ -0,0 +1,65 @@ +#!/usr/bin/env python +# -*- coding: utf-8 
-*- +# @Author: Theo Lemaire +# @Date: 2016-10-31 10:10:41 +# @Email: theo.lemaire@epfl.ch +# @Last Modified by: Theo Lemaire +# @Last Modified time: 2017-07-18 15:39:46 + +""" Test relationship between stimulus intensity and response latency. """ + +import numpy as np +import matplotlib.pyplot as plt + +from PointNICE.utils import ImportExcelCol, LoadParams + +# Load NICE parameters +params = LoadParams() +biomech = params['biomech'] +ac_imp = biomech['rhoL'] * biomech['c'] # Rayl + +# Define import settings +xls_file = "C:/Users/admin/Desktop/Model output/NBLS spikes 0.35MHz/nbls_log_spikes_0.35MHz.xlsx" +sheet = 'Data' + +# Import data +f = ImportExcelCol(xls_file, sheet, 'E', 2) * 1e3 # Hz +A = ImportExcelCol(xls_file, sheet, 'F', 2) * 1e3 # Pa +T = ImportExcelCol(xls_file, sheet, 'G', 2) * 1e-3 # s +N = ImportExcelCol(xls_file, sheet, 'Q', 2) +L = ImportExcelCol(xls_file, sheet, 'R', 2) # ms + +# Retrieve unique values of latencies (for min. 2 spikes) and corresponding amplitudes +iremove = np.where(N < 2)[0] +A_true = np.delete(A, iremove) +L_true = np.delete(L, iremove).astype(np.float) +latencies, indices = np.unique(L_true, return_index=True) +amplitudes = A_true[indices] + +# Convert amplitudes to intensities +intensities = amplitudes**2 / (2 * ac_imp) * 1e-4 # W/cm2 + +# Plot latency vs. amplitude +fig1, ax = plt.subplots(figsize=(12, 9)) +ax.set_xlabel("$Amplitude \ (kPa)$", fontsize=28) +ax.set_ylabel("$Latency \ (ms)$", fontsize=28) +ax.scatter(amplitudes * 1e-3, latencies, color='black', s=100) +for item in ax.get_yticklabels(): + item.set_fontsize(24) +for item in ax.get_xticklabels(): + item.set_fontsize(24) + + +# Plot latency vs. intensity +fig2, ax = plt.subplots(figsize=(12, 9)) +ax.set_xlabel("$Intensity \ (W/cm^2)$", fontsize=28) +ax.set_ylabel("$Latency \ (ms)$", fontsize=28) +ax.scatter(intensities, latencies, color='black', s=100) +ax.set_xticks([0, 0.2, 0.4, 0.6, 0.8]) +ax.set_yticks([25, 35, 55, 65]) +for item in ax.get_yticklabels(): + item.set_fontsize(24) +for item in ax.get_xticklabels(): + item.set_fontsize(24) + +plt.show() diff --git a/postpro/postpro_rmse_charge.py b/postpro/postpro_rmse_charge.py new file mode 100644 index 0000000..de16fbd --- /dev/null +++ b/postpro/postpro_rmse_charge.py @@ -0,0 +1,72 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# @Author: Theo Lemaire +# @Date: 2016-11-01 16:35:43 +# @Email: theo.lemaire@epfl.ch +# @Last Modified by: Theo Lemaire +# @Last Modified time: 2017-07-18 15:47:31 + +""" Compute RMSE between charge profiles of NICE output. 
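The two selected charge profiles are interpolated onto a common 1-ms time grid spanning the
stimulus and offset durations before the RMSE is computed.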
""" + +import pickle +import ntpath +import numpy as np + +from PointNICE.utils import OpenFilesDialog, rmse + +# Define options +pkl_root = "../../Output/test Elec/" +t_offset = 10e-3 # s + +# Select data files (PKL) +pkl_filepaths, pkl_dir = OpenFilesDialog('pkl') + +# Quit if no file selected +if not pkl_filepaths: + print('error: no input file') + quit() + +# Quit if more than 2 files +if len(pkl_filepaths) > 2: + print('error: cannot compare more than 2 methods') + quit() + +# Load data from file 1 +pkl_filename = ntpath.basename(pkl_filepaths[0]) +print('Loading data from "' + pkl_filename + '"') +with open(pkl_filepaths[0], 'rb') as pkl_file: + data = pickle.load(pkl_file) + +t1 = data['t'] +tstim1 = data['tstim'] +toffset1 = data['toffset'] +f1 = data['Fdrive'] +A1 = data['Adrive'] +Q1 = data['Qm'] * 1e2 # nC/cm2 +states1 = data['states'] + +# Load data from file 2 +pkl_filename = ntpath.basename(pkl_filepaths[1]) +print('Loading data from "' + pkl_filename + '"') +with open(pkl_filepaths[1], 'rb') as pkl_file: + data = pickle.load(pkl_file) + +t2 = data['t'] +tstim2 = data['tstim'] +toffset2 = data['toffset'] +f2 = data['Fdrive'] +A2 = data['Adrive'] +Q2 = data['Qm'] * 1e2 # nC/cm2 +states2 = data['states'] + +if tstim1 != tstim2 or f1 != f2 or A1 != A2 or toffset1 != toffset2: + print('error: different stimulation conditions') +else: + print('comparing charge profiles') + + tcomp = np.arange(0, tstim1 + toffset1, 1e-3) # every ms + Qcomp1 = np.interp(tcomp, t1, Q1) + Qcomp2 = np.interp(tcomp, t2, Q2) + Q_rmse = rmse(Qcomp1, Qcomp2) + print('rmse = {:.5f} nC/cm2'.format(Q_rmse * 1e5)) + diff --git a/postpro/postpro_sensitivity_diameter.py b/postpro/postpro_sensitivity_diameter.py new file mode 100644 index 0000000..6d33b3a --- /dev/null +++ b/postpro/postpro_sensitivity_diameter.py @@ -0,0 +1,67 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# @Author: Theo Lemaire +# @Date: 2016-10-05 11:04:43 +# @Email: theo.lemaire@epfl.ch +# @Last Modified by: Theo Lemaire +# @Last Modified time: 2017-07-18 15:00:40 + +""" Test influence of structure diameter on BLS cavitation amplitude. """ + +import numpy as np +from scipy.optimize import curve_fit +import matplotlib.pyplot as plt + +from PointNICE.utils import ImportExcelCol + + +def f(x, a_, b_): + """ Fitting function """ + return a_ * np.power(x, b_) + + +# Import data +xls_file = "C:/Users/admin/Desktop/Model output/BLS Z diameter/bls_logZ_diameter.xlsx" +sheet = 'Data' +rd = ImportExcelCol(xls_file, sheet, 'C', 2) * 1e-9 +eAmax = ImportExcelCol(xls_file, sheet, 'M', 2) + +# Discard outliers +rd = rd[0:-5] +eAmax = eAmax[0:-5] + + +# Compute best power fit for eAmax +popt, pcov = curve_fit(f, rd, eAmax) +(a, b) = popt +if a < 1e-4: + a_str = '{:.2e}'.format(a) +else: + a_str = '{:.4f}'.format(a) +print("global least-square power fit: eAmax = " + a_str + " * a^" + '{:.2f}'.format(b)) + +# Compute predicted data and associated error +eAmax_predicted = f(rd, a, b) +residuals = eAmax - eAmax_predicted +ss_res = np.sum(residuals**2) +ss_tot = np.sum((eAmax - np.mean(eAmax))**2) +r_squared_eAmax = 1 - (ss_res / ss_tot) +print("R-squared = " + '{:.5f}'.format(r_squared_eAmax)) +N = residuals.size +std_err = np.sqrt(ss_res / N) +print("standard error: sigma_err = " + str(std_err)) + +# Plot areal strain vs. 
in-plane radius (data and best fit) +fig, ax = plt.subplots(figsize=(12, 9)) +ax.set_xlabel("$a \ (nm)$", fontsize=28) +ax.set_ylabel("$\epsilon_{A, max}$", fontsize=28) +ax.scatter(rd * 1e9, eAmax, color='blue', linewidth=2, label="data") +ax.plot(rd * 1e9, eAmax_predicted, '--', color='black', linewidth=2, + label="model: $\epsilon_{A,max} \propto = a^{" + '{:.2f}'.format(b) + "}$") +xlim = ax.get_xlim() +ylim = ax.get_ylim() +ax.text(xlim[0] + 0.1 * (xlim[1] - xlim[0]), ylim[0] + 0.6 * (ylim[1] - ylim[0]), + "$R^2 = " + '{:.5f}'.format(r_squared_eAmax) + "$", fontsize=28, color="black") +ax.legend(loc=4, fontsize=24) + +plt.show() diff --git a/postpro/postpro_sensitivity_embedding.py b/postpro/postpro_sensitivity_embedding.py new file mode 100644 index 0000000..fd8551a --- /dev/null +++ b/postpro/postpro_sensitivity_embedding.py @@ -0,0 +1,73 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# @Author: Theo Lemaire +# @Date: 2016-10-05 11:04:43 +# @Email: theo.lemaire@epfl.ch +# @Last Modified by: Theo Lemaire +# @Last Modified time: 2017-07-18 15:51:33 + +""" Test influence of tissue embedding on BLS cavitation amplitude. """ + +import numpy as np +from scipy.optimize import curve_fit +import matplotlib.pyplot as plt + +from PointNICE.utils import ImportExcelCol + + +def powerfit(x, a_, b_): + """fitting function""" + return a_ * np.power(x, b_) + + +# Import data +xls_file = "C:/Users/admin/Desktop/Model output/BLS Z tissue/bls_logZ_a32.0nm_embedding.xlsx" +sheet = 'Data' +rd = ImportExcelCol(xls_file, sheet, 'C', 2) * 1e-9 +th = ImportExcelCol(xls_file, sheet, 'D', 2) * 1e-6 +eAmax = ImportExcelCol(xls_file, sheet, 'M', 2) + + +# Filter out rows that don't match a specific radius value +a_ref = 32.0e-9 # (m) +imatch = np.where(rd == a_ref) +rd = rd[imatch] +th = th[imatch] +eAmax = eAmax[imatch] +print(str(imatch[0].size) + " values matching required radius") + + +# Compute best power fit for eAmax +popt, pcov = curve_fit(powerfit, th, eAmax) +(a, b) = popt +if a < 1e-4: + a_str = '{:.2e}'.format(a) +else: + a_str = '{:.4f}'.format(a) +print("global least-square power fit: eAmax = " + a_str + " * d^" + '{:.2f}'.format(b)) + + +# Compute predicted data and associated error +eAmax_predicted = powerfit(th, a, b) +residuals = eAmax - eAmax_predicted +ss_res = np.sum(residuals**2) +ss_tot = np.sum((eAmax - np.mean(eAmax))**2) +r_squared_eAmax = 1 - (ss_res / ss_tot) +print("R-squared = " + '{:.5f}'.format(r_squared_eAmax)) + + +# Plot areal strain vs. 
thickness (data and best fit) +fig, ax = plt.subplots(figsize=(12, 9)) +ax.set_xlabel("$d \ (um)$", fontsize=28) +ax.set_ylabel("$\epsilon_{A, max}$", fontsize=28) +ax.scatter(th * 1e6, eAmax, color='blue', linewidth=2, label="data") +ax.plot(th * 1e6, eAmax_predicted, '--', color='black', linewidth=2, + label="model: $\epsilon_{A,max} \propto = d^{" + '{:.2f}'.format(b) + "}$") +xlim = ax.get_xlim() +ylim = ax.get_ylim() +ax.text(xlim[0] + 0.4 * (xlim[1] - xlim[0]), ylim[0] + 0.5 * (ylim[1] - ylim[0]), + "$R^2 = " + '{:.5f}'.format(r_squared_eAmax) + "$", fontsize=28, color="black") +ax.legend(loc=1, fontsize=24) + +# Show plots +plt.show() diff --git a/postpro/postpro_sensitivity_stim_embedded.py b/postpro/postpro_sensitivity_stim_embedded.py new file mode 100644 index 0000000..92fdb84 --- /dev/null +++ b/postpro/postpro_sensitivity_stim_embedded.py @@ -0,0 +1,142 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# @Author: Theo Lemaire +# @Date: 2016-10-05 11:04:43 +# @Email: theo.lemaire@epfl.ch +# @Last Modified by: Theo Lemaire +# @Last Modified time: 2017-07-18 15:00:48 + +""" Test influence of acoustic amplitude and frequency on cavitation amplitude of embedded BLS. """ + +import numpy as np +from scipy.optimize import curve_fit +import matplotlib.pyplot as plt +import matplotlib.cm as cm +from mpl_toolkits.mplot3d import Axes3D + +from PointNICE.utils import ImportExcelCol, ConstructMatrix + + +def powerfit(X_, a_, b_, c_): + """ Fitting function """ + x, y = X_ + return a_ * np.power(x, b_) * np.power(y, c_) + + +# Import data +xls_file = "C:/Users/admin/Desktop/Model output/BLS Z 32nm radius/10um embedding/bls_logZ_a32.0nm_d10.0um.xlsx" +sheet = 'Data' +f = ImportExcelCol(xls_file, sheet, 'E', 2) * 1e3 +A = ImportExcelCol(xls_file, sheet, 'F', 2) * 1e3 +eAmax = ImportExcelCol(xls_file, sheet, 'M', 2) + +# Compute best power fit +p0 = 1e-3, 0.8, -0.5 +popt, pcov = curve_fit(powerfit, (A, f), eAmax, p0) +(a, b, c) = popt +if a < 1e-4: + a_str = '{:.2e}'.format(a) +else: + a_str = '{:.4f}'.format(a) +print("global least-square power fit: eAmax = %s * A^%.2f * f^%.2f" % (a_str, b, c)) + +# Compute predicted data and associated error +eAmax_predicted = powerfit((A, f), a, b, c) +residuals = eAmax - eAmax_predicted +ss_res = np.sum(residuals**2) +ss_tot = np.sum((eAmax - np.mean(eAmax))**2) +r_squared = 1 - (ss_res / ss_tot) +print("R-squared = " + '{:.5f}'.format(r_squared)) + +# Reshape serialized data into 2 dimensions +(freqs, amps, eAmax_2D, nholes) = ConstructMatrix(f, A, eAmax) +nFreqs = freqs.size +nAmps = amps.size +fmax = np.amax(freqs) +fmin = np.amin(freqs) +Amax = np.amax(amps) +Amin = np.amin(amps) +print(str(nholes) + " hole(s) in reconstructed matrix") + +# Create colormap +mymap = cm.get_cmap('jet') + +# Plot areal strain vs. amplitude (with frequency color code) +fig, ax = plt.subplots(figsize=(12, 9)) +ax.set_xlabel("$A \ (kPa)$", fontsize=28) +ax.set_ylabel("$\epsilon_{A, max}$", fontsize=28) +for i in range(nFreqs): + ax.plot(amps * 1e-3, eAmax_2D[i, :], c=mymap((freqs[i] - fmin) / (fmax - fmin)), + label='f = ' + str(freqs[i] * 1e-3) + ' kHz') +for item in ax.get_yticklabels(): + item.set_fontsize(24) +for item in ax.get_xticklabels(): + item.set_fontsize(24) +sm_freq = plt.cm.ScalarMappable(cmap=mymap, norm=plt.Normalize(fmin * 1e-3, fmax * 1e-3)) +sm_freq._A = [] +cbar = plt.colorbar(sm_freq) +cbar.ax.set_ylabel('$f \ (kHz)$', fontsize=28) +for item in cbar.ax.get_yticklabels(): + item.set_fontsize(24) + +# Plot areal strain vs. 
frequency (with amplitude color code) +fig, ax = plt.subplots(figsize=(12, 9)) +ax.set_xlabel("$f \ (kHz)$", fontsize=28) +ax.set_ylabel("$\epsilon_{A, max}$", fontsize=28) +for j in range(nAmps): + ax.plot(freqs * 1e-3, eAmax_2D[:, j], c=mymap((amps[j] - Amin) / (Amax - Amin)), + label='A = ' + str(amps[j] * 1e-3) + ' kPa') +for item in ax.get_yticklabels(): + item.set_fontsize(24) +for item in ax.get_xticklabels(): + item.set_fontsize(24) +sm_amp = plt.cm.ScalarMappable(cmap=mymap, norm=plt.Normalize(Amin * 1e-3, Amax * 1e-3)) +sm_amp._A = [] +cbar = plt.colorbar(sm_amp) +cbar.ax.set_ylabel("$A \ (kPa)$", fontsize=28) +for item in cbar.ax.get_yticklabels(): + item.set_fontsize(24) + + +# 3D surface plot: eAmax = f(f,A) +if nholes == 0: + X, Y = np.meshgrid(freqs * 1e-6, amps * 1e-6) + fig = plt.figure(figsize=(12, 9)) + ax = fig.gca(projection=Axes3D.name) + ax.plot_surface(X, Y, eAmax_2D, rstride=1, cstride=1, cmap=mymap, linewidth=0, + antialiased=False) + ax.set_xlabel("$A \ (MPa)$", fontsize=24, labelpad=20) + ax.set_ylabel("$f \ (MHz)$", fontsize=24, labelpad=20) + ax.set_zlabel("$\epsilon_{A, max}$", fontsize=24, labelpad=20) + ax.view_init(30, 135) + for item in ax.get_yticklabels(): + item.set_fontsize(24) + for item in ax.get_xticklabels(): + item.set_fontsize(24) + for item in ax.get_zticklabels(): + item.set_fontsize(24) + + +# Plot optimal power fit vs. areal strain (with frequency color code) +fig, ax = plt.subplots(figsize=(12, 9)) +ax.set_xlabel("$%s\ A^{%.2f}\ f^{%.2f}$" % (a_str, b, c), fontsize=28) +ax.set_ylabel("$\epsilon_{A, max}$", fontsize=28) +for i in range(nFreqs): + ax.scatter(a * amps**b * freqs[i]**c, eAmax_2D[i, :], s=40, + c=mymap((freqs[i] - fmin) / (fmax - fmin)), label='f = %f kHz' % (freqs[i] * 1e-3)) +ax.set_xlim([0.0, 1.1 * (a * Amax**b * fmin**c)]) +ax.set_ylim([0.0, 1.1 * eAmax_2D[0, -1]]) +ax.text(0.4 * eAmax_2D[0, -1], 0.9 * eAmax_2D[0, -1], "$R^2 = " + '{:.5f}'.format(r_squared) + "$", + fontsize=24, color="black") +ax.set_xticks([0, np.round(np.amax(eAmax_2D) * 1e2) / 1e2]) +ax.set_yticks([np.round(np.amax(eAmax_2D) * 1e2) / 1e2]) +for item in ax.get_yticklabels(): + item.set_fontsize(24) +for item in ax.get_xticklabels(): + item.set_fontsize(24) +cbar = plt.colorbar(sm_freq) +cbar.ax.set_ylabel('$f \ (kHz)$', fontsize=28) +for item in cbar.ax.get_yticklabels(): + item.set_fontsize(24) + +plt.show() diff --git a/postpro/postpro_sensitivity_stim_exposed.py b/postpro/postpro_sensitivity_stim_exposed.py new file mode 100644 index 0000000..fa993a8 --- /dev/null +++ b/postpro/postpro_sensitivity_stim_exposed.py @@ -0,0 +1,70 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# @Author: Theo Lemaire +# @Date: 2016-10-05 11:04:43 +# @Email: theo.lemaire@epfl.ch +# @Last Modified by: Theo Lemaire +# @Last Modified time: 2017-07-18 15:00:50 + +""" Test influence of acoustic pressure amplitude on cavitation amplitude of exposed BLS. """ + +import sys +import numpy as np +from scipy.optimize import curve_fit +import matplotlib.pyplot as plt + +sys.path.append('C:/Users/admin/Google Drive/PhD/NICE model/PointNICE') +from PointNICE.utils import ImportExcelCol + + +def powerfit(x, a_, b_): + """ Fitting function. 
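Returns a_ * x^b_, i.e. a power-law dependence of the maximal areal strain on acoustic amplitude.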
""" + return a_ * np.power(x, b_) + + +# Import data +xls_file = "C:/Users/admin/Desktop/Model output/BLS Z 32nm radius/0um embedding/bls_logZ_a32.0nm_d0.0um.xlsx" +sheet = 'Data' +A = ImportExcelCol(xls_file, sheet, 'F', 2) * 1e3 +eAmax = ImportExcelCol(xls_file, sheet, 'M', 2) + +# Sort data by increasing Pac amplitude +Asort = A.argsort() +A = A[Asort] +eAmax = eAmax[Asort] + +# Compute best power fit for eAmax +popt, pcov = curve_fit(powerfit, A, eAmax) +(a, b) = popt +if a < 1e-4: + a_str = '{:.2e}'.format(a) +else: + a_str = '{:.4f}'.format(a) +print("global least-square power fit: eAmax = %s * A^%.2f" % (a_str, b)) + +# Compute predicted data and associated error +eAmax_predicted = powerfit(A, a, b) +residuals = eAmax - eAmax_predicted +ss_res = np.sum(residuals**2) +ss_tot = np.sum((eAmax - np.mean(eAmax))**2) +r_squared_eAmax = 1 - (ss_res / ss_tot) +print("R-squared = " + '{:.5f}'.format(r_squared_eAmax)) + +# Plot areal strain vs. acoustic pressure amplitude (data and best fit) +fig, ax = plt.subplots(figsize=(12, 9)) +ax.set_xlabel("$A \ (kPa)$", fontsize=28) +ax.set_ylabel("$\epsilon_{A, max}$", fontsize=28) +ax.scatter(A * 1e-3, eAmax, color='blue', linewidth=2, label="data") +ax.plot(A * 1e-3, eAmax_predicted, '--', color='black', linewidth=2, + label="model: $\epsilon_{A,max} \propto = A^{" + '{:.2f}'.format(b) + "}$") +xlim = ax.get_xlim() +ylim = ax.get_ylim() +ax.text(xlim[0] + 0.1 * (xlim[1] - xlim[0]), ylim[0] + 0.6 * (ylim[1] - ylim[0]), + "$R^2 = " + '{:.5f}'.format(r_squared_eAmax) + "$", fontsize=28, color="black") +ax.legend(loc=4, fontsize=24) +for item in ax.get_yticklabels(): + item.set_fontsize(24) +for item in ax.get_xticklabels(): + item.set_fontsize(24) + +plt.show() diff --git a/postpro/postpro_spikerate.py b/postpro/postpro_spikerate.py new file mode 100644 index 0000000..ce2446d --- /dev/null +++ b/postpro/postpro_spikerate.py @@ -0,0 +1,83 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# @Author: Theo Lemaire +# @Date: 2016-10-31 11:27:34 +# @Email: theo.lemaire@epfl.ch +# @Last Modified by: Theo Lemaire +# @Last Modified time: 2017-07-18 15:46:49 + +""" Test relationship between stimulus intensity spike rate. """ + +import numpy as np +from scipy.optimize import curve_fit +import matplotlib.pyplot as plt + +from PointNICE.utils import ImportExcelCol, LoadParams + + +def fitfunc(x, a, b): + """ Fitting function """ + return a * np.power(x, b) + + +# Load NICE parameters +params = LoadParams() +biomech = params['biomech'] +ac_imp = biomech['rhoL'] * biomech['c'] # Rayl + +# Import data +xls_file = "C:/Users/admin/Desktop/Model output/NBLS spikes 0.35MHz/nbls_log_spikes_0.35MHz.xlsx" +sheet = 'Data' +f = ImportExcelCol(xls_file, sheet, 'E', 2) * 1e3 # Hz +A = ImportExcelCol(xls_file, sheet, 'F', 2) * 1e3 # Pa +T = ImportExcelCol(xls_file, sheet, 'G', 2) * 1e-3 # s +N = ImportExcelCol(xls_file, sheet, 'Q', 2) +FR = ImportExcelCol(xls_file, sheet, 'S', 2) # ms + +# Retrieve available spike rates values (for min. 
3 spikes) and corresponding amplitudes +iremove = np.where(N < 15)[0] +A_true = np.delete(A, iremove) +spikerates = np.delete(FR, iremove).astype(np.float) +amplitudes = np.delete(A, iremove) + +# Convert amplitudes to intensities +intensities = amplitudes**2 / (2 * ac_imp) * 1e-4 # W/cm2 + +# Power law least square fitting +popt, pcov = curve_fit(fitfunc, intensities, spikerates) +print('power product fit: FR = %.2f I^%.2f' % (popt[0], popt[1])) + +# Compute predicted data and associated error +spikerates_predicted = fitfunc(intensities, popt[0], popt[1]) +residuals = spikerates - spikerates_predicted +ss_res = np.sum(residuals**2) +ss_tot = np.sum((spikerates - np.mean(spikerates))**2) +r_squared = 1 - (ss_res / ss_tot) +print("R-squared = " + '{:.5f}'.format(r_squared)) + +# Plot latency vs. amplitude +fig1, ax = plt.subplots(figsize=(12, 9)) +ax.set_xlabel("$Amplitude \ (kPa)$", fontsize=28) +ax.set_ylabel("$Spike\ Rate \ (spikes/ms)$", fontsize=28) +ax.scatter(amplitudes * 1e-3, spikerates, color='black') +ax.set_ylim(0, 1.1 * np.amax(spikerates)) +for item in ax.get_yticklabels(): + item.set_fontsize(24) +for item in ax.get_xticklabels(): + item.set_fontsize(24) + +# Plot latency vs. intensity +fig2, ax = plt.subplots(figsize=(12, 9)) +ax.set_xlabel("$Intensity \ (W/cm^2)$", fontsize=28) +ax.set_ylabel("$Spike\ Rate \ (spikes/ms)$", fontsize=28) +ax.scatter(intensities, spikerates, color='black', label='$data$') +ax.plot(intensities, spikerates_predicted, color='blue', + label='$%.2f\ I^{%.2f}$' % (popt[0], popt[1])) +ax.set_ylim(0, 1.1 * np.amax(spikerates)) +ax.legend(fontsize=28) +for item in ax.get_yticklabels(): + item.set_fontsize(24) +for item in ax.get_xticklabels(): + item.set_fontsize(24) + +plt.show() diff --git a/postpro/postpro_spikes.py b/postpro/postpro_spikes.py new file mode 100644 index 0000000..78fad20 --- /dev/null +++ b/postpro/postpro_spikes.py @@ -0,0 +1,126 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# @Author: Theo Lemaire +# @Date: 2016-10-27 09:50:55 +# @Email: theo.lemaire@epfl.ch +# @Last Modified by: Theo Lemaire +# @Last Modified time: 2017-07-18 15:48:58 + +""" Test influence of acoustic intensity and duration on number of spikes. 
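For each drive frequency, spike counts are reshaped into a (duration x intensity) matrix and
rendered as 2D profiles and/or a 3D surface, depending on the plotting flags defined below.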
""" + +import numpy as np +import matplotlib.pyplot as plt +import matplotlib.cm as cm +from mpl_toolkits.mplot3d import Axes3D + +from PointNICE.utils import ImportExcelCol, ConstructMatrix, LoadParams + + +# Define options +plot2d_bool = 0 +plot3d_show = 1 +plot3d_save = 0 +plt_root = "../Output/effective spikes 2D/" +plt_save_ext = '.png' + + +# Load NICE parameters +params = LoadParams() +biomech = params['biomech'] +ac_imp = biomech['rhoL'] * biomech['c'] # Rayl + +# Import data +xls_file = "../../Output/effective spikes 2D/nbls_log_spikes.xlsx" +sheet = 'Data' +f_all = ImportExcelCol(xls_file, sheet, 'E', 2) * 1e3 # Hz +A_all = ImportExcelCol(xls_file, sheet, 'F', 2) * 1e3 # Pa +T_all = ImportExcelCol(xls_file, sheet, 'G', 2) * 1e-3 # s +N_all = ImportExcelCol(xls_file, sheet, 'Q', 2) # number of spikes + +freqs = np.unique(f_all) + +for Fdrive in freqs: + + # Select data + A = A_all[f_all == Fdrive] + T = T_all[f_all == Fdrive] + N = N_all[f_all == Fdrive] + + # Reshape serialized data into 2 dimensions + (durations, amps, nspikes, nholes) = ConstructMatrix(T, A, N) + nspikes2 = nspikes.conj().T # conjugate tranpose of nspikes matrix (for surface plot) + + # Convert to appropriate units + intensities = amps**2 / (2 * ac_imp) * 1e-4 # W/cm2 + durations = durations * 1e3 # ms + + nDurations = durations.size + nIntensities = intensities.size + + Tmax = np.amax(durations) + Tmin = np.amin(durations) + Imax = np.amax(intensities) + Imin = np.amin(intensities) + print(str(nholes) + " hole(s) in reconstructed matrix") + + mymap = cm.get_cmap('jet') + + if plot2d_bool == 1: + + # Plot spikes vs. intensity (with duration color code) + fig, ax = plt.subplots(figsize=(12, 9)) + ax.set_xlabel("$I \ (W/cm^2)$", fontsize=28) + ax.set_ylabel("$\#\ spikes$", fontsize=28) + for i in range(nIntensities): + ax.plot(intensities, nspikes[i, :], c=mymap((durations[i] - Tmin) / (Tmax - Tmin)), + label='t = ' + str(durations[i]) + ' ms') + sm_duration = plt.cm.ScalarMappable(cmap=mymap, norm=plt.Normalize(Tmin, Tmax)) + sm_duration._A = [] + cbar = plt.colorbar(sm_duration) + cbar.ax.set_ylabel('$duration \ (ms)$', fontsize=28) + + # Plot spikes vs. 
duration (with intensity color code) + fig, ax = plt.subplots(figsize=(12, 9)) + ax.set_xlabel("$duration \ (ms)$", fontsize=28) + ax.set_ylabel("$\#\ spikes$", fontsize=28) + for j in range(nDurations): + ax.plot(durations, nspikes[:, j], c=mymap((intensities[j] - Imin) / (Imax - Imin)), + label='I = ' + str(intensities[j]) + ' W/cm2') + sm_int = plt.cm.ScalarMappable(cmap=mymap, norm=plt.Normalize(Imin, Imax)) + sm_int._A = [] + cbar = plt.colorbar(sm_int) + cbar.ax.set_ylabel("$I \ (W/cm^2)$", fontsize=28) + + + if plot3d_show == 1 and nholes == 0: + + # 3D surface plot: nspikes = f(duration, intensity) + X, Y = np.meshgrid(durations, intensities) + fig = plt.figure(figsize=(12, 9)) + ax = fig.gca(projection=Axes3D.name) + ax.plot_surface(X, Y, nspikes2, rstride=1, cstride=1, cmap=mymap, linewidth=0, + antialiased=False) + ax.set_xlabel("$duration \ (ms)$", fontsize=24, labelpad=20) + ax.set_ylabel("$intensity \ (W/cm^2)$", fontsize=24, labelpad=20) + ax.set_zlabel("$\#\ spikes$", fontsize=24, labelpad=20) + csetx = ax.contour(X, Y, nspikes2, zdir='x', offset=150, cmap=cm.coolwarm) + csety = ax.contour(X, Y, nspikes2, zdir='y', offset=0.8, cmap=cm.coolwarm) + ax.view_init(33, -126) + ax.set_xticks([0, 50, 100, 150]) + ax.set_yticks([0, 0.2, 0.4, 0.6, 0.8]) + ax.set_zticks([0, 20, 40, 60, 80]) + for item in ax.get_yticklabels(): + item.set_fontsize(24) + for item in ax.get_xticklabels(): + item.set_fontsize(24) + for item in ax.get_zticklabels(): + item.set_fontsize(24) + + # Save figure if needed + if plot3d_save == 1: + plt_filename = '{}spikes_{:.0f}KHz{}'.format(plt_root, Fdrive * 1e-3, plt_save_ext) + plt.savefig(plt_filename) + print('Saving figure to "' + plt_root + '"') + plt.close() + + plt.show() diff --git a/postpro/postpro_threshold_duration.py b/postpro/postpro_threshold_duration.py new file mode 100644 index 0000000..8c51987 --- /dev/null +++ b/postpro/postpro_threshold_duration.py @@ -0,0 +1,72 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# @Author: Theo Lemaire +# @Date: 2016-10-30 21:48:45 +# @Email: theo.lemaire@epfl.ch +# @Last Modified by: Theo Lemaire +# @Last Modified time: 2017-07-18 15:49:11 + +""" Test relationship between stimulus duration and minimum acoustic +amplitude / intensity / energy for AP generation. """ + +import numpy as np +import matplotlib.pyplot as plt + +from PointNICE.utils import ImportExcelCol, LoadParams + +# Load NICE parameters +params = LoadParams() +biomech = params['biomech'] +ac_imp = biomech['rhoL'] * biomech['c'] # Rayl + +# Import data +xls_file = "C:/Users/admin/Desktop/Model output/NBLS titration duration 0.35MHz/nbls_log_titration_duration_0.35MHz.xlsx" +sheet = 'Data' +f = ImportExcelCol(xls_file, sheet, 'E', 2) * 1e3 # Hz +A = ImportExcelCol(xls_file, sheet, 'F', 2) * 1e3 # Pa +T = ImportExcelCol(xls_file, sheet, 'G', 2) * 1e-3 # s +N = ImportExcelCol(xls_file, sheet, 'Q', 2) + +# Convert to appropriate units +durations = T * 1e3 # ms +Trange = np.amax(durations) - np.amin(durations) +amplitudes = A * 1e-3 # kPa +intensities = A**2 / (2 * ac_imp) * 1e-4 # W/cm2 +energies = intensities * durations # mJ/cm2 + +# Plot threshold amplitude vs. 
duration +fig1, ax = plt.subplots(figsize=(12, 9)) +ax.set_xlabel("$duration \ (ms)$", fontsize=28) +ax.set_ylabel("$Amplitude \ (kPa)$", fontsize=28) +ax.scatter(durations, amplitudes, color='black', s=100) +ax.set_xlim(np.amin(durations) - 0.1 * Trange, np.amax(durations) + 0.1 * Trange) +for item in ax.get_yticklabels(): + item.set_fontsize(24) +for item in ax.get_xticklabels(): + item.set_fontsize(24) + +# Plot threshold intensity vs. duration +fig2, ax = plt.subplots(figsize=(12, 9)) +ax.set_xlabel("$duration \ (ms)$", fontsize=28) +ax.set_ylabel("$Intensity \ (W/cm^2)$", fontsize=28) +ax.scatter(durations, intensities, color='black', s=100) +ax.set_xlim(np.amin(durations) - 0.1 * Trange, np.amax(durations) + 0.1 * Trange) +ax.set_yticks([np.floor(np.amin(intensities) * 1e2) / 1e2, np.ceil(np.amax(intensities) * 1e2) / 1e2]) +for item in ax.get_yticklabels(): + item.set_fontsize(24) +for item in ax.get_xticklabels(): + item.set_fontsize(24) + +# Plot threshold energy vs. duration +fig3, ax = plt.subplots(figsize=(12, 9)) +ax.set_xlabel("$duration \ (ms)$", fontsize=28) +ax.set_ylabel("$Energy \ (mJ/cm^2)$", fontsize=28) +ax.scatter(durations, energies, color='black', s=100) +ax.set_xlim(np.amin(durations) - 0.1 * Trange, np.amax(durations) + 0.1 * Trange) +for item in ax.get_yticklabels(): + item.set_fontsize(24) +for item in ax.get_xticklabels(): + item.set_fontsize(24) + + +plt.show() diff --git a/postpro/postpro_threshold_frequency.py b/postpro/postpro_threshold_frequency.py new file mode 100644 index 0000000..dc5e348 --- /dev/null +++ b/postpro/postpro_threshold_frequency.py @@ -0,0 +1,69 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# @Author: Theo Lemaire +# @Date: 2016-10-30 21:48:45 +# @Email: theo.lemaire@epfl.ch +# @Last Modified by: Theo Lemaire +# @Last Modified time: 2017-07-18 15:49:20 + +""" Test relationship between stimulus frequency and minimum acoustic intensity +for AP generation. """ + +import numpy as np +import matplotlib.pyplot as plt +import matplotlib.ticker as ticker + +from PointNICE.utils import ImportExcelCol, LoadParams + +# Load NICE parameters +params = LoadParams() +biomech = params['biomech'] +ac_imp = biomech['rhoL'] * biomech['c'] # Rayl + +# Import data +xls_file = "C:/Users/admin/Desktop/Model output/NBLS titration frequency 30ms/nbls_log_titration_frequency_30ms.xlsx" +sheet = 'Data' +f = ImportExcelCol(xls_file, sheet, 'E', 2) * 1e3 # Hz +A = ImportExcelCol(xls_file, sheet, 'F', 2) * 1e3 # Pa +T = ImportExcelCol(xls_file, sheet, 'G', 2) * 1e-3 # s +N = ImportExcelCol(xls_file, sheet, 'Q', 2) + +# Convert to appropriate units +frequencies = f * 1e-6 # MHz +amplitudes = A * 1e-3 # kPa +intensities = A**2 / (2 * ac_imp) * 1e-4 # W/cm2 + +# Plot threshold amplitude vs. duration +fig1, ax = plt.subplots(figsize=(12, 9)) +ax.set_xscale('log') +ax.set_xlabel("$Frequency \ (MHz)$", fontsize=28) +ax.set_ylabel("$Amplitude \ (kPa)$", fontsize=28) +ax.scatter(frequencies, amplitudes, color='black', s=100) +ax.set_xlim(1.5e-1, 5e0) +ax.set_xscale('log') +ax.set_xticks([0.2, 1, 4]) +ax.get_xaxis().set_major_formatter(ticker.ScalarFormatter()) +ax.set_yticks([np.floor(np.amin(amplitudes)), np.ceil(np.amax(amplitudes))]) +for item in ax.get_yticklabels(): + item.set_fontsize(24) +for item in ax.get_xticklabels(): + item.set_fontsize(24) + + +# Plot threshold intensity vs. 
duration +fig2, ax = plt.subplots(figsize=(12, 9)) +ax.set_xscale('log') +ax.set_xlabel("$Frequency \ (MHz)$", fontsize=28) +ax.set_ylabel("$Intensity \ (W/cm^2)$", fontsize=28) +ax.scatter(frequencies, intensities, color='black', s=100) +ax.set_xlim(1.5e-1, 5e0) +ax.set_xscale('log') +ax.set_xticks([0.2, 1, 4]) +ax.get_xaxis().set_major_formatter(ticker.ScalarFormatter()) +ax.set_yticks([np.floor(np.amin(intensities) * 1e2) / 1e2, np.ceil(np.amax(intensities) * 1e2) / 1e2]) +for item in ax.get_yticklabels(): + item.set_fontsize(24) +for item in ax.get_xticklabels(): + item.set_fontsize(24) + +plt.show() diff --git a/setup.py b/setup.py new file mode 100644 index 0000000..b80ed4f --- /dev/null +++ b/setup.py @@ -0,0 +1,41 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# @Author: Theo Lemaire +# @Date: 2017-06-13 09:40:02 +# @Email: theo.lemaire@epfl.ch +# @Last Modified by: Theo Lemaire +# @Last Modified time: 2017-08-22 19:21:59 + +from setuptools import setup + + +def readme(): + with open('README.rst', encoding="utf8") as f: + return f.read() + + +setup(name='PointNICE', + version='1.0', + description='Optimized Python implementation of the NICE model', + long_description=readme(), + url='???', + classifiers=[ + 'Development Status :: 4 - Beta', + 'Intended Audience :: Science/Research', + 'Programming Language :: Python :: 3', + 'Topic :: Scientific/Engineering :: Physics' + ], + keywords=('ultrasound ultrasonic neuromodulation neurostimulation excitation\ + biophysical model intramembrane cavitation NICE'), + author='Théo Lemaire', + author_email='theo.lemaire@epfl.ch', + license='MIT', + packages=['PointNICE'], + install_requires=[ + 'numpy>=1.10', + 'scipy>=0.17', + 'matplotlib>=2', + 'openpyxl>=2.4', + 'pyyaml>=3.11' + ], + zip_safe=False) diff --git a/sim/ASTIM_lookups.py b/sim/ASTIM_lookups.py new file mode 100644 index 0000000..9004833 --- /dev/null +++ b/sim/ASTIM_lookups.py @@ -0,0 +1,50 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# @Author: Theo Lemaire +# @Date: 2017-06-02 17:50:10 +# @Email: theo.lemaire@epfl.ch +# @Last Modified by: Theo Lemaire +# @Last Modified time: 2017-08-22 14:30:33 + +""" Create lookup tables for different acoustic frequencies. 
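For each neuron mechanism, effective coefficients are pre-computed over a grid of acoustic
amplitudes (including a zero-amplitude entry) and membrane charge densities via the
SolverUS.createLookup method.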
""" + +import logging +import numpy as np + +import PointNICE +from PointNICE.utils import LoadParams +from PointNICE.channels import * + +# Set logging options +logging.basicConfig(format='%(asctime)s %(message)s', datefmt='%d/%m/%Y %H:%M:%S:') +logger = logging.getLogger('PointNICE') +logger.setLevel(logging.DEBUG) + +# BLS parameters +params = LoadParams() + +# Geometry of NBLS structure +a = 32e-9 # in-plane radius (m) +d = 0.0e-6 # embedding tissue thickness (m) +geom = {"a": a, "d": d} + +# Channel mechanisms +neurons = [ThalamoCortical()] + +# Stimulation parameters +freqs = [690e3] # Hz +amps = np.logspace(np.log10(0.1), np.log10(600), num=50) * 1e3 # Pa +amps = np.insert(amps, 0, 0.0) # adding amplitude 0 + +logger.info('Starting batch lookup creation') + +for ch_mech in neurons: + # Create a SolverUS instance (with dummy frequency parameter) + solver = PointNICE.SolverUS(geom, params, ch_mech, 0.0) + charges = np.arange(np.round(ch_mech.Vm0 - 10.0), 50.0 + 1.0, 1.0) * 1e-5 # C/m2 + + # Create lookups for each frequency + for Fdrive in freqs: + solver.createLookup(ch_mech, Fdrive, amps, charges) + +logger.info('Lookup tables successfully created') diff --git a/sim/ASTIM_mech_batch.py b/sim/ASTIM_mech_batch.py new file mode 100644 index 0000000..29d2933 --- /dev/null +++ b/sim/ASTIM_mech_batch.py @@ -0,0 +1,140 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# @Author: Theo Lemaire +# @Date: 2016-11-21 10:46:56 +# @Email: theo.lemaire@epfl.ch +# @Last Modified by: Theo Lemaire +# @Last Modified time: 2017-07-18 16:33:16 + +""" Run batch simulations of the NICE mechanical model with imposed charge densities """ + +import time +import logging +import pickle +import numpy as np + +import PointNICE +from PointNICE.utils import LoadParams, xlslog, CheckBatchLog + +# Set logging options +logging.basicConfig(format='%(asctime)s %(message)s', datefmt='%d/%m/%Y %H:%M:%S:') +logger = logging.getLogger('PointNICE') +logger.setLevel(logging.DEBUG) + +# Select output directory +try: + (batch_dir, log_filepath) = CheckBatchLog('mech') +except AssertionError as err: + logger.error(err) + quit() + +# Define naming and logging settings +sim_str = 'sim_{:.0f}nm_{:.0f}kHz_{:.0f}kPa_{:.1f}nCcm2_mech' +sim_log = 'simulation %u/%u (a = %.1f nm, d = %.1f um, f = %.2f kHz, A = %.2f kPa, Q = %.1f nC/cm2)' + + +logger.info("Starting BLS simulation batch") + +# Load NICE parameters +params = LoadParams() +biomech = params['biomech'] +ac_imp = biomech['rhoL'] * biomech['c'] # Rayl + +# Set geometry of NBLS structure +a = 32e-9 # in-plane radius (m) +d = 0.0e-6 # embedding tissue thickness (m) +geom = {"a": a, "d": d} +Cm0 = 1e-2 # membrane resting capacitance (F/m2) +Qm0 = -54.0e-5 # membrane resting charge density (C/m2) + +# Set stimulation parameters +freqs = [6.9e5] # Hz +amps = [1.43e3] # Pa +charges = np.linspace(0.0, 80.0, 81) * 1e-5 # C/m2 + +# Run simulations +nsims = len(freqs) * len(amps) * len(charges) +simcount = 0 +for Fdrive in freqs: + try: + bls = PointNICE.BilayerSonophore(geom, params, Fdrive, Cm0, Qm0) + # Create SolverUS instance (compression modulus of embedding tissue depends on frequency) + + for Adrive in amps: + for Qm in charges: + + simcount += 1 + + # Get date and time info + date_str = time.strftime("%Y.%m.%d") + daytime_str = time.strftime("%H:%M:%S") + + # Log to console + logger.info(sim_log, simcount, nsims, a * 1e9, d * 1e6, Fdrive * 1e-3, + Adrive * 1e-3, Qm * 1e5) + + # Run simulation + tstart = time.time() + (t, y, states) = bls.runMech(Fdrive, Adrive, Qm) + (Z, ng) = 
y + U = np.insert(np.diff(Z) / np.diff(t), 0, 0.0) + tcomp = time.time() - tstart + logger.info('completed in %.2f seconds', tcomp) + + # Export data to PKL file + simcode = sim_str.format(a * 1e9, Fdrive * 1e-3, Adrive * 1e-3, Qm * 1e5) + datafile_name = batch_dir + '/' + simcode + ".pkl" + data = {'a': a, + 'd': d, + 'params': params, + 'Fdrive': Fdrive, + 'Adrive': Adrive, + 'phi': np.pi, + 'Qm': Qm, + 't': t, + 'states': states, + 'U': U, + 'Z': Z, + 'ng': ng} + + with open(datafile_name, 'wb') as fh: + pickle.dump(data, fh) + + # Compute key output metrics + Zmax = np.amax(Z) + Zmin = np.amin(Z) + Zabs_max = np.amax(np.abs([Zmin, Zmax])) + eAmax = bls.arealstrain(Zabs_max) + Tmax = bls.TEtot(Zabs_max) + Pmmax = bls.PMavgpred(Zmin) + ngmax = np.amax(ng) + dUdtmax = np.amax(np.abs(np.diff(U) / np.diff(t)**2)) + + # Export key metrics to log xls file + log = { + 'A': date_str, + 'B': daytime_str, + 'C': a * 1e9, + 'D': d * 1e6, + 'E': Fdrive * 1e-3, + 'F': Adrive * 1e-3, + 'G': Qm * 1e5, + 'H': t.size, + 'I': tcomp, + 'J': bls.kA + bls.kA_tissue, + 'K': Zmax * 1e9, + 'L': eAmax, + 'M': Tmax * 1e3, + 'N': (ngmax - bls.ng0) / bls.ng0, + 'O': Pmmax * 1e-3, + 'P': dUdtmax + } + + success = xlslog(log_filepath, 'Data', log) + if success == 1: + logger.info('log exported to "%s"', log_filepath) + else: + logger.error('log export to "%s" aborted', log_filepath) + + except AssertionError as err: + logger.error(err) diff --git a/sim/ASTIM_sim_batch.py b/sim/ASTIM_sim_batch.py new file mode 100644 index 0000000..0284b3e --- /dev/null +++ b/sim/ASTIM_sim_batch.py @@ -0,0 +1,63 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# @Author: Theo Lemaire +# @Date: 2017-02-13 18:16:09 +# @Email: theo.lemaire@epfl.ch +# @Last Modified by: Theo Lemaire +# @Last Modified time: 2017-08-22 18:39:57 + +""" Run batch acoustic simulations of the NICE model. 
""" + +import logging +import numpy as np +from PointNICE.solvers import runSimBatch +from PointNICE.channels import * +from PointNICE.utils import LoadParams, CheckBatchLog + +# Set logging options +logging.basicConfig(format='%(asctime)s %(message)s', datefmt='%d/%m/%Y %H:%M:%S:') +logger = logging.getLogger('PointNICE') +logger.setLevel(logging.DEBUG) + +# BLS parameters +bls_params = LoadParams() + +# Geometry of NBLS structure +a = 32e-9 # in-plane radius (m) +d = 0.0e-6 # embedding tissue thickness (m) +geom = {"a": a, "d": d} + +# Channels mechanisms +neurons = [CorticalRS()] + +# Stimulation parameters +stim_params = { + 'freqs': [3.5e5], # Hz + 'amps': [100e3], # Pa + 'durations': [50e-3], # s + 'PRFs': [1e2], # Hz + 'DFs': [1.0] +} +stim_params['offsets'] = [30e-3] * len(stim_params['durations']) # s + +# stim_params = { +# 'freqs': np.array([200, 400, 600, 800, 1000]) * 1e3, # Hz +# 'amps': np.array([10, 20, 40, 80, 150, 300, 600]) * 1e3, # Pa +# 'durs': np.array([20, 40, 60, 80, 100, 150, 200, 250, 300]) * 1e-3, # s +# 'PRFs': np.array([0.1, 0.2, 0.5, 1, 2, 5, 10]) * 1e3, # Hz +# 'DFs': np.array([1, 2, 5, 10, 25, 50, 75, 100]) / 100 +# } +# stim_params['offsets'] = 350e-3 - stim_params['durations'] # s + +# Simulation type +sim_type = 'effective' + +# Select output directory +try: + (batch_dir, log_filepath) = CheckBatchLog('elec') +except AssertionError as err: + logger.error(err) + quit() + +# Run simulation batch +runSimBatch(batch_dir, log_filepath, neurons, bls_params, geom, stim_params, sim_type) diff --git a/sim/ASTIM_titration_batch.py b/sim/ASTIM_titration_batch.py new file mode 100644 index 0000000..5d260c3 --- /dev/null +++ b/sim/ASTIM_titration_batch.py @@ -0,0 +1,50 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# @Author: Theo Lemaire +# @Date: 2017-02-13 18:16:09 +# @Email: theo.lemaire@epfl.ch +# @Last Modified by: Theo Lemaire +# @Last Modified time: 2017-08-22 17:15:29 + +""" Run batch parameter titrations of the NICE model. """ + +import logging +import numpy as np +from PointNICE.solvers import runTitrationBatch +from PointNICE.channels import * +from PointNICE.utils import LoadParams, CheckBatchLog + +# Set logging options +logging.basicConfig(format='%(asctime)s %(message)s', datefmt='%d/%m/%Y %H:%M:%S:') +logger = logging.getLogger('PointNICE') +logger.setLevel(logging.DEBUG) + +# BLS parameters +bls_params = LoadParams() + +# Geometry of NBLS structure +a = 32e-9 # in-plane radius (m) +d = 0.0e-6 # embedding tissue thickness (m) +geom = {"a": a, "d": d} + +# Channels mechanisms +neurons = [CorticalRS()] + +# Stimulation parameters +stim_params = { + 'freqs': [3.5e5], # Hz + # 'amps': [100e3], # Pa + 'durations': [50e-3], # s + 'PRFs': [1e2], # Hz + 'DFs': [1.0] +} + +# Select output directory +try: + (batch_dir, log_filepath) = CheckBatchLog('elec') +except AssertionError as err: + logger.error(err) + quit() + +# Run titration batch +runTitrationBatch(batch_dir, log_filepath, neurons, bls_params, geom, stim_params) diff --git a/sim/ESTIM_sim_batch.py b/sim/ESTIM_sim_batch.py new file mode 100644 index 0000000..bdc74ff --- /dev/null +++ b/sim/ESTIM_sim_batch.py @@ -0,0 +1,315 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# @Author: Theo Lemaire +# @Date: 2016-10-11 20:35:38 +# @Email: theo.lemaire@epfl.ch +# @Last Modified by: Theo Lemaire +# @Last Modified time: 2017-08-22 18:08:12 + +""" Run simulations of the HH system with injected electric current, +and plot resulting dynamics. 
""" + +import matplotlib.pyplot as plt +import matplotlib.patches as patches + +from PointNICE.solvers import SolverElec +from PointNICE.channels import * + + +# -------------- SIMULATION ----------------- + + +# Create channels mechanism +neuron = LeechTouch() + +print('initial states:') +print('Vm0 = {:.2f}'.format(neuron.Vm0)) +for i in range(len(neuron.states_names)): + if neuron.states_names[i] == 'C_Ca': + print('{}0 = {:.3f} uM'.format(neuron.states_names[i], neuron.states0[i] * 1e6)) + else: + print('{}0 = {:.2f}'.format(neuron.states_names[i], neuron.states0[i])) + + +# Set pulse parameters +tstim = 1.5 # s +toffset = 0.5 # s +Astim = 4.1 # mA/m2 + +show_currents = False + +# Run simulation +print('stimulating {} neuron ({:.2f} mA/m2, {:.0f} ms)'.format(neuron.name, Astim, tstim * 1e3)) +solver = SolverElec() +(t, y) = solver.runSim(neuron, Astim, tstim, toffset) + + +# -------------- VARIABLES SEPARATION ----------------- + + +# Membrane potential and states +Vm = y[:, 0] +states = y[:, 1:].T + +# Final states +statesf = y[-1, 1:] +print('final states:') +print('Vmf = {:.2f}'.format(Vm[-1])) +for i in range(len(neuron.states_names)): + if len(neuron.states_names[i]) == 1: # channel state + print('{}f = {:.2f}'.format(neuron.states_names[i], statesf[i])) + else: # other state + print('{}f = {:f}'.format(neuron.states_names[i], statesf[i])) + + +# Leakage current and net current +iL = neuron.currL(Vm) +iNet = neuron.currNet(Vm, states) + +# Sodium and Potassium gating dynamics and currents +m = y[:, 1] +h = y[:, 2] +n = y[:, 3] +iNa = neuron.currNa(m, h, Vm) +iK = neuron.currK(n, Vm) + + +corticals = ['RS', 'FS', 'LTS'] +thalamics = ['RE', 'TC'] +leeches = ['LeechT'] + +# Cortical neurons +if neuron.name in corticals: + p = y[:, 4] + iM = neuron.currM(p, Vm) + + # Special case: LTS neuron + if neuron.name == 'LTS': + s = y[:, 5] + u = y[:, 6] + iCa = neuron.currCa(s, u, Vm) + + +# Thalamic neurons +if neuron.name in thalamics: + s = y[:, 4] + u = y[:, 5] + iCa = neuron.currCa(s, u, Vm) + + # Special case: TC neuron + if neuron.name == 'TC': + O = y[:, 6] + C = y[:, 7] + P0 = y[:, 8] + C_Ca = y[:, 9] + OL = 1 - O - C + P1 = 1 - P0 + Ih = neuron.currH(O, C, Vm) + IKL = neuron.currKL(Vm) + +# Leech neurons +if neuron.name in leeches: + s = y[:, 4] + C_Na = y[:, 5] + A_Na = y[:, 6] + C_Ca = y[:, 7] + A_Ca = y[:, 8] + iCa = neuron.currCa(s, Vm) + iPumpNa = neuron.currPumpNa(C_Na, Vm) + iKCa = neuron.currKCa(C_Ca, Vm) + + +# -------------- PLOTTING ----------------- + +fs = 12 +if neuron.name == 'TC': + naxes = 7 +if neuron.name in ['LTS', 'RE']: + naxes = 5 +if neuron.name in ['RS', 'FS']: + naxes = 4 +if neuron.name in leeches: + naxes = 7 + +if not show_currents: + naxes -= 1 + +height = 5.5 +if neuron.name == 'TC': + height = 7 +fig, axes = plt.subplots(naxes, 1, figsize=(10, height)) + +# Membrane potential +i = 0 +ax = axes[i] +ax.plot(t * 1e3, Vm, linewidth=2) +ax.set_ylabel('$V_m\ (mV)$', fontsize=fs) +if i < naxes - 1: + ax.get_xaxis().set_ticklabels([]) +ax.locator_params(axis='y', nbins=2) +for item in ax.get_yticklabels(): + item.set_fontsize(fs) +(ybottom, ytop) = ax.get_ylim() +ax.add_patch(patches.Rectangle((0.0, ybottom), tstim * 1e3, ytop - ybottom, + color='#8A8A8A', alpha=0.1)) + +# iNa dynamics +i += 1 +ax = axes[i] +ax.set_ylim([-0.1, 1.1]) +ax.set_ylabel('$Na^+ \ kin.$', fontsize=fs) +ax.plot(t * 1e3, m, color='blue', linewidth=2, label='$m$') +ax.plot(t * 1e3, h, color='red', linewidth=2, label='$h$') +ax.plot(t * 1e3, m**2 * h, '--', color='black', linewidth=2, 
label='$m^2h$') +(ybottom, ytop) = ax.get_ylim() +ax.add_patch(patches.Rectangle((0.0, ybottom), tstim * 1e3, ytop - ybottom, + color='#8A8A8A', alpha=0.1)) +ax.legend(fontsize=fs, loc=7) +if i < naxes - 1: + ax.get_xaxis().set_ticklabels([]) +ax.locator_params(axis='y', nbins=2) +for item in ax.get_yticklabels(): + item.set_fontsize(fs) + + +# iK & iM dynamics +i += 1 +ax = axes[i] +ax.set_ylim([-0.1, 1.1]) +ax.set_ylabel('$K^+ \ kin.$', fontsize=fs) +ax.plot(t * 1e3, n, color='#734d26', linewidth=2, label='$n$') +if neuron.name in ['RS', 'FS', 'LTS']: + ax.plot(t * 1e3, p, color='#660099', linewidth=2, label='$p$') +(ybottom, ytop) = ax.get_ylim() +ax.add_patch(patches.Rectangle((0.0, ybottom), tstim * 1e3, ytop - ybottom, + color='#8A8A8A', alpha=0.1)) +ax.legend(fontsize=fs, loc=7) +if i < naxes - 1: + ax.get_xaxis().set_ticklabels([]) +ax.locator_params(axis='y', nbins=2) +for item in ax.get_yticklabels(): + item.set_fontsize(fs) + +# iCa dynamics +if neuron.name in ['LTS', 'RE', 'TC', 'LeechT']: + i += 1 + ax = axes[i] + ax.set_ylim([-0.1, 1.1]) + ax.set_ylabel('$Ca^{2+} \ kin.$', fontsize=fs) + ax.plot(t * 1e3, s, color='#2d862d', linewidth=2, label='$s$') + if neuron.name in ['LTS', 'RE', 'TC']: + ax.plot(t * 1e3, u, color='#e68a00', linewidth=2, label='$u$') + ax.plot(t * 1e3, s**2 * u, '--', color='black', linewidth=2, label='$s^2u$') + (ybottom, ytop) = ax.get_ylim() + ax.add_patch(patches.Rectangle((0.0, ybottom), tstim * 1e3, ytop - ybottom, + color='#8A8A8A', alpha=0.1)) + ax.legend(fontsize=fs, loc=7) + if i < naxes - 1: + ax.get_xaxis().set_ticklabels([]) + ax.locator_params(axis='y', nbins=2) + for item in ax.get_yticklabels(): + item.set_fontsize(fs) + + +# iH dynamics +if neuron.name == 'TC': + i += 1 + ax = axes[i] + ax.set_ylim([-0.1, 2.1]) + ax.set_ylabel('$i_H\ kin.$', fontsize=fs) + # ax.plot(t * 1e3, C, linewidth=2, label='$C$') + ax.plot(t * 1e3, O, linewidth=2, label='$O$') + ax.plot(t * 1e3, OL, linewidth=2, label='$O_L$') + ax.plot(t * 1e3, O + 2 * OL, '--', color='black', linewidth=2, label='$O + 2O_L$') + (ybottom, ytop) = ax.get_ylim() + ax.add_patch(patches.Rectangle((0.0, ybottom), tstim * 1e3, ytop - ybottom, + color='#8A8A8A', alpha=0.1)) + ax.legend(fontsize=fs, ncol=2, loc=7) + if i < naxes - 1: + ax.get_xaxis().set_ticklabels([]) + ax.locator_params(axis='y', nbins=2) + for item in ax.get_yticklabels(): + item.set_fontsize(fs) + +# submembrane [Ca2+] dynamics +if neuron.name in ['TC', 'LeechT']: + i += 1 + ax = axes[i] + if neuron.name == 'TC': + ax.set_ylabel('$[Ca^{2+}_i]\ (uM)$', fontsize=fs) + ax.plot(t * 1e3, C_Ca * 1e6, linewidth=2, label='$[Ca^{2+}_i]$') + if neuron.name == 'LeechT': + ax.set_ylabel('$[Ca^{2+}_i]\ (arb.)$', fontsize=fs) + ax.plot(t * 1e3, C_Ca, linewidth=2, label='$[Ca^{2+}_i]$') + ax.plot(t * 1e3, A_Ca, linewidth=2, label='$A_{Ca}$') + ax.legend(fontsize=fs, loc=7) + (ybottom, ytop) = ax.get_ylim() + ax.add_patch(patches.Rectangle((0.0, ybottom), tstim * 1e3, ytop - ybottom, + color='#8A8A8A', alpha=0.1)) + if i < naxes - 1: + ax.get_xaxis().set_ticklabels([]) + ax.locator_params(axis='y', nbins=2) + for item in ax.get_yticklabels(): + item.set_fontsize(fs) + +# submembrane [Na+] dynamics +if neuron.name == 'LeechT': + i += 1 + ax = axes[i] + ax.set_ylabel('$[Na^{+}_i]\ (arb.)$', fontsize=fs) + ax.plot(t * 1e3, C_Na, linewidth=2, label='$[Na^{+}_i]$') + ax.plot(t * 1e3, A_Na, linewidth=2, label='$A_{Na}$') + (ybottom, ytop) = ax.get_ylim() + ax.add_patch(patches.Rectangle((0.0, ybottom), tstim * 1e3, ytop - ybottom, + 
color='#8A8A8A', alpha=0.1)) + ax.legend(fontsize=fs, loc=7) + if i < naxes - 1: + ax.get_xaxis().set_ticklabels([]) + ax.locator_params(axis='y', nbins=2) + for item in ax.get_yticklabels(): + item.set_fontsize(fs) + +# currents +if show_currents: + i += 1 + ax = axes[i] + ax.set_ylabel('$I\ (A/m^2)$', fontsize=fs) + ax.set_xlabel('$time\ (ms)$', fontsize=fs) + ax.plot(t * 1e3, iNa * 1e-3, linewidth=2, label='$i_{Na}$') + ax.plot(t * 1e3, iK * 1e-3, linewidth=2, label='$i_K$') + if neuron.name in ['RS', 'FS', 'LTS']: + ax.plot(t * 1e3, iM * 1e-3, linewidth=2, label='$i_M$') + if neuron.name in ['LTS', 'TC', 'LeechT']: + ax.plot(t * 1e3, iCa * 1e-3, linewidth=2, label='$i_{T}$') + if neuron.name == 'RE': + ax.plot(t * 1e3, iCa * 1e-3, linewidth=2, label='$i_{TS}$') + if neuron.name == 'TC': + ax.plot(t * 1e3, Ih * 1e-3, linewidth=2, label='$i_{H}$') + ax.plot(t * 1e3, IKL * 1e-3, linewidth=2, label='$i_{KL}$') + if neuron.name == 'LeechT': + ax.plot(t * 1e3, iKCa * 1e-3, linewidth=2, label='$i_{K,Ca}$') + ax.plot(t * 1e3, iPumpNa * 1e-3, linewidth=2, label='$i_{Na\ pump}$') + ax.plot(t * 1e3, iL * 1e-3, linewidth=2, label='$i_L$') + ax.plot(t * 1e3, iNet * 1e-3, '--', linewidth=2, color='black', label='$i_{Net}$') + ax.legend(fontsize=fs, ncol=2, loc=7) + ax.locator_params(axis='y', nbins=2) + for item in ax.get_yticklabels(): + item.set_fontsize(fs) + if i < naxes - 1: + ax.get_xaxis().set_ticklabels([]) + (ybottom, ytop) = ax.get_ylim() + ax.add_patch(patches.Rectangle((0.0, ybottom), tstim * 1e3, ytop - ybottom, + color='#8A8A8A', alpha=0.1)) + + +axes[-1].set_xlabel('$time\ (ms)$', fontsize=fs) +for item in axes[-1].get_xticklabels(): + item.set_fontsize(fs) + +if tstim > 0.0: + title = '{} neuron ({:.2f} mA/m2, {:.0f} ms)'.format(neuron.name, Astim, tstim * 1e3) +else: + title = '{} neuron (free, {:.0f} ms)'.format(neuron.name, toffset * 1e3) +fig.suptitle(title, fontsize=fs) + +plt.show() diff --git a/tests/test_basic.py b/tests/test_basic.py new file mode 100644 index 0000000..07ab006 --- /dev/null +++ b/tests/test_basic.py @@ -0,0 +1,122 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# @Author: Theo Lemaire +# @Date: 2017-06-14 18:37:45 +# @Email: theo.lemaire@epfl.ch +# @Last Modified by: Theo Lemaire +# @Last Modified time: 2017-08-22 18:38:58 + +''' Test the basic functionalities of the package. 
''' + +import logging +import numpy as np +import PointNICE +from PointNICE.utils import LoadParams, detectSpikes +from PointNICE.channels import CorticalRS +from PointNICE.constants import * + +# Set logging options +logging.basicConfig(format='%(asctime)s %(message)s', datefmt='%d/%m/%Y %H:%M:%S:') +logger = logging.getLogger('PointNICE') +logger.setLevel(logging.DEBUG) + +# Set geometry of NBLS structure +geom = {"a": 32e-9, "d": 0.0e-6} + +# Defining general stimulation parameters +Fdrive = 3.5e5 # Hz +Adrive = 1e5 # Pa +PRF = 1.5e3 # Hz +DF = 1 + +logger.info('Starting basic tests') + +logger.info('Test 1: Loading parameters') +params = LoadParams() + +logger.info('Test 2: Creating typical BLS instance') +Cm0 = 1e-2 # membrane resting capacitance (F/m2) +Qm0 = -89e-5 # membrane resting charge density (C/m2) + +bls = PointNICE.BilayerSonophore(geom, params, Fdrive, Cm0, Qm0) + +logger.info('Test 3: Running simulation of the mechanical system') +charges = np.linspace(-100, 50, 10) * 1e-5 # C/m2 +for Qm in charges: + bls.runMech(Fdrive, 2e4, Qm) + + +logger.info('Test 4: Creating channel mechanism') +rs_mech = CorticalRS() + + +logger.info('Test 5: Creating typical SolverUS instance') +solver = PointNICE.SolverUS(geom, params, rs_mech, Fdrive) + + +logger.info('Test 6: running short classic simulation of the full system') +tstim = 1e-3 # s +toffset = 1e-3 # s +(t, y, _) = solver.runSim(rs_mech, Fdrive, Adrive, tstim, toffset, PRF, DF, 'classic') +Qm = y[2] +n_spikes, _, _ = detectSpikes(t, Qm, SPIKE_MIN_QAMP, SPIKE_MIN_DT) +assert n_spikes == 0, 'Error: number of spikes should be 0' +logger.info('0 spike detected --> OK') + + +logger.info('Test 7: running hybrid simulation') +tstim = 30e-3 # s +toffset = 10e-3 # s +(t, y, _) = solver.runSim(rs_mech, Fdrive, Adrive, tstim, toffset, PRF, DF, 'hybrid') +Qm = y[2] +n_spikes, _, _ = detectSpikes(t, Qm, SPIKE_MIN_QAMP, SPIKE_MIN_DT) +assert n_spikes == 1, 'Error: number of spikes should be 1' +logger.info('1 spike detected --> OK') + + +logger.info('Test 8: creating dummy lookup file') +amps = np.array([1, 2]) * 1e5 # Pa +charges = np.array([-80.0, 30.0]) * 1e-5 # C/m2 +tmp = rs_mech.name +rs_mech.name = 'test' +solver.createLookup(rs_mech, Fdrive, amps, charges) +rs_mech.name = tmp + + +logger.info('Test 9: running effective simulation') +tstim = 30e-3 # s +toffset = 10e-3 # s +(t, y, _) = solver.runSim(rs_mech, Fdrive, Adrive, tstim, toffset, PRF, DF, 'effective') +Qm = y[2] +n_spikes, _, _ = detectSpikes(t, Qm, SPIKE_MIN_QAMP, SPIKE_MIN_DT) +assert n_spikes == 1, 'Error: number of spikes should be 1' +logger.info('1 spike detected --> OK') + + +quit() + + +logger.info('Test 10: running effective amplitude titration') +tstim = 30e-3 # s +toffset = 10e-3 # s +Arange = (0.0, 2 * TITRATION_AMAX) # Pa +(Athr, t, y, _, latency) = solver.titrateAmp(rs_mech, Fdrive, Arange, tstim, toffset, + PRF, DF, 'effective') +Qm = y[2] +n_spikes, _, _ = detectSpikes(t, Qm, SPIKE_MIN_QAMP, SPIKE_MIN_DT) +assert n_spikes == 1, 'Error: number of spikes should be 1' +logger.info('1 spike detected --> OK') + + +logger.info('Test 11: running effective duration titration') +trange = (0.0, 2 * TITRATION_TMAX) # s +toffset = 10e-3 # s +(tthr, t, y, _, latency) = solver.titrateDur(rs_mech, Fdrive, Adrive, trange, toffset, + PRF, DF, 'effective') +Qm = y[2] +n_spikes, _, _ = detectSpikes(t, Qm, SPIKE_MIN_QAMP, SPIKE_MIN_DT) +assert n_spikes == 1, 'Error: number of spikes should be 1' +logger.info('1 spike detected --> OK') + + +logger.info('All tests successfully 
completed')
diff --git a/tests/test_graphs.py b/tests/test_graphs.py
new file mode 100644
index 0000000..3fd89ed
--- /dev/null
+++ b/tests/test_graphs.py
@@ -0,0 +1,91 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# @Author: Theo Lemaire
+# @Date: 2017-06-14 18:37:45
+# @Email: theo.lemaire@epfl.ch
+# @Last Modified by: Theo Lemaire
+# @Last Modified time: 2017-07-18 15:54:31
+
+''' Test the basic functionalities of the package and output graphs of the call flows. '''
+
+import logging
+from pycallgraph import PyCallGraph
+from pycallgraph.output import GraphvizOutput
+
+import PointNICE
+from PointNICE.utils import LoadParams
+from PointNICE.channels import CorticalRS
+
+
+# Set logging options
+logging.basicConfig(format='%(asctime)s %(message)s', datefmt='%d/%m/%Y %H:%M:%S:')
+logger = logging.getLogger('PointNICE')
+logger.setLevel(logging.DEBUG)
+
+# Create Graphviz output object
+graphviz = GraphvizOutput()
+
+# Set geometry of NBLS structure
+geom = {"a": 32e-9, "d": 0.0e-6}
+
+# Loading model parameters
+params = LoadParams()
+
+# Defining general stimulation parameters
+Fdrive = 3.5e5  # Hz
+Adrive = 1e5  # Pa
+PRF = 1.5e3  # Hz
+DF = 1
+
+
+logger.info('Graph 1: BLS initialization')
+Cm0 = 1e-2  # membrane resting capacitance (F/m2)
+Qm0 = -89e-5  # membrane resting charge density (C/m2)
+graphviz.output_file = 'graphs/bls_init.png'
+with PyCallGraph(output=graphviz):
+    bls = PointNICE.BilayerSonophore(geom, params, Fdrive, Cm0, Qm0)
+
+
+logger.info('Graph 2: Mechanical simulation run')
+graphviz.output_file = 'graphs/sim_mech.png'
+with PyCallGraph(output=graphviz):
+    bls.runMech(Fdrive, 2e4, Qm0)
+
+
+logger.info('Graph 3: Channel mechanism initialization')
+graphviz.output_file = 'graphs/channel_init.png'
+with PyCallGraph(output=graphviz):
+    rs_mech = CorticalRS()
+
+
+logger.info('Graph 4: SolverUS initialization')
+graphviz.output_file = 'graphs/solver_init.png'
+with PyCallGraph(output=graphviz):
+    solver = PointNICE.SolverUS(geom, params, rs_mech, Fdrive)
+
+
+logger.info('Graph 5: classic simulation')
+tstim = 1e-3  # s
+toffset = 1e-3  # s
+graphviz.output_file = 'graphs/sim_classic.png'
+with PyCallGraph(output=graphviz):
+    solver.runSim(rs_mech, Fdrive, Adrive, tstim, toffset, PRF, DF, 'classic')
+
+
+logger.info('Graph 6: effective simulation')
+tstim = 30e-3  # s
+toffset = 10e-3  # s
+graphviz.output_file = 'graphs/sim_effective.png'
+with PyCallGraph(output=graphviz):
+    solver.runSim(rs_mech, Fdrive, Adrive, tstim, toffset, PRF, DF, 'effective')
+
+
+logger.info('Graph 7: hybrid simulation')
+tstim = 10e-3  # s
+toffset = 1e-3  # s
+graphviz.output_file = 'graphs/sim_hybrid.png'
+with PyCallGraph(output=graphviz):
+    solver.runSim(rs_mech, Fdrive, Adrive, tstim, toffset, PRF, DF, 'hybrid')
+
+
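
Usage note: each run of sim/ASTIM_mech_batch.py above is serialized as a pickled dictionary (keys 'a', 'd', 'params', 'Fdrive', 'Adrive', 'phi', 'Qm', 't', 'states', 'U', 'Z', 'ng'). The following minimal sketch shows one way such a file could be loaded back and inspected; it is not part of the package, and the file name is only a hypothetical instance of the sim_*nm_*kHz_*kPa_*nCcm2_mech naming pattern defined in that script.

    # Illustrative sketch (not part of the package): read one PKL file produced by
    # sim/ASTIM_mech_batch.py and plot leaflet deflection and gas content vs. time.
    import pickle
    import matplotlib.pyplot as plt

    pkl_path = 'sim_32nm_690kHz_1kPa_0.0nCcm2_mech.pkl'  # hypothetical output file name

    with open(pkl_path, 'rb') as fh:
        data = pickle.load(fh)

    # Keys follow the export dictionary defined in ASTIM_mech_batch.py
    t = data['t']    # time vector (s)
    Z = data['Z']    # leaflet apex deflection (m)
    ng = data['ng']  # internal gas content (mol)

    fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
    ax1.plot(t * 1e3, Z * 1e9)
    ax1.set_ylabel('Z (nm)')
    ax2.plot(t * 1e3, ng)
    ax2.set_ylabel('ng (mol)')
    ax2.set_xlabel('time (ms)')
    plt.show()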