diff --git a/MANIFEST.in b/MANIFEST.in index 3e3d1c2..3ee6e72 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,5 +1,5 @@ include README.md -include PySONIC/neurons/*.pkl -include /scripts/*.py -include /tests/* -include /notebooks/*.ipynb \ No newline at end of file +include PySONIC/lookups/*.pkl +include scripts/*.py +include tests/* +include notebooks/*.ipynb \ No newline at end of file diff --git a/PySONIC/core/bls_lookups.json b/PySONIC/core/bls_lookups.json index 06c6b7d..738f3d3 100644 --- a/PySONIC/core/bls_lookups.json +++ b/PySONIC/core/bls_lookups.json @@ -1,1053 +1,1062 @@ { "32.0": { "-80.00": { "LJ_approx": { "x0": 1.7875580514692446e-09, "C": 14506.791031634148, "nrep": 3.911252335063797, "nattr": 0.9495868868453603 }, "Delta_eq": 1.2344323203763867e-09 }, "-71.40": { "LJ_approx": { "x0": 1.710159362626304e-09, "C": 16757.44053535206, "nrep": 3.9149844779001572, "nattr": 0.9876139143736086 }, "Delta_eq": 1.2566584760815426e-09 }, "-54.00": { "LJ_approx": { "x0": 1.588820457014353e-09, "C": 21124.28839722447, "nrep": 3.9219530533405726, "nattr": 1.0531179666960837 }, "Delta_eq": 1.302942739961778e-09 }, "-71.90": { "LJ_approx": { "x0": 1.7142977983903395e-09, "C": 16627.43538695451, "nrep": 3.9147721975981384, "nattr": 0.9855168537576823 }, "Delta_eq": 1.2553492695740507e-09 }, "-89.50": { "LJ_approx": { "x0": 1.8913883171160228e-09, "C": 12016.525797229067, "nrep": 3.9069373029335464, "nattr": 0.9021994595277029 }, "Delta_eq": 1.2107230911508513e-09 }, "-61.93": { "LJ_approx": { "x0": 1.6390264131559902e-09, "C": 19180.97634755811, "nrep": 3.9188840789597705, "nattr": 1.0250251620607604 }, "Delta_eq": 1.281743450351987e-09 }, "-53.00": { "LJ_approx": { "x0": 1.5830321174402216e-09, "C": 21361.655211483354, "nrep": 3.9223254792281588, "nattr": 1.0564645995714745 }, "Delta_eq": 1.305609024046854e-09 }, "-53.58": { "LJ_approx": { "x0": 1.5863754403940291e-09, "C": 21224.206759769622, "nrep": 3.922109852077156, "nattr": 1.0545313303221624 }, "Delta_eq": 1.3040630712174578e-09 }, "-48.87": { "LJ_approx": { "x0": 1.5603070731170595e-09, "C": 22321.280954434333, "nrep": 3.9238276518118833, "nattr": 1.0698008224359472 }, "Delta_eq": 1.3165739825437056e-09 }, "0.00": { "LJ_approx": { "x0": 1.429523524073023e-09, "C": 28748.036227122713, "nrep": 3.9338919786768276, "nattr": 1.1551044201542804 }, "Delta_eq": 1.4e-09 }, "-70.00": { "LJ_approx": { "x0": 1.698788510560293e-09, "C": 17120.631318195712, "nrep": 3.915575488491436, "nattr": 0.9934238714780391 }, "Delta_eq": 1.2603339470322538e-09 }, "-58.00": { "LJ_approx": { "x0": 1.6131662659976035e-09, "C": 20156.47605325608, "nrep": 3.9204295233162925, "nattr": 1.0393049952285334 }, "Delta_eq": 1.2922508866011204e-09 }, "-140.00": { "LJ_approx": { "x0": 3.5396589580589484e-09, "C": 1255.8321160636908, "nrep": 3.879907809497444, "nattr": 0.4190657482583384 }, "Delta_eq": 1.1019265101358682e-09 }, "-200.00": { "LJ_approx": { "x0": 5.467498689940948e-09, "C": 298.4575230665454, "nrep": 3.8382806376855165, "nattr": 0.0014805372073950717 }, "Delta_eq": 1.0050623818936587e-09 }, "-60.00": { "LJ_approx": { "x0": 1.6260780786690872e-09, "C": 19662.79538138057, "nrep": 3.9196487714944097, "nattr": 1.0321270951610706 }, "Delta_eq": 1.2869009570089227e-09 }, "-160.00": { "LJ_approx": { "x0": 5.77921444789264e-09, "C": 204.15927774362973, "nrep": 3.866389064592209, "nattr": 0.03773979793348341 }, "Delta_eq": 1.0662975449878705e-09 }, "10.00": { "LJ_approx": { "x0": 1.4352403492312405e-09, "C": 28434.36006445571, "nrep": 3.9333944930338163, "nattr": 
1.1510029210218344 }, "Delta_eq": 1.3954197265639115e-09 }, "20.00": { "LJ_approx": { "x0": 1.4521735572363723e-09, "C": 27522.575366234458, "nrep": 3.93195491401856, "nattr": 1.1390890902608632 }, "Delta_eq": 1.3824571964886577e-09 + }, + "-50.00": { + "LJ_approx": { + "x0": 1.5663532171069699e-09, + "C": 22061.611190291416, + "nrep": 3.9234216613472546, + "nattr": 1.0662169394628622 + }, + "Delta_eq": 1.313576328857944e-09 } }, "64.0": { "-80.00": { "LJ_approx": { "x0": 1.783357531675752e-09, "C": 14639.319598806138, "nrep": 3.9113027551187414, "nattr": 0.9404151935643594 }, "Delta_eq": 1.2344323203763867e-09 }, "-71.90": { "LJ_approx": { "x0": 1.7103254451796522e-09, "C": 16775.90747591089, "nrep": 3.9148582072320104, "nattr": 0.9747613204242506 }, "Delta_eq": 1.2553492695740507e-09 }, "-71.40": { "LJ_approx": { "x0": 1.7061996856525807e-09, "C": 16906.878806702443, "nrep": 3.9150725853841957, "nattr": 0.976778398349503 }, "Delta_eq": 1.2566584760815426e-09 }, "-54.00": { "LJ_approx": { "x0": 1.585264156646392e-09, "C": 21303.176047683613, "nrep": 3.92211004079812, "nattr": 1.0402641595550777 }, "Delta_eq": 1.302942739961778e-09 }, "-89.50": { "LJ_approx": { "x0": 1.886870747500225e-09, "C": 12129.260307725155, "nrep": 3.906945602966425, "nattr": 0.895598309376088 }, "Delta_eq": 1.2107230911508513e-09 }, "-61.93": { "LJ_approx": { "x0": 1.635297932833928e-09, "C": 19347.359224637305, "nrep": 3.9190113341505843, "nattr": 1.0129039638328117 }, "Delta_eq": 1.281743450351987e-09 }, "-58.00": { "LJ_approx": { "x0": 1.6095250707781465e-09, "C": 20329.27848623273, "nrep": 3.92057191136993, "nattr": 1.0267862446034561 }, "Delta_eq": 1.2922508866011204e-09 }, "-140.00": { "LJ_approx": { "x0": 3.5398097121754107e-09, "C": 1255.8690004347025, "nrep": 3.8798490490747404, "nattr": 0.4604604439108636 }, "Delta_eq": 1.1019265101358682e-09 }, "-200.00": { "LJ_approx": { "x0": 6.6005456899992844e-09, "C": 144.85407475420143, "nrep": 3.838295127819493, "nattr": 0.00992375961984237 }, "Delta_eq": 1.0050623818936587e-09 }, "-60.00": { "LJ_approx": { "x0": 1.6223930024686038e-09, "C": 19832.380892638503, "nrep": 3.9197835635468294, "nattr": 1.019801114668753 }, "Delta_eq": 1.2869009570089227e-09 }, "-160.00": { "LJ_approx": { "x0": 6.33567252974036e-09, "C": 143.09307106178315, "nrep": 3.866387420765763, "nattr": 0.08081951236969916 }, "Delta_eq": 1.0662975449878705e-09 } }, "50.0": { "-71.90": { "LJ_approx": { "x0": 1.7114794411958874e-09, "C": 16732.841575829825, "nrep": 3.914827883392826, "nattr": 0.9781401389550711 }, "Delta_eq": 1.2553492695740507e-09 } }, "10.0": { "0.00": { "LJ_approx": { "x0": 1.4403460578039628e-09, "C": 27932.27792195569, "nrep": 3.9334138654752686, "nattr": 1.19526523864855 }, "Delta_eq": 1.4e-09 }, "-71.90": { "LJ_approx": { "x0": 1.7286986021591825e-09, "C": 16087.514816365254, "nrep": 3.9147885683678543, "nattr": 1.012616990226475 }, "Delta_eq": 1.2553492695740507e-09 } }, "100.0": { "0.00": { "LJ_approx": { "x0": 1.4254455131143225e-09, "C": 29048.417918044444, "nrep": 3.9342659189249254, "nattr": 1.1351227816904121 }, "Delta_eq": 1.4e-09 }, "-71.90": { "LJ_approx": { "x0": 1.7087681652667724e-09, "C": 16833.83962398515, "nrep": 3.914908533680663, "nattr": 0.9697102045586926 }, "Delta_eq": 1.2553492695740507e-09 }, "-71.40": { "LJ_approx": { "x0": 1.7046480280451768e-09, "C": 16965.15489682674, "nrep": 3.9151238997284845, "nattr": 0.9716928395857687 }, "Delta_eq": 1.2566584760815426e-09 }, "-54.00": { "LJ_approx": { "x0": 1.5838767580748841e-09, "C": 21372.412565003375, 
"nrep": 3.922191259312523, "nattr": 1.0344167918733638 }, "Delta_eq": 1.302942739961778e-09 }, "-89.50": { "LJ_approx": { "x0": 1.8850831984948754e-09, "C": 12173.857567383837, "nrep": 3.906959887367545, "nattr": 0.8923154523792497 }, "Delta_eq": 1.2107230911508513e-09 }, "-61.93": { "LJ_approx": { "x0": 1.6338405482612552e-09, "C": 19411.96069791924, "nrep": 3.9190798895919405, "nattr": 1.0073171165886772 }, "Delta_eq": 1.281743450351987e-09 } }, "500.0": { "0.00": { "LJ_approx": { "x0": 1.4236928207491738e-09, "C": 29174.423851140436, "nrep": 3.934489087598531, "nattr": 1.1244706663694597 }, "Delta_eq": 1.4e-09 } }, "15.0": { "-71.90": { "LJ_approx": { "x0": 1.722207527516432e-09, "C": 16331.124295102756, "nrep": 3.914721476976037, "nattr": 1.0021304453280393 }, "Delta_eq": 1.2553492695740507e-09 }, "-71.40": { "LJ_approx": { "x0": 1.7180439454408102e-09, "C": 16459.15520614834, "nrep": 3.9149304238974905, "nattr": 1.0043742068193204 }, "Delta_eq": 1.2566584760815426e-09 }, "-54.00": { "LJ_approx": { "x0": 1.5959361190370466e-09, "C": 20763.823187183425, "nrep": 3.921785737782814, "nattr": 1.0737976563473925 }, "Delta_eq": 1.302942739961778e-09 }, "-89.50": { "LJ_approx": { "x0": 1.9002701433896942e-09, "C": 11795.576706463997, "nrep": 3.9070059150621814, "nattr": 0.9114123498082551 }, "Delta_eq": 1.2107230911508513e-09 }, "-61.93": { "LJ_approx": { "x0": 1.646473044478369e-09, "C": 18846.9803104403, "nrep": 3.918766624746869, "nattr": 1.044220870098494 }, "Delta_eq": 1.281743450351987e-09 } }, "70.0": { "-61.93": { "LJ_approx": { "x0": 1.6349587829329923e-09, "C": 19362.426097728276, "nrep": 3.9190260877993097, "nattr": 1.0116609492531905 }, "Delta_eq": 1.281743450351987e-09 }, "-71.90": { "LJ_approx": { "x0": 1.7099628637265945e-09, "C": 16789.420291577666, "nrep": 3.9148688185890483, "nattr": 0.9736446481917082 }, "Delta_eq": 1.2553492695740507e-09 }, "-71.40": { "LJ_approx": { "x0": 1.7058388214243274e-09, "C": 16920.455606556396, "nrep": 3.9150834388621805, "nattr": 0.9756530742858303 }, "Delta_eq": 1.2566584760815426e-09 }, "-54.00": { "LJ_approx": { "x0": 1.584940743805642e-09, "C": 21319.35643207058, "nrep": 3.9221277053335264, "nattr": 1.0389567310289958 }, "Delta_eq": 1.302942739961778e-09 }, "-89.50": { "LJ_approx": { "x0": 1.886457143504779e-09, "C": 12139.584658299475, "nrep": 3.9069481854501382, "nattr": 0.8948872453823127 }, "Delta_eq": 1.2107230911508513e-09 } }, "150.0": { "-71.90": { "LJ_approx": { "x0": 1.707781542586275e-09, "C": 16870.340248462282, "nrep": 3.914948339378328, "nattr": 0.9660711186661354 }, "Delta_eq": 1.2553492695740507e-09 }, "-71.40": { "LJ_approx": { "x0": 1.7036651779798684e-09, "C": 17001.86272239105, "nrep": 3.9151643485118117, "nattr": 0.9680342060844059 }, "Delta_eq": 1.2566584760815426e-09 }, "-54.00": { "LJ_approx": { "x0": 1.5830041112065094e-09, "C": 21415.64649760579, "nrep": 3.9222512220871013, "nattr": 1.0303387653647968 }, "Delta_eq": 1.302942739961778e-09 }, "-89.50": { "LJ_approx": { "x0": 1.883936689519767e-09, "C": 12202.390459945682, "nrep": 3.906974649816042, "nattr": 0.8898102498996743 }, "Delta_eq": 1.2107230911508513e-09 }, "-61.93": { "LJ_approx": { "x0": 1.6329212539031057e-09, "C": 19452.44141782297, "nrep": 3.9191317215599946, "nattr": 1.0033715654432673 }, "Delta_eq": 1.281743450351987e-09 } }, "16.0": { "-71.90": { "LJ_approx": { "x0": 1.721337196662225e-09, "C": 16363.738563915058, "nrep": 3.9147202446411637, "nattr": 1.0005179992350384 }, "Delta_eq": 1.2553492695740507e-09 }, "-71.40": { "LJ_approx": { "x0": 
1.717176984027311e-09, "C": 16491.97282711717, "nrep": 3.9149293522242252, "nattr": 1.0027563589061772 }, "Delta_eq": 1.2566584760815426e-09 }, "-54.00": { "LJ_approx": { "x0": 1.5951507107307484e-09, "C": 20803.713978702526, "nrep": 3.921796023871708, "nattr": 1.0717457519944718 }, "Delta_eq": 1.302942739961778e-09 }, "-89.50": { "LJ_approx": { "x0": 1.899300769652515e-09, "C": 11819.650350288994, "nrep": 3.906993085458305, "nattr": 0.9105930969008011 }, "Delta_eq": 1.2107230911508513e-09 }, "-61.93": { "LJ_approx": { "x0": 1.6456524054221337e-09, "C": 18883.851022856303, "nrep": 3.918771854603072, "nattr": 1.042336408142498 }, "Delta_eq": 1.281743450351987e-09 }, "-58.00": { "LJ_approx": { "x0": 1.6196423302303851e-09, "C": 19847.346684716063, "nrep": 3.920294617611314, "nattr": 1.0573210567487794 }, "Delta_eq": 1.2922508866011204e-09 }, "-140.00": { "LJ_approx": { "x0": 3.536300349187651e-09, "C": 1259.968827699725, "nrep": 3.8800144975132485, "nattr": 0.35722933384459793 }, "Delta_eq": 1.1019265101358682e-09 }, "-200.00": { "LJ_approx": { "x0": 4.448077576688761e-09, "C": 658.9502615105886, "nrep": 3.8382596040618573, "nattr": -0.0007412836249291593 }, "Delta_eq": 1.0050623818936587e-09 }, "-60.00": { "LJ_approx": { "x0": 1.6326306076113028e-09, "C": 19359.641562109642, "nrep": 3.9195252093199606, "nattr": 1.0498037816897123 }, "Delta_eq": 1.2869009570089227e-09 }, "-160.00": { "LJ_approx": { "x0": 4.9755404141516864e-09, "C": 364.2006750890508, "nrep": 3.866403743827545, "nattr": 0.007733297873632889 }, "Delta_eq": 1.0662975449878705e-09 } }, "40.0": { "-71.90": { "LJ_approx": { "x0": 1.7127556130633909e-09, "C": 16685.139617894773, "nrep": 3.9147997833590855, "nattr": 0.9816110605284186 }, "Delta_eq": 1.2553492695740507e-09 }, "-140.00": { "LJ_approx": { "x0": 3.5400899114625196e-09, "C": 1255.3328685760946, "nrep": 3.8798858604365565, "nattr": 0.4340973480775988 }, "Delta_eq": 1.1019265101358682e-09 } }, "30.0": { "-71.90": { "LJ_approx": { "x0": 1.7147994934556977e-09, "C": 16608.65261608246, "nrep": 3.914764637335829, "nattr": 0.9867268079339511 }, "Delta_eq": 1.2553492695740507e-09 } }, "25.4": { "-71.90": { "LJ_approx": { "x0": 1.7162265501918752e-09, "C": 16555.217050637588, "nrep": 3.9147464050871856, "nattr": 0.9900398844251959 }, "Delta_eq": 1.2553492695740507e-09 }, "-61.93": { "LJ_approx": { "x0": 1.6408385667642563e-09, "C": 19099.831853142834, "nrep": 3.9188405082474103, "nattr": 1.0301853851264013 }, "Delta_eq": 1.281743450351987e-09 } }, "40.3": { "-71.90": { "LJ_approx": { "x0": 1.7127058661258177e-09, "C": 16686.9995037205, "nrep": 3.914800799246736, "nattr": 0.981479342025535 }, "Delta_eq": 1.2553492695740507e-09 }, "-61.93": { "LJ_approx": { "x0": 1.637531858311385e-09, "C": 19247.790954282773, "nrep": 3.918928365198691, "nattr": 1.020450016688262 }, "Delta_eq": 1.281743450351987e-09 } }, "45.0": { "-71.90": { "LJ_approx": { "x0": 1.712051746586677e-09, "C": 16711.456749369456, "nrep": 3.914814642254888, "nattr": 0.979726839958776 }, "Delta_eq": 1.2553492695740507e-09 } }, "20.0": { "-71.90": { "LJ_approx": { "x0": 1.7186388439609698e-09, "C": 16464.85454836168, "nrep": 3.9147266088229755, "nattr": 0.9952322612710219 }, "Delta_eq": 1.2553492695740507e-09 }, "-140.00": { "LJ_approx": { "x0": 3.5381843063817075e-09, "C": 1257.5756585425543, "nrep": 3.8799713679048966, "nattr": 0.38013951841061877 }, "Delta_eq": 1.1019265101358682e-09 } }, "60.0": { "-71.90": { "LJ_approx": { "x0": 1.710603828012074e-09, "C": 16765.5278159216, "nrep": 3.9148503824940146, "nattr": 
0.9756024890579372 }, "Delta_eq": 1.2553492695740507e-09 } }, "34.0": { "-71.90": { "LJ_approx": { "x0": 1.7138494069983225e-09, "C": 16644.21766668786, "nrep": 3.9147795488410644, "nattr": 0.9844103642751335 }, "Delta_eq": 1.2553492695740507e-09 } }, "22.6": { "-71.90": { "LJ_approx": { "x0": 1.717347083819261e-09, "C": 16513.244775760173, "nrep": 3.914735582752492, "nattr": 0.9925083846044295 }, "Delta_eq": 1.2553492695740507e-09 }, "-54.00": { "LJ_approx": { "x0": 1.5915583742563881e-09, "C": 20985.87279990122, "nrep": 3.9218681449692334, "nattr": 1.06169568520175 }, "Delta_eq": 1.302942739961778e-09 }, "-71.40": { "LJ_approx": { "x0": 1.7131877388482784e-09, "C": 16642.92622648039, "nrep": 3.9149463907912256, "nattr": 0.9946488696557423 }, "Delta_eq": 1.2566584760815426e-09 } }, "45.3": { "-71.90": { "LJ_approx": { "x0": 1.7120143529753677e-09, "C": 16712.8535932463, "nrep": 3.9148154954695285, "nattr": 0.9796234512533043 }, "Delta_eq": 1.2553492695740507e-09 }, "-71.40": { "LJ_approx": { "x0": 1.7078889917152291e-09, "C": 16843.200319018524, "nrep": 3.9150287560913037, "nattr": 0.9816938577273149 }, "Delta_eq": 1.2566584760815426e-09 }, "-54.00": { "LJ_approx": { "x0": 1.5867800612156331e-09, "C": 21227.103472649964, "nrep": 3.922035517445887, "nattr": 1.0460446908119716 }, "Delta_eq": 1.302942739961778e-09 } }, "31.0": { "0.00": { "LJ_approx": { "x0": 1.429704934686545e-09, "C": 28734.55271989346, "nrep": 3.9338783401809607, "nattr": 1.155904475115303 }, "Delta_eq": 1.4e-09 }, "-71.90": { "LJ_approx": { "x0": 1.714533452342314e-09, "C": 16618.61152256, "nrep": 3.9147686086169227, "nattr": 0.9860860917336786 }, "Delta_eq": 1.2553492695740507e-09 } }, "31.1": { "-71.90": { "LJ_approx": { "x0": 1.714515996985078e-09, "C": 16619.26565583038, "nrep": 3.914768856863524, "nattr": 0.986044873313721 }, "Delta_eq": 1.2553492695740507e-09 } }, "45.2": { "-71.90": { "LJ_approx": { "x0": 1.7120203335936594e-09, "C": 16712.630620283762, "nrep": 3.914815347514677, "nattr": 0.9796407085930723 }, "Delta_eq": 1.2553492695740507e-09 } }, "28.0": { "-140.00": { "LJ_approx": { "x0": 3.539662733628618e-09, "C": 1255.7594429416959, "nrep": 3.8799232005747073, "nattr": 0.4091144934965918 }, "Delta_eq": 1.1019265101358682e-09 } }, "52.0": { "-140.00": { "LJ_approx": { "x0": 3.540099825001411e-09, "C": 1255.4092689381769, "nrep": 3.8798640679267495, "nattr": 0.44958440372614994 }, "Delta_eq": 1.1019265101358682e-09 } }, "48.0": { "-140.00": { "LJ_approx": { "x0": 3.540113162797765e-09, "C": 1255.365483546012, "nrep": 3.8798702909870157, "nattr": 0.4451084636623491 }, "Delta_eq": 1.1019265101358682e-09 } }, "25.0": { "-140.00": { "LJ_approx": { "x0": 3.5394591974100957e-09, "C": 1255.9739311868518, "nrep": 3.8799379083844014, "nattr": 0.3998616563003167 }, "Delta_eq": 1.1019265101358682e-09 } }, "24.0": { "-140.00": { "LJ_approx": { "x0": 3.539294542800419e-09, "C": 1256.1755733964683, "nrep": 3.8799434079572483, "nattr": 0.3965246264708772 }, "Delta_eq": 1.1019265101358682e-09 } }, "26.0": { "-71.90": { "LJ_approx": { "x0": 1.716013742853656e-09, "C": 16563.186031095847, "nrep": 3.914748825378261, "nattr": 0.9895571841402089 }, "Delta_eq": 1.2553492695740507e-09 } }, "47.8": { "-71.90": { "LJ_approx": { "x0": 1.7117134574481139e-09, "C": 16724.09882321637, "nrep": 3.914822340960349, "nattr": 0.9787950821308133 }, "Delta_eq": 1.2553492695740507e-09 } }, "45.9": { "-71.90": { "LJ_approx": { "x0": 1.711942249735064e-09, "C": 16715.54886943138, "nrep": 3.9148171027544123, "nattr": 0.9794265342377567 }, 
"Delta_eq": 1.2553492695740507e-09 } }, "30.3": { "-71.90": { "LJ_approx": { "x0": 1.7147276912050609e-09, "C": 16611.3405217478, "nrep": 3.9147656933151262, "nattr": 0.9865543051062363 }, "Delta_eq": 1.2553492695740507e-09 } }, "43.4": { "-71.90": { "LJ_approx": { "x0": 1.7122593582582404e-09, "C": 16703.696731600092, "nrep": 3.914810082243474, "nattr": 0.9802910111364295 }, "Delta_eq": 1.2553492695740507e-09 } }, "59.7": { "-71.90": { "LJ_approx": { "x0": 1.710624810423359e-09, "C": 16764.744721099407, "nrep": 3.914849818351629, "nattr": 0.9756645210872293 }, "Delta_eq": 1.2553492695740507e-09 } }, "31.6": { "-71.90": { "LJ_approx": { "x0": 1.7144029387800298e-09, "C": 16623.497279426527, "nrep": 3.914770610799719, "nattr": 0.9857697612214258 }, "Delta_eq": 1.2553492695740507e-09 } }, "36.3": { "-71.90": { "LJ_approx": { "x0": 1.7133999698436411e-09, "C": 16661.0351629387, "nrep": 3.9147874692503435, "nattr": 0.9832772435848539 }, "Delta_eq": 1.2553492695740507e-09 } }, "55.7": { "-71.90": { "LJ_approx": { "x0": 1.710943143434582e-09, "C": 16752.866849028967, "nrep": 3.914841318636611, "nattr": 0.9766031714646571 }, "Delta_eq": 1.2553492695740507e-09 } }, "48.5": { "-71.90": { "LJ_approx": { "x0": 1.7116397026523852e-09, "C": 16726.853954116246, "nrep": 3.9148240832062964, "nattr": 0.9785886152529253 }, "Delta_eq": 1.2553492695740507e-09 } }, "42.2": { "-71.90": { "LJ_approx": { "x0": 1.712423436257706e-09, "C": 16697.560003586437, "nrep": 3.9148066437354734, "nattr": 0.980728415714794 }, "Delta_eq": 1.2553492695740507e-09 } }, "27.9": { "-71.90": { "LJ_approx": { "x0": 1.7154107379662085e-09, "C": 16585.76539172651, "nrep": 3.914756263081378, "nattr": 0.9881670164800336 }, "Delta_eq": 1.2553492695740507e-09 } }, "36.8": { "-71.90": { "LJ_approx": { "x0": 1.7133062675842822e-09, "C": 16664.541652779935, "nrep": 3.9147891722285952, "nattr": 0.9830389492355903 }, "Delta_eq": 1.2553492695740507e-09 } }, "24.3": { "-71.90": { "LJ_approx": { "x0": 1.7166571039691342e-09, "C": 16539.090593408535, "nrep": 3.91474189560473, "nattr": 0.9910014451873015 }, "Delta_eq": 1.2553492695740507e-09 } }, "80.0": { "-61.93": { "LJ_approx": { "x0": 1.6344991861630834e-09, "C": 19382.81542285848, "nrep": 3.9190471576123405, "nattr": 1.0099259239851026 }, "Delta_eq": 1.281743450351987e-09 } }, "22.0": { "-71.90": { "LJ_approx": { "x0": 1.7176213744119541e-09, "C": 16502.97011663288, "nrep": 3.914733365777537, "nattr": 0.9930970282708068 }, "Delta_eq": 1.2553492695740507e-09 } } } \ No newline at end of file diff --git a/PySONIC/core/model.py b/PySONIC/core/model.py index dda12f7..8c5cc42 100644 --- a/PySONIC/core/model.py +++ b/PySONIC/core/model.py @@ -1,228 +1,228 @@ # -*- coding: utf-8 -*- # @Author: Theo Lemaire # @Email: theo.lemaire@epfl.ch # @Date: 2017-08-03 11:53:04 # @Last Modified by: Theo Lemaire -# @Last Modified time: 2020-04-28 20:08:46 +# @Last Modified time: 2020-06-03 16:15:58 from functools import wraps from inspect import getdoc import abc import inspect import numpy as np from boltons.strutils import cardinalize from .batches import Batch from ..threshold import titrate from ..utils import * class Model(metaclass=abc.ABCMeta): ''' Generic model interface. ''' @property @abc.abstractmethod def tscale(self): ''' Relevant temporal scale of the model. ''' raise NotImplementedError @property @abc.abstractmethod def simkey(self): ''' Keyword used to characterize simulations made with the model. ''' raise NotImplementedError @abc.abstractmethod def __repr__(self): ''' String representation. 
''' raise NotImplementedError def params(self): ''' Return a dictionary of all model parameters (class and instance attributes) ''' def toAvoid(p): return (p.startswith('__') and p.endswith('__')) or p.startswith('_abc_') class_attrs = inspect.getmembers(self.__class__, lambda a: not(inspect.isroutine(a))) inst_attrs = inspect.getmembers(self, lambda a: not(inspect.isroutine(a))) class_attrs = [a for a in class_attrs if not toAvoid(a[0])] inst_attrs = [a for a in inst_attrs if not toAvoid(a[0]) and a not in class_attrs] params_dict = {a[0]: a[1] for a in class_attrs + inst_attrs} return params_dict @classmethod def description(cls): return getdoc(cls).split('\n', 1)[0].strip() @abc.abstractmethod def copy(self): raise NotImplementedError @staticmethod @abc.abstractmethod def inputs(): ''' Return an informative dictionary on input variables used to simulate the model. ''' raise NotImplementedError @abc.abstractmethod def filecodes(self, *args): ''' Return a dictionary of string-encoded inputs used for file naming. ''' raise NotImplementedError def filecode(self, *args): return filecode(self, *args) @classmethod @abc.abstractmethod def getPltVars(self, *args, **kwargs): ''' Return a dictionary with information about all plot variables related to the model. ''' raise NotImplementedError @property @abc.abstractmethod def pltScheme(self): ''' Return a dictionary of model plot variables grouped by context. ''' raise NotImplementedError @staticmethod def checkOutputDir(queuefunc): ''' Check if an output directory is provided in input arguments, and if so, add it to each item of the returned queue (along with an "overwrite" boolean). ''' @wraps(queuefunc) def wrapper(self, *args, **kwargs): outputdir = kwargs.get('outputdir') queue = queuefunc(self, *args, **kwargs) if outputdir is not None: overwrite = kwargs.get('overwrite', True) queue = queuefunc(self, *args, **kwargs) for i, params in enumerate(queue): position_args, keyword_args = Batch.resolve(params) keyword_args['overwrite'] = overwrite keyword_args['outputdir'] = outputdir queue[i] = (position_args, keyword_args) else: if len(queue) > 5: logger.warning('Running more than 5 simulations without file saving') return queue return wrapper @classmethod @abc.abstractmethod def simQueue(cls, *args, outputdir=None, overwrite=True): raise NotImplementedError @staticmethod @abc.abstractmethod def checkInputs(self, *args): ''' Check the validity of simulation input parameters. ''' raise NotImplementedError @abc.abstractmethod def derivatives(self, *args, **kwargs): ''' Compute ODE derivatives for a specific set of ODE states and external parameters. ''' raise NotImplementedError @abc.abstractmethod def simulate(self, *args, **kwargs): ''' Simulate the model's differential system for specific input parameters and return output data in a dataframe. ''' raise NotImplementedError @classmethod @abc.abstractmethod def meta(self, *args): ''' Return an informative dictionary about model and simulation parameters. ''' raise NotImplementedError @staticmethod def addMeta(simfunc): ''' Add informative dictionary to simulation output ''' @wraps(simfunc) def wrapper(self, *args, **kwargs): data, tcomp = timer(simfunc)(self, *args, **kwargs) logger.debug('completed in %ss', si_format(tcomp, 1)) meta_dict = getMeta(self, simfunc, *args, **kwargs) meta_dict['tcomp'] = tcomp return data, meta_dict return wrapper @staticmethod def logNSpikes(simfunc): ''' Log number of detected spikes on charge profile of simulation output.
''' @wraps(simfunc) def wrapper(self, *args, **kwargs): out = simfunc(self, *args, **kwargs) if out is None: return None data, meta = out nspikes = self.getNSpikes(data) logger.debug(f'{nspikes} {cardinalize("spike", nspikes)} detected') return data, meta return wrapper @staticmethod def checkSimParams(simfunc): ''' Check simulation parameters before launching simulation. ''' @wraps(simfunc) def wrapper(self, *args, **kwargs): args, kwargs = alignWithMethodDef(simfunc, args, kwargs) self.checkInputs(*args, *list(kwargs.values())) return simfunc(self, *args, **kwargs) return wrapper @staticmethod def logDesc(simfunc): ''' Log description of model and simulation parameters. ''' @wraps(simfunc) def wrapper(self, *args, **kwargs): args, kwargs = alignWithMethodDef(simfunc, args, kwargs) logger.info(self.desc(getMeta(self, simfunc, *args, **kwargs))) return simfunc(self, *args, **kwargs) return wrapper def titrate(self, *args, **kwargs): return titrate(self, *args, **kwargs) @staticmethod def checkTitrate(simfunc): ''' If an unresolved drive is provided in the list of input parameters, perform a titration to find the threshold drive, and simulate with the resolved drive. ''' @wraps(simfunc) def wrapper(self, *args, **kwargs): # Extract drive object from args drive, *other_args = args # If drive is titratable and not fully resolved if drive.is_searchable: if not drive.is_resolved: # Titrate xthr = self.titrate(*args) # If no threshold was found, return None if np.isnan(xthr): logger.error( f'Could not find threshold {drive.inputs()[drive.xkey]["desc"]}') return None # Otherwise, update args list with resolved drive args = (drive.updatedX(xthr), *other_args) # Execute simulation function return simfunc(self, *args, **kwargs) return wrapper def simAndSave(self, *args, **kwargs): return simAndSave(self, *args, **kwargs) def getOutput(self, *args, **kwargs): ''' Get simulation output data for a specific parameter combination, by looking for an output file in a specific directory. If a corresponding output file is not found in the specified directory, the model is first run and results are saved in the output file. ''' fpath = self.simAndSave(*args, overwrite=False, **kwargs) return loadData(fpath) diff --git a/PySONIC/core/solvers.py b/PySONIC/core/solvers.py index bd2f24e..e5d9461 100644 --- a/PySONIC/core/solvers.py +++ b/PySONIC/core/solvers.py @@ -1,634 +1,630 @@ # -*- coding: utf-8 -*- # @Author: Theo Lemaire # @Email: theo.lemaire@epfl.ch # @Date: 2019-05-28 14:45:12 # @Last Modified by: Theo Lemaire -# @Last Modified time: 2020-05-02 17:08:26 +# @Last Modified time: 2020-06-03 16:44:22 import numpy as np import pandas as pd from scipy.interpolate import interp1d from scipy.integrate import ode, odeint, solve_ivp from tqdm import tqdm from ..utils import * from ..constants import * class ODESolver: ''' Generic interface to ODE solver object. ''' def __init__(self, ykeys, dfunc, dt=None): ''' Initialization.
:param ykeys: list of differential variable names :param dfunc: derivative function :param dt: integration time step (s) ''' self.ykeys = ykeys self.dfunc = dfunc self.dt = dt def checkFunc(self, key, value): if not callable(value): raise ValueError(f'{key} function must be a callable object') @property def ykeys(self): return self._ykeys @ykeys.setter def ykeys(self, value): if not isIterable(value): value = list(value) for item in value: if not isinstance(item, str): raise ValueError('ykeys must be a list of strings') self._ykeys = value @property def nvars(self): return len(self.ykeys) @property def dfunc(self): return self._dfunc @dfunc.setter def dfunc(self, value): self.checkFunc('derivative', value) self._dfunc = value @property def dt(self): return self._dt @dt.setter def dt(self, value): if value is None: self._dt = None else: if not isinstance(value, float): raise ValueError('time step must be float-typed') if value <= 0: raise ValueError('time step must be strictly positive') self._dt = value def getNSamples(self, t0, tend, dt=None): ''' Get the number of samples required to integrate across 2 times with a given time step. :param t0: initial time (s) :param tend: final time (s) :param dt: integration time step (s) :return: number of required samples, rounded to nearest integer ''' if dt is None: dt = self.dt return max(int(np.round((tend - t0) / dt)), 2) def getTimeVector(self, t0, tend, **kwargs): ''' Get the time vector required to integrate from an initial to a final time with a specific time step. :param t0: initial time (s) :param tend: final time (s) :return: vector going from current time to target time with appropriate step (s) ''' return np.linspace(t0, tend, self.getNSamples(t0, tend, **kwargs)) def initialize(self, y0, t0=0.): ''' Initialize global time vector, state vector and solution array. :param y0: dictionary of initial conditions :param t0: optional initial time or time vector (s) ''' keys = list(y0.keys()) if len(keys) != len(self.ykeys): raise ValueError("Initial conditions do not match system's dimensions") for k in keys: if k not in self.ykeys: raise ValueError(f'{k} is not a differential variable') y0 = {k: np.asarray(v) if isIterable(v) else np.array([v]) for k, v in y0.items()} ref_size = y0[keys[0]].size if not all(v.size == ref_size for v in y0.values()): raise ValueError('dimensions of initial conditions are inconsistent') self.y = np.array(list(y0.values())).T self.t = np.ones(self.y.shape[0]) * t0 self.x = np.zeros(self.t.size) def append(self, t, y): ''' Append to global time vector, state vector and solution array. :param t: new time vector to append (s) :param y: new solution matrix to append ''' self.t = np.concatenate((self.t, t)) self.y = np.concatenate((self.y, y), axis=0) self.x = np.concatenate((self.x, np.ones(t.size) * self.xref)) def bound(self, tbounds): ''' Restrict global time vector, state vector and solution matrix within a specific time range. :param tbounds: minimal and maximal allowed time restricting the global arrays (s). ''' i_bounded = np.logical_and(self.t >= tbounds[0], self.t <= tbounds[1]) self.t = self.t[i_bounded] self.y = self.y[i_bounded, :] self.x = self.x[i_bounded] @staticmethod def timeStr(t): return f'{t * 1e3:.5f} ms' def timedlog(self, s, t=None): ''' Add preceding time information to log string. ''' if t is None: t = self.t[-1] return f't = {self.timeStr(t)}: {s}' def integrateUntil(self, target_t, remove_first=False): ''' Integrate system until a target time and append new arrays to global arrays.
:param target_t: target time (s) :param remove_first: optional boolean specifying whether to remove the first index of the new arrays before appending ''' if target_t < self.t[-1]: raise ValueError(f'target time ({target_t} s) precedes current time {self.t[-1]} s') elif target_t == self.t[-1]: t, y = self.t[-1], self.y[-1] if self.dt is None: sol = solve_ivp( self.dfunc, [self.t[-1], target_t], self.y[-1], method='LSODA') t, y = sol.t, sol.y.T else: t = self.getTimeVector(self.t[-1], target_t) y = odeint(self.dfunc, self.y[-1], t, tfirst=True) if remove_first: t, y = t[1:], y[1:] self.append(t, y) def resampleArrays(self, t, y, target_dt): ''' Resample a time vector and solution matrix to target time step. :param t: time vector to resample (s) :param y: solution matrix to resample :param target_dt: target time step (s) :return: resampled time vector and solution matrix ''' tnew = self.getTimeVector(t[0], t[-1], dt=target_dt) ynew = np.array([np.interp(tnew, t, x) for x in y.T]).T return tnew, ynew def resample(self, target_dt): ''' Resample global arrays to a new target time step. :param target_dt: target time step (s) ''' tnew, self.y = self.resampleArrays(self.t, self.y, target_dt) self.x = interp1d(self.t, self.x, kind='nearest', assume_sorted=True)(tnew) self.t = tnew def solve(self, y0, tstop, **kwargs): ''' Simulate system for a given time interval for specific initial conditions. :param y0: dictionary of initial conditions :param tstop: stopping time (s) ''' # Initialize system self.initialize(y0, **kwargs) # Integrate until tstop self.integrateUntil(tstop, remove_first=True) @property def solution(self): ''' Return solution as a pandas dataframe. :return: timeseries dataframe with labeled time, state and variable vectors. ''' - return pd.DataFrame({ - 't': self.t, - 'stimstate': self.x, - **{k: self.y[:, i] for i, k in enumerate(self.ykeys)} - }) + return TimeSeries(self.t, self.x, {k: self.y[:, i] for i, k in enumerate(self.ykeys)}) def __call__(self, *args, target_dt=None, max_nsamples=None, **kwargs): ''' Specific call method: solve the system, resample solution if needed, and return solution dataframe. ''' self.solve(*args, **kwargs) if target_dt is not None: self.resample(target_dt) elif max_nsamples is not None and self.t.size > max_nsamples: self.resample(np.ptp(self.t) / max_nsamples) return self.solution class PeriodicSolver(ODESolver): ''' ODE solver that integrates periodically until a stable periodic behavior is detected.''' def __init__(self, T, *args, primary_vars=None, **kwargs): ''' Initialization. :param T: periodicity (s) :param primary_vars: keys of the primary solution variables to check for stability ''' super().__init__(*args, **kwargs) self.T = T self.primary_vars = primary_vars @property def T(self): return self._T @T.setter def T(self, value): if not isinstance(value, float): raise ValueError('periodicity must be float-typed') if value <= 0: raise ValueError('periodicity must be strictly positive') self._T = value @property def primary_vars(self): return self._primary_vars @primary_vars.setter def primary_vars(self, value): if value is None: # If none specified, set all variables to be checked for stability value = self.ykeys if not isIterable(value): value = [value] for item in value: if item not in self.ykeys: raise ValueError(f'{item} is not a differential variable') self._primary_vars = value @property def i_primary_vars(self): return [self.ykeys.index(k) for k in self.primary_vars] @property def xref(self): return 1.
def getNPerCycle(self, dt=None): ''' Compute number of samples per cycle. :param dt: optional integration time step (s) :return: number of samples per cycle, rounded to nearest integer ''' # if time step not provided, compute dt from last 2 elements of time vector if dt is None: dt = self.t[-1] - self.t[-2] return int(np.round(self.T / dt)) def getCycle(self, i, ivars=None): ''' Get time vector and solution matrix for the ith cycle. :param i: cycle index :param ivars: optional indexes of subset of variables of interest :return: solution matrix for ith cycle, filtered for variables of interest ''' # By default, consider indexes of all variables if ivars is None: ivars = range(self.nvars) # Get first time index where time difference differs from solver's time step, if any i_diff_dt = np.where(np.invert(np.isclose(np.diff(self.t)[::-1], self.dt)))[0] # Determine the number of samples to consider in the backwards direction nsamples = i_diff_dt[0] if i_diff_dt.size > 0 else self.t.size npc = self.getNPerCycle() # number of samples per cycle ncycles = int(np.round(nsamples / npc)) # rounded number of cycles ioffset = self.t.size - npc * ncycles # corresponding initial index offset # Check index validity if i < 0: i += ncycles if i < 0 or i >= ncycles: raise ValueError('Invalid index') # Compute start and end indexes istart = i * npc + ioffset iend = istart + npc # Return arrays for corresponding cycle return self.t[istart:iend], self.y[istart:iend, ivars] def isPeriodicallyStable(self): ''' Assess the periodic stabilization of a solution, by evaluating the deviation of system variables between the last two periods. :return: boolean stating whether the solution is periodically stable or not ''' # Extract the last 2 cycles of the primary variables from the solution y_last, y_prec = [self.getCycle(-i, ivars=self.i_primary_vars)[1] for i in [1, 2]] # Evaluate ratios of RMSE between the two cycles / variation range over the last cycle ratios = rmse(y_last, y_prec, axis=0) / np.ptp(y_last, axis=0) # Classify solution as periodically stable only if all ratios are below critical threshold return np.all(ratios < MAX_RMSE_PTP_RATIO) def integrateCycle(self): ''' Integrate system for a cycle. ''' self.integrateUntil(self.t[-1] + self.T, remove_first=True) def solve(self, y0, nmax=None, **kwargs): ''' Simulate system with a specific periodicity until stopping criterion is met. :param y0: dictionary of initial conditions :param nmax: maximum number of integration cycles (optional) ''' if nmax is None: nmax = NCYCLES_MAX # Initialize system if y0 is not None: self.initialize(y0, **kwargs) # Integrate system for 2 cycles for i in range(2): self.integrateCycle() # Keep integrating system periodically until stopping criterion is met while not self.isPeriodicallyStable() and i < nmax: self.integrateCycle() i += 1 # Log stopping criterion if i == nmax: logger.warning(self.timedlog(f'criterion not met -> stopping after {i} cycles')) else: logger.debug(self.timedlog(f'stopping criterion met after {i} cycles')) class EventDrivenSolver(ODESolver): ''' Event-driven ODE solver. ''' def __init__(self, eventfunc, *args, event_params=None, **kwargs): ''' Initialization. :param eventfunc: function called on each event :param event_params: dictionary of parameters used by the derivatives function ''' super().__init__(*args, **kwargs) self.eventfunc = eventfunc self.assignEventParams(event_params) def assignEventParams(self, event_params): ''' Assign event parameters as instance attributes. 
''' if event_params is not None: for k, v in event_params.items(): setattr(self, k, v) @property def eventfunc(self): return self._eventfunc @eventfunc.setter def eventfunc(self, value): self.checkFunc('event', value) self._eventfunc = value @property def xref(self): return self._xref @xref.setter def xref(self, value): self._xref = value def initialize(self, *args, **kwargs): self.xref = 0 super().initialize(*args, **kwargs) def fireEvent(self, xevent): ''' Call event function and set new xref value. ''' if xevent is not None: if xevent == 'log': self.logProgress() else: self.eventfunc(xevent) self.xref = xevent def initLog(self, logfunc, n): ''' Initialize progress logger. ''' self.logfunc = logfunc if self.logfunc is None: setHandler(logger, TqdmHandler(my_log_formatter)) self.pbar = tqdm(total=n) else: self.np = n logger.debug('integrating stimulus') def logProgress(self): ''' Log simulation progress. ''' if self.logfunc is None: self.pbar.update() else: logger.debug(self.timedlog(self.logfunc(self.y[-1]))) def terminateLog(self): ''' Terminate progress logger. ''' if self.logfunc is None: self.pbar.close() else: logger.debug('integration completed') def sortEvents(self, events): ''' Sort event pairs by occurrence time. ''' return sorted(events, key=lambda x: x[0]) def solve(self, y0, events, tstop, log_period=None, logfunc=None, **kwargs): ''' Simulate system for a specific stimulus application pattern. :param y0: 1D vector of initial conditions :param events: list of events :param tstop: stopping time (s) ''' # Sort events according to occurrence time events = self.sortEvents(events) # Make sure all events occur before tstop if events[-1][0] > tstop: raise ValueError('all events must occur before stopping time') if log_period is not None: # Add log events if any tlogs = np.arange(kwargs.get('t0', 0.), tstop, log_period)[1:] if tstop not in tlogs: tlogs = np.hstack((tlogs, [tstop])) events = self.sortEvents(events + [(t, 'log') for t in tlogs]) self.initLog(logfunc, tlogs.size) else: # Otherwise, add None event at tstop events.append((tstop, None)) # Initialize system self.initialize(y0, **kwargs) # For each upcoming event for i, (tevent, xevent) in enumerate(events): self.integrateUntil( # integrate until event time tevent, remove_first=i > 0 and events[i - 1][1] == 'log') self.fireEvent(xevent) # fire event # Terminate log if any if log_period is not None: self.terminateLog() class HybridSolver(EventDrivenSolver, PeriodicSolver): def __init__(self, ykeys, dfunc, dfunc_sparse, predfunc, eventfunc, T, dense_vars, dt_dense, dt_sparse, **kwargs): ''' Initialization.
:param ykeys: list of differential variable names :param dfunc: derivatives function :param dfunc_sparse: derivatives function for sparse integration periods :param predfunc: function computing the extra arguments necessary for sparse integration :param eventfunc: function called on each event :param T: periodicity (s) :param dense_vars: list of fast-evolving differential variables :param dt_dense: dense integration time step (s) :param dt_sparse: sparse integration time step (s) ''' PeriodicSolver.__init__( self, T, ykeys, dfunc, primary_vars=kwargs.get('primary_vars', None), dt=dt_dense) self.eventfunc = eventfunc self.assignEventParams(kwargs.get('event_params', None)) self.predfunc = predfunc self.dense_vars = dense_vars self.dt_sparse = dt_sparse self.sparse_solver = ode(dfunc_sparse) self.sparse_solver.set_integrator('dop853', nsteps=SOLVER_NSTEPS, atol=1e-12) @property def predfunc(self): return self._predfunc @predfunc.setter def predfunc(self, value): self.checkFunc('prediction', value) self._predfunc = value @property def dense_vars(self): return self._dense_vars @dense_vars.setter def dense_vars(self, value): if value is None: # If none specified, set all variables as dense variables value = self.ykeys if not isIterable(value): value = [value] for item in value: if item not in self.ykeys: raise ValueError(f'{item} is not a differential variable') self._dense_vars = value @property def is_dense_var(self): return np.array([x in self.dense_vars for x in self.ykeys]) @property def is_sparse_var(self): return np.invert(self.is_dense_var) def integrateSparse(self, ysparse, target_t): ''' Integrate sparse system until a specific time. :param ysparse: sparse 1-cycle solution matrix of fast-evolving variables :param target_t: target time (s) ''' # Compute number of samples in the sparse cycle solution npc = ysparse.shape[0] # Initialize time vector and solution array for the current interval n = int(np.ceil((target_t - self.t[-1]) / self.dt_sparse)) t = np.linspace(self.t[-1], target_t, n + 1)[1:] y = np.empty((n, self.y.shape[1])) # Initialize sparse integrator self.sparse_solver.set_initial_value(self.y[-1, self.is_sparse_var], self.t[-1]) for i, tt in enumerate(t): # Integrate to next time only if dt is above given threshold if tt - self.sparse_solver.t > MIN_SPARSE_DT: self.sparse_solver.set_f_params(self.predfunc(ysparse[i % npc])) self.sparse_solver.integrate(tt) if not self.sparse_solver.successful(): raise ValueError(self.timedlog('integration error', tt)) # Assign solution values (computed and propagated) to sparse solution array y[i, self.is_dense_var] = ysparse[i % npc, self.is_dense_var] y[i, self.is_sparse_var] = self.sparse_solver.y # Append to global solution self.append(t, y) def solve(self, y0, events, tstop, update_interval, logfunc=None, **kwargs): ''' Integrate system using a hybrid scheme: - First, the full ODE system is integrated for a few cycles with a dense time granularity until a stopping criterion is met - Second, the profiles of all variables over the last cycle are downsampled to a far lower (i.e. sparse) sampling rate - Third, a subset of the ODE system is integrated with a sparse time granularity, for the remainder of the time interval, while the remaining variables are periodically expanded from their last cycle profile.
''' # Sort events according to occurrence time events = self.sortEvents(events) # Make sure all events occur before tstop if events[-1][0] > tstop: raise ValueError('all events must occur before stopping time') # Add None event at tstop events.append((tstop, None)) # Initialize system self.initialize(y0) # Initialize event iterator ievent = iter(events) tevent, xevent = next(ievent) stop = False # While final event is not reached while not stop: # Determine end-time of current interval tend = min(tevent, self.t[-1] + update_interval) # If time interval encompasses at least one cycle, solve periodic system nmax = int(np.round((tend - self.t[-1]) / self.T)) if nmax > 0: logger.debug(self.timedlog('integrating dense system')) PeriodicSolver.solve(self, None, nmax=nmax) # If end-time of current interval has been exceeded, bound solution to that time if self.t[-1] > tend: logger.debug(self.timedlog(f'bounding system at {self.timeStr(tend)}')) self.bound((self.t[0], tend)) # If end-time of current interval has not been reached if self.t[-1] < tend: # Get solution over last cycle and resample it to sparse time step tlast, ylast = self.getCycle(-1) _, ysparse = self.resampleArrays(tlast, ylast, self.dt_sparse) # Integrate sparse system for the rest of the current interval logger.debug(self.timedlog(f'integrating sparse system until {self.timeStr(tend)}')) self.integrateSparse(ysparse, tend) # If end-time corresponds to event, fire it and move to next event if self.t[-1] == tevent: logger.debug(self.timedlog('firing event')) self.fireEvent(xevent) try: tevent, xevent = next(ievent) except StopIteration: stop = True diff --git a/PySONIC/neurons/thalamic.py b/PySONIC/neurons/thalamic.py index 9982453..5667844 100644 --- a/PySONIC/neurons/thalamic.py +++ b/PySONIC/neurons/thalamic.py @@ -1,377 +1,377 @@ # -*- coding: utf-8 -*- # @Author: Theo Lemaire # @Email: theo.lemaire@epfl.ch # @Date: 2017-07-31 15:20:54 # @Last Modified by: Theo Lemaire -# @Last Modified time: 2020-04-23 12:41:31 +# @Last Modified time: 2020-06-03 16:44:42 import numpy as np from ..core import PointNeuron, addSonicFeatures from ..constants import Z_Ca class Thalamic(PointNeuron): ''' Generic thalamic neuron Reference: *Plaksin, M., Kimmel, E., and Shoham, S. (2016). Cell-Type-Selective Effects of Intramembrane Cavitation as a Unifying Theoretical Framework for Ultrasonic Neuromodulation. 
eNeuro 3.* ''' # ------------------------------ Biophysical parameters ------------------------------ # Resting parameters Cm0 = 1e-2 # Membrane capacitance (F/m2) # Reversal potentials (mV) ENa = 50.0 # Sodium EK = -90.0 # Potassium ECa = 120.0 # Calcium # ------------------------------ Gating states kinetics ------------------------------ @classmethod def alpham(cls, Vm): return 0.32 * cls.vtrap(13 - (Vm - cls.VT), 4) * 1e3 # s-1 @classmethod def betam(cls, Vm): return 0.28 * cls.vtrap((Vm - cls.VT) - 40, 5) * 1e3 # s-1 @classmethod def alphah(cls, Vm): return 0.128 * np.exp(-((Vm - cls.VT) - 17) / 18) * 1e3 # s-1 @classmethod def betah(cls, Vm): return 4 / (1 + np.exp(-((Vm - cls.VT) - 40) / 5)) * 1e3 # s-1 @classmethod def alphan(cls, Vm): return 0.032 * cls.vtrap(15 - (Vm - cls.VT), 5) * 1e3 # s-1 @classmethod def betan(cls, Vm): return 0.5 * np.exp(-((Vm - cls.VT) - 10) / 40) * 1e3 # s-1 # ------------------------------ States derivatives ------------------------------ @classmethod def derStates(cls): return { 'm': lambda Vm, x: cls.alpham(Vm) * (1 - x['m']) - cls.betam(Vm) * x['m'], 'h': lambda Vm, x: cls.alphah(Vm) * (1 - x['h']) - cls.betah(Vm) * x['h'], 'n': lambda Vm, x: cls.alphan(Vm) * (1 - x['n']) - cls.betan(Vm) * x['n'], 's': lambda Vm, x: (cls.sinf(Vm) - x['s']) / cls.taus(Vm), 'u': lambda Vm, x: (cls.uinf(Vm) - x['u']) / cls.tauu(Vm) } # ------------------------------ Steady states ------------------------------ @classmethod def steadyStates(cls): return { 'm': lambda Vm: cls.alpham(Vm) / (cls.alpham(Vm) + cls.betam(Vm)), 'h': lambda Vm: cls.alphah(Vm) / (cls.alphah(Vm) + cls.betah(Vm)), 'n': lambda Vm: cls.alphan(Vm) / (cls.alphan(Vm) + cls.betan(Vm)), 's': lambda Vm: cls.sinf(Vm), 'u': lambda Vm: cls.uinf(Vm) } # ------------------------------ Membrane currents ------------------------------ @classmethod def iNa(cls, m, h, Vm): ''' Sodium current ''' return cls.gNabar * m**3 * h * (Vm - cls.ENa) # mA/m2 @classmethod def iKd(cls, n, Vm): ''' delayed-rectifier Potassium current ''' return cls.gKdbar * n**4 * (Vm - cls.EK) @classmethod def iCaT(cls, s, u, Vm): ''' low-threshold (Ts-type) Calcium current ''' return cls.gCaTbar * s**2 * u * (Vm - cls.ECa) # mA/m2 @classmethod def iLeak(cls, Vm): ''' non-specific leakage current ''' return cls.gLeak * (Vm - cls.ELeak) # mA/m2 @classmethod def currents(cls): return { 'iNa': lambda Vm, states: cls.iNa(states['m'], states['h'], Vm), 'iKd': lambda Vm, states: cls.iKd(states['n'], Vm), 'iCaT': lambda Vm, states: cls.iCaT(states['s'], states['u'], Vm), 'iLeak': lambda Vm, _: cls.iLeak(Vm) } @addSonicFeatures class ThalamicRE(Thalamic): ''' Thalamic reticular neuron References: *Destexhe, A., Contreras, D., Steriade, M., Sejnowski, T.J., and Huguenard, J.R. (1996). In vivo, in vitro, and computational analysis of dendritic calcium currents in thalamic reticular neurons. J. Neurosci. 16, 169–185.* *Huguenard, J.R., and Prince, D.A. (1992). A novel T-type current underlies prolonged Ca(2+)-dependent burst firing in GABAergic neurons of rat thalamic reticular nucleus. J. Neurosci. 
12, 3804–3817.* ''' # Neuron name name = 'RE' # ------------------------------ Biophysical parameters ------------------------------ # Resting parameters Vm0 = -89.5 # Membrane potential (mV) # Reversal potentials (mV) ELeak = -90.0 # Non-specific leakage # Maximal channel conductances (S/m2) gNabar = 2000.0 # Sodium gKdbar = 200.0 # Delayed-rectifier Potassium gCaTbar = 30.0 # Low-threshold Calcium gLeak = 0.5 # Non-specific leakage # Additional parameters VT = -67.0 # Spike threshold adjustment parameter (mV) area = 14.00e-9 # Cell membrane area (m2) # ------------------------------ States names & descriptions ------------------------------ states = { 'm': 'iNa activation gate', 'h': 'iNa inactivation gate', 'n': 'iKd gate', 's': 'iCaT activation gate', 'u': 'iCaT inactivation gate' } # ------------------------------ Gating states kinetics ------------------------------ @staticmethod def sinf(Vm): return 1.0 / (1.0 + np.exp(-(Vm + 52.0) / 7.4)) @staticmethod def taus(Vm): return (1 + 0.33 / (np.exp((Vm + 27.0) / 10.0) + np.exp(-(Vm + 102.0) / 15.0))) * 1e-3 # s @staticmethod def uinf(Vm): return 1.0 / (1.0 + np.exp((Vm + 80.0) / 5.0)) @staticmethod def tauu(Vm): return (28.3 + 0.33 / ( np.exp((Vm + 48.0) / 4.0) + np.exp(-(Vm + 407.0) / 50.0))) * 1e-3 # s @addSonicFeatures class ThalamoCortical(Thalamic): ''' Thalamo-cortical neuron References: *Pospischil, M., Toledo-Rodriguez, M., Monier, C., Piwkowska, Z., Bal, T., Frégnac, Y., Markram, H., and Destexhe, A. (2008). Minimal Hodgkin-Huxley type models for different classes of cortical and thalamic neurons. Biol Cybern 99, 427–441.* *Destexhe, A., Bal, T., McCormick, D.A., and Sejnowski, T.J. (1996). Ionic mechanisms underlying synchronized oscillations and propagating waves in a model of ferret thalamic slices. J. Neurophysiol. 76, 2049–2070.* Model of Ca2+ buffering and contribution from iCaT derived from: *McCormick, D.A., and Huguenard, J.R. (1992). A model of the electrophysiological properties of thalamocortical relay neurons. J. Neurophysiol. 
68, 1384–1400.* ''' # Neuron name name = 'TC' # ------------------------------ Biophysical parameters ------------------------------ # Resting parameters # Vm0 = -63.4 # Membrane potential (mV) Vm0 = -61.93 # Membrane potential (mV) # Reversal potentials (mV) EH = -40.0 # Mixed cationic current ELeak = -70.0 # Non-specific leakage # Maximal channel conductances (S/m2) gNabar = 900.0 # Sodium gKdbar = 100.0 # Delayed-rectifier Potassium gCaTbar = 20.0 # Low-threshold Calcium gKLeak = 0.138 # Leakage Potassium gHbar = 0.175 # Mixed cationic current gLeak = 0.1 # Non-specific leakage # Additional parameters VT = -52.0 # Spike threshold adjustment parameter (mV) Vx = 0.0 # Voltage-dependence uniform shift factor at 36°C (mV) taur_Cai = 5e-3 # decay time constant for intracellular Ca2+ dissolution (s) Cai_min = 50e-9 # minimal intracellular Calcium concentration (M) deff = 100e-9 # effective depth beneath membrane for intracellular [Ca2+] calculation nCa = 4 # number of Calcium binding sites on regulating factor k1 = 2.5e22 # intracellular Ca2+ regulation factor (M-4 s-1) k2 = 0.4 # intracellular Ca2+ regulation factor (s-1) k3 = 100.0 # intracellular Ca2+ regulation factor (s-1) k4 = 1.0 # intracellular Ca2+ regulation factor (s-1) area = 29.00e-9 # Cell membrane area (m2) # ------------------------------ States names & descriptions ------------------------------ states = { 'm': 'iNa activation gate', 'h': 'iNa inactivation gate', 'n': 'iKd gate', 's': 'iCaT activation gate', 'u': 'iCaT inactivation gate', 'Cai': 'submembrane Ca2+ concentration (M)', 'P0': 'proportion of unbound iH regulating factor', 'O': 'iH gate open state', 'C': 'iH gate closed state', } def __new__(cls): cls.current_to_molar_rate_Ca = cls.currentToConcentrationRate(Z_Ca, cls.deff) return super(ThalamoCortical, cls).__new__(cls) @staticmethod def OL(O, C): ''' O-gate locked-open probability ''' return 1 - O - C @property def pltScheme(self): pltscheme = super().pltScheme pltscheme['i_{H}\\ kin.'] = ['O', 'OL', 'P0'] pltscheme['[Ca^{2+}]_i'] = ['Cai'] return pltscheme @classmethod def getPltVars(cls, wrapleft='df["', wrapright='"]'): return {**super().getPltVars(wrapleft, wrapright), **{ 'Cai': { 'desc': 'submembrane Ca2+ concentration', 'label': '[Ca^{2+}]_i', 'unit': 'uM', 'factor': 1e6 }, 'OL': { 'desc': 'iH O-gate locked-opening', 'label': 'O_L', 'bounds': (-0.1, 1.1), 'func': f'OL({wrapleft}O{wrapright}, {wrapleft}C{wrapright})' }, 'P0': { 'desc': 'iH regulating factor activation', 'label': 'P_0', 'bounds': (-0.1, 1.1) } }} # ------------------------------ Gating states kinetics ------------------------------ @classmethod def sinf(cls, Vm): return 1.0 / (1.0 + np.exp(-(Vm + cls.Vx + 57.0) / 6.2)) @classmethod def taus(cls, Vm): x = np.exp(-(Vm + cls.Vx + 132.0) / 16.7) + np.exp((Vm + cls.Vx + 16.8) / 18.2) return 1.0 / 3.7 * (0.612 + 1.0 / x) * 1e-3 # s @classmethod def uinf(cls, Vm): return 1.0 / (1.0 + np.exp((Vm + cls.Vx + 81.0) / 4.0)) @classmethod def tauu(cls, Vm): if Vm + cls.Vx < -80.0: return 1.0 / 3.7 * np.exp((Vm + cls.Vx + 467.0) / 66.6) * 1e-3 # s else: return 1 / 3.7 * (np.exp(-(Vm + cls.Vx + 22) / 10.5) + 28.0) * 1e-3 # s @staticmethod def oinf(Vm): return 1.0 / (1.0 + np.exp((Vm + 75.0) / 5.5)) @staticmethod def tauo(Vm): return 1 / (np.exp(-14.59 - 0.086 * Vm) + np.exp(-1.87 + 0.0701 * Vm)) * 1e-3 # s @classmethod def alphao(cls, Vm): return cls.oinf(Vm) / cls.tauo(Vm) # s-1 @classmethod def betao(cls, Vm): return (1 - cls.oinf(Vm)) / cls.tauo(Vm) # s-1 # ------------------------------ States
derivatives ------------------------------ @classmethod def derStates(cls): return {**super().derStates(), **{ 'Cai': lambda Vm, x: ((cls.Cai_min - x['Cai']) / cls.taur_Cai - cls.current_to_molar_rate_Ca * cls.iCaT(x['s'], x['u'], Vm)), # M/s 'P0': lambda _, x: cls.k2 * (1 - x['P0']) - cls.k1 * x['P0'] * x['Cai']**cls.nCa, 'O': lambda Vm, x: (cls.alphao(Vm) * x['C'] - cls.betao(Vm) * x['O'] - cls.k3 * x['O'] * (1 - x['P0']) + cls.k4 * (1 - x['O'] - x['C'])), 'C': lambda Vm, x: cls.betao(Vm) * x['O'] - cls.alphao(Vm) * x['C'], }} # ------------------------------ Steady states ------------------------------ @classmethod def steadyStates(cls): lambda_dict = super().steadyStates() lambda_dict['Cai'] = lambda Vm: (cls.Cai_min - cls.taur_Cai * cls.current_to_molar_rate_Ca * cls.iCaT(cls.sinf(Vm), cls.uinf(Vm), Vm)) # M lambda_dict['P0'] = lambda Vm: cls.k2 / (cls.k2 + cls.k1 * lambda_dict['Cai'](Vm)**cls.nCa) lambda_dict['O'] = lambda Vm: (cls.k4 / (cls.k3 * (1 - lambda_dict['P0'](Vm)) + cls.k4 * (1 + cls.betao(Vm) / cls.alphao(Vm)))) lambda_dict['C'] = lambda Vm: cls.betao(Vm) / cls.alphao(Vm) * lambda_dict['O'](Vm) return lambda_dict # ------------------------------ Membrane currents ------------------------------ @classmethod def iKLeak(cls, Vm): ''' Potassium leakage current ''' return cls.gKLeak * (Vm - cls.EK) # mA/m2 @classmethod def iH(cls, O, C, Vm): ''' outward mixed cationic current ''' return cls.gHbar * (O + 2 * cls.OL(O, C)) * (Vm - cls.EH) # mA/m2 @classmethod def currents(cls): return {**super().currents(), **{ 'iKLeak': lambda Vm, x: cls.iKLeak(Vm), 'iH': lambda Vm, x: cls.iH(x['O'], x['C'], Vm) }} -class ThalamoCorticalTweak70(ThalamoCortical): +# class ThalamoCorticalTweak70(ThalamoCortical): - name = 'TCtweak70' - Vm0 = -70.0 - gKLeak = 0.377 +# name = 'TCtweak70' +# Vm0 = -70.0 +# gKLeak = 0.377 -class ThalamoCorticalTweak50(ThalamoCortical): +# class ThalamoCorticalTweak50(ThalamoCortical): - name = 'TCtweak50' - Vm0 = -50.0 - gCaTbar = 140.0 # or gHbar = 51.5 +# name = 'TCtweak50' +# Vm0 = -50.0 +# gCaTbar = 140.0 # or gHbar = 51.5 diff --git a/PySONIC/plt/timeseries.py b/PySONIC/plt/timeseries.py index fc64963..aa2214e 100644 --- a/PySONIC/plt/timeseries.py +++ b/PySONIC/plt/timeseries.py @@ -1,507 +1,506 @@ # -*- coding: utf-8 -*- # @Author: Theo Lemaire # @Email: theo.lemaire@epfl.ch # @Date: 2018-09-25 16:18:45 # @Last Modified by: Theo Lemaire -# @Last Modified time: 2020-05-07 16:18:25 +# @Last Modified time: 2020-05-27 21:37:20 import numpy as np import matplotlib.pyplot as plt from ..postpro import detectSpikes, convertPeaksProperties from ..utils import * from .pltutils import * class TimeSeriesPlot(GenericPlot): ''' Generic interface to build a plot displaying temporal profiles of model simulations. ''' @classmethod def setTimeLabel(cls, ax, tplt, fs): return super().setXLabel(ax, tplt, fs) @classmethod def setYLabel(cls, ax, yplt, fs, grouplabel=None): if grouplabel is not None: yplt['label'] = grouplabel return super().setYLabel(ax, yplt, fs) def checkInputs(self, *args, **kwargs): raise NotImplementedError @staticmethod def getStimStates(df): try: stimstate = df['stimstate'] except KeyError: stimstate = df['states'] return stimstate.values @classmethod def getStimPulses(cls, t, states): ''' Determine the onset and offset times of pulses from a stimulation vector. :param t: time vector (s). :param states: a vector of stimulation state (ON/OFF) at each instant in time. :return: list of 3-tuples start time, end time and value of each pulse. 
''' # Compute states derivatives and identify transition indexes dstates = np.diff(states) itransitions = np.where(np.abs(dstates) > 0)[0] + 1 if states[0] != 0.: itransitions = np.hstack(([0], itransitions)) if states[-1] != 0: itransitions = np.hstack((itransitions, [t.size - 1])) pulses = list(zip(t[itransitions[:-1]], t[itransitions[1:]], states[itransitions[:-1]])) return list(filter(lambda x: x[2] != 0, pulses)) def addLegend(self, fig, ax, handles, labels, fs, color=None, ls=None): lh = ax.legend(handles, labels, loc=1, fontsize=fs, frameon=False) if color is not None: for l in lh.get_lines(): l.set_color(color) if ls: for l in lh.get_lines(): l.set_linestyle(ls) @classmethod def materializeSpikes(cls, ax, data, tplt, yplt, color, mode, add_to_legend=False): ispikes, properties = detectSpikes(data) t = data['t'].values Qm = data['Qm'].values if ispikes is not None: yoffset = 5 ax.plot(t[ispikes] * tplt['factor'], Qm[ispikes] * yplt['factor'] + yoffset, 'v', color=color, label='spikes' if add_to_legend else None) if mode == 'details': ileft = properties['left_bases'] iright = properties['right_bases'] properties = convertPeaksProperties(t, properties) ax.plot(t[ileft] * tplt['factor'], Qm[ileft] * yplt['factor'] - 5, '<', color=color, label='left-bases' if add_to_legend else None) ax.plot(t[iright] * tplt['factor'], Qm[iright] * yplt['factor'] - 10, '>', color=color, label='right-bases' if add_to_legend else None) ax.vlines( x=t[ispikes] * tplt['factor'], ymin=(Qm[ispikes] - properties['prominences']) * yplt['factor'], ymax=Qm[ispikes] * yplt['factor'], color=color, linestyles='dashed', label='prominences' if add_to_legend else '') ax.hlines( y=properties['width_heights'] * yplt['factor'], xmin=properties['left_ips'] * tplt['factor'], xmax=properties['right_ips'] * tplt['factor'], color=color, linestyles='dotted', label='half-widths' if add_to_legend else '') return add_to_legend @staticmethod def prepareTime(t, tplt): if tplt['onset'] > 0.0: tonset = t.min() - 0.05 * np.ptp(t) t = np.insert(t, 0, tonset) return t * tplt['factor'] @staticmethod def getPatchesColors(x): if np.all([xx == x[0] for xx in x]): return ['#8A8A8A'] * len(x) else: xabsmax = np.abs(x).max() _, sm = setNormalizer(plt.get_cmap('RdGy'), (-xabsmax, xabsmax), 'lin') return [sm.to_rgba(xx) for xx in x] @classmethod def addPatches(cls, ax, pulses, tplt, color=None): tstart, tend, x = zip(*pulses) if color is None: colors = cls.getPatchesColors(x) else: colors = [color] * len(x) for i in range(len(pulses)): ax.axvspan(tstart[i] * tplt['factor'], tend[i] * tplt['factor'], edgecolor='none', facecolor=colors[i], alpha=0.2) @staticmethod def plotInset(inset_ax, inset, t, y, tplt, yplt, line, color, lw): inset_ax.plot(t, y, linewidth=lw, linestyle=line, color=color) return inset_ax - @staticmethod - def addInsetPatches(ax, inset_ax, inset, pulses, tplt, color): + @classmethod + def addInsetPatches(cls, ax, inset_ax, inset, pulses, tplt, color): tstart, tend, x = [np.array([z]) for z in zip(*pulses)] - # colors = cls.getPatchesColors(x) tfactor = tplt['factor'] ybottom, ytop = ax.get_ylim() cond_start = np.logical_and(tstart > (inset['xlims'][0] / tfactor), tstart < (inset['xlims'][1] / tfactor)) cond_end = np.logical_and(tend > (inset['xlims'][0] / tfactor), tend < (inset['xlims'][1] / tfactor)) cond_glob = np.logical_and(tstart < (inset['xlims'][0] / tfactor), tend > (inset['xlims'][1] / tfactor)) cond_onoff = np.logical_or(cond_start, cond_end) cond = np.logical_or(cond_onoff, cond_glob) tstart, tend, x = [z[cond] for z 
in [tstart, tend, x]] colors = cls.getPatchesColors(x) npatches_inset = tstart.size for i in range(npatches_inset): inset_ax.add_patch(Rectangle( (tstart[i] * tfactor, ybottom), (tend[i] - tstart[i]) * tfactor, ytop - ybottom, color=colors[i], alpha=0.1)) class CompTimeSeries(ComparativePlot, TimeSeriesPlot): ''' Interface to build a comparative plot displaying profiles of a specific output variable across different model simulations. ''' def __init__(self, outputs, varname): ''' Constructor. :param outputs: list / generator of simulator outputs to be compared. :param varname: name of variable to extract and compare ''' ComparativePlot.__init__(self, outputs, varname) def checkPatches(self, patches): self.greypatch = False if patches == 'none': self.patchfunc = lambda _: False elif patches == 'all': self.patchfunc = lambda _: True elif patches == 'one': self.patchfunc = lambda j: True if j == 0 else False self.greypatch = True elif isinstance(patches, list): if not all(isinstance(p, bool) for p in patches): raise TypeError('Invalid patch sequence: all list items must be boolean typed') self.patchfunc = lambda j: patches[j] if len(patches) > j else False else: raise ValueError( 'Invalid patches: must be either "none", all", "one", or a boolean list') def checkInputs(self, labels, patches): self.checkLabels(labels) self.checkPatches(patches) @staticmethod def createBackBone(figsize): fig, ax = plt.subplots(figsize=figsize) ax.set_zorder(0) return fig, ax @classmethod def postProcess(cls, ax, tplt, yplt, fs, meta, prettify): cls.removeSpines(ax) if 'bounds' in yplt: ymin, ymax = ax.get_ylim() ax.set_ylim(min(ymin, yplt['bounds'][0]), max(ymax, yplt['bounds'][1])) elif 'strictbounds' in yplt: ax.set_ylim(*yplt['strictbounds']) cls.setTimeLabel(ax, tplt, fs) cls.setYLabel(ax, yplt, fs) if prettify: cls.prettify(ax, xticks=(0, meta['tstim'] * tplt['factor'])) cls.setTickLabelsFontSize(ax, fs) def render(self, figsize=(11, 4), fs=10, lw=2, labels=None, colors=None, lines=None, patches='one', inset=None, frequency=1, spikes='none', cmap=None, cscale='lin', trange=None, prettify=False): ''' Render plot. 
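For clarity, the patches argument handled by checkPatches above boils down to mapping the user-facing specification onto a per-curve predicate; the sketch below restates that logic in isolation.

def patch_predicate(patches):
    # decide, for the j-th simulation output, whether stimulation patches are drawn
    if patches == 'none':
        return lambda j: False
    if patches == 'all':
        return lambda j: True
    if patches == 'one':
        return lambda j: j == 0                     # only the first curve gets patches
    if isinstance(patches, list):
        if not all(isinstance(p, bool) for p in patches):
            raise TypeError('all list items must be boolean typed')
        return lambda j: patches[j] if len(patches) > j else False
    raise ValueError('patches must be "none", "all", "one", or a boolean list')

f = patch_predicate('one')
print([f(j) for j in range(3)])                     # [True, False, False]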
:param figsize: figure size (x, y) :param fs: labels fontsize :param lw: linewidth :param labels: list of labels to use in the legend :param colors: list of colors to use for each curve :param lines: list of linestyles :param patches: string indicating whether/how to mark stimulation periods with rectangular patches :param inset: string indicating whether/how to mark an inset zooming on a particular region of the graph :param frequency: frequency at which to plot samples :param spikes: string indicating how to show spikes ("none", "marks" or "details") :param cmap: color map to use for colobar-based comparison (if not None) :param cscale: color scale to use for colobar-based comparison :param trange: optional lower and upper bounds to time axis :return: figure handle ''' self.checkInputs(labels, patches) fcodes = [] fig, ax = self.createBackBone(figsize) if inset is not None: inset_ax = self.addInset(fig, ax, inset) # Loop through data files handles, comp_values, full_labels = [], [], [] tmin, tmax = np.inf, -np.inf for j, output in enumerate(self.outputs): color = f'C{j}' if colors is None else colors[j] line = '-' if lines is None else lines[j] patch = self.patchfunc(j) # Load data try: data, meta = self.getData(output, frequency, trange) except ValueError: continue if 'tcomp' in meta: meta.pop('tcomp') # Extract model model = self.getModel(meta) fcodes.append(model.filecode(meta)) # Add label to list full_labels.append(self.figtitle(model, meta)) # Check consistency of sim types and check differing inputs comp_values = self.checkConsistency(meta, comp_values) # Extract time and stim pulses t = data['t'].values stimstate = self.getStimStates(data) pulses = self.getStimPulses(t, stimstate) tplt = self.getTimePltVar(model.tscale) t = self.prepareTime(t, tplt) # Extract y-variable pltvars = model.getPltVars() if self.varname not in pltvars: pltvars_str = ', '.join([f'"{p}"' for p in pltvars.keys()]) raise KeyError( f'Unknown plot variable: "{self.varname}". 
Candidates are: {pltvars_str}') yplt = pltvars[self.varname] y = extractPltVar(model, yplt, data, meta, t.size, self.varname) # Plot time series handles.append(ax.plot(t, y, linewidth=lw, linestyle=line, color=color)[0]) # Optional: add spikes if self.varname == 'Qm' and spikes != 'none': self.materializeSpikes(ax, data, tplt, yplt, color, spikes) # Plot optional inset if inset is not None: inset_ax = self.plotInset( inset_ax, inset, t, y, tplt, yplt, lines[j], color, lw) # Add optional STIM-ON patches if patch: ybottom, ytop = ax.get_ylim() patchcolor = None if self.greypatch else handles[j].get_color() self.addPatches(ax, pulses, tplt, color=patchcolor) if inset is not None: self.addInsetPatches(ax, inset_ax, inset, pulses, tplt, patchcolor) tmin, tmax = min(tmin, t.min()), max(tmax, t.max()) # Get common label and add it as title common_label = self.getCommonLabel(full_labels.copy(), seps=':@,()') self.wraptitle(ax, common_label, fs=fs) # Get comp info if any if self.comp_ref_key is not None: self.comp_info = model.inputs().get(self.comp_ref_key, None) # Post-process figure self.postProcess(ax, tplt, yplt, fs, meta, prettify) ax.set_xlim(tmin, tmax) fig.tight_layout() # Materialize inset if any if inset is not None: self.materializeInset(ax, inset_ax, inset) # Add labels or colorbar legend if cmap is not None: if not self.is_unique_comp: raise ValueError('Colormap mode unavailable for multiple differing parameters') if self.comp_info is None: raise ValueError('Colormap mode unavailable for qualitative comparisons') self.addCmap( fig, cmap, handles, comp_values, self.comp_info, fs, prettify, zscale=cscale) else: comp_values, comp_labels = self.getCompLabels(comp_values) labels = self.chooseLabels(labels, comp_labels, full_labels) self.addLegend(fig, ax, handles, labels, fs) # Add window title based on common pattern common_fcode = self.getCommonLabel(fcodes.copy()) fig.canvas.set_window_title(common_fcode) return fig class GroupedTimeSeries(TimeSeriesPlot): ''' Interface to build a plot displaying profiles of several output variables arranged into specific schemes. ''' def __init__(self, outputs, pltscheme=None): ''' Constructor. :param outputs: list / generator of simulation outputs. :param varname: name of variable to extract and compare ''' super().__init__(outputs) self.pltscheme = pltscheme @staticmethod def createBackBone(pltscheme): naxes = len(pltscheme) if naxes == 1: fig, ax = plt.subplots(figsize=(11, 4)) axes = [ax] else: fig, axes = plt.subplots(naxes, 1, figsize=(11, min(3 * naxes, 9))) return fig, axes @classmethod def postProcess(cls, axes, tplt, fs, meta, prettify): for ax in axes: cls.removeSpines(ax) if prettify: cls.prettify(ax, xticks=(0, meta['pp'].tstim * tplt['factor']), yfmt=None) cls.setTickLabelsFontSize(ax, fs) for ax in axes[:-1]: ax.get_shared_x_axes().join(ax, axes[-1]) ax.set_xticklabels([]) cls.setTimeLabel(axes[-1], tplt, fs) def render(self, fs=10, lw=2, labels=None, colors=None, lines=None, patches='one', save=False, outputdir=None, fig_ext='png', frequency=1, spikes='none', trange=None, prettify=False): ''' Render plot. 
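The stacked layout produced by createBackBone and postProcess can be pictured with the simplified matplotlib sketch below (illustration only: the real code couples the axes via get_shared_x_axes().join() rather than sharex, and strips spines through removeSpines).

import numpy as np
import matplotlib.pyplot as plt

t = np.linspace(0., 0.1, 200)                            # s
fig, axes = plt.subplots(3, 1, figsize=(11, 9), sharex=True)
for ax, f in zip(axes, (10, 20, 50)):
    ax.plot(t * 1e3, np.sin(2 * np.pi * f * t))          # one variable group per row
    ax.spines['top'].set_visible(False)                  # mimic removeSpines
    ax.spines['right'].set_visible(False)
axes[-1].set_xlabel('time (ms)')                         # only the bottom axis gets a time label
plt.show()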
:param fs: labels fontsize :param lw: linewidth :param labels: list of labels to use in the legend :param colors: list of colors to use for each curve :param lines: list of linestyles :param patches: boolean indicating whether to mark stimulation periods with rectangular patches :param save: boolean indicating whether or not to save the figure(s) :param outputdir: path to output directory in which to save figure(s) :param fig_ext: string indcating figure extension ("png", "pdf", ...) :param frequency: frequency at which to plot samples :param spikes: string indicating how to show spikes ("none", "marks" or "details") :param trange: optional lower and upper bounds to time axis :return: figure handle(s) ''' if colors is None: colors = plt.get_cmap('tab10').colors figs = [] for output in self.outputs: # Load data and extract model try: data, meta = self.getData(output, frequency, trange) except ValueError: continue model = self.getModel(meta) # Extract time and stim pulses t = data['t'].values stimstate = self.getStimStates(data) pulses = self.getStimPulses(t, stimstate) tplt = self.getTimePltVar(model.tscale) t = self.prepareTime(t, tplt) # Check plot scheme if provided, otherwise generate it pltvars = model.getPltVars() if self.pltscheme is not None: for key in list(sum(list(self.pltscheme.values()), [])): if key not in pltvars: raise KeyError(f'Unknown plot variable: "{key}"') pltscheme = self.pltscheme else: pltscheme = model.pltScheme # Create figure fig, axes = self.createBackBone(pltscheme) # Loop through each subgraph for ax, (grouplabel, keys) in zip(axes, pltscheme.items()): ax_legend_spikes = False # Extract variables to plot nvars = len(keys) ax_pltvars = [pltvars[k] for k in keys] if nvars == 1: ax_pltvars[0]['color'] = 'k' ax_pltvars[0]['ls'] = '-' # Plot time series icolor = 0 for yplt, name in zip(ax_pltvars, pltscheme[grouplabel]): color = yplt.get('color', colors[icolor]) y = extractPltVar(model, yplt, data, meta, t.size, name) ax.plot(t, y, yplt.get('ls', '-'), c=color, lw=lw, label='$\\rm {}$'.format(yplt["label"])) if 'color' not in yplt: icolor += 1 # Optional: add spikes if name == 'Qm' and spikes != 'none': ax_legend_spikes = self.materializeSpikes( ax, data, tplt, yplt, color, spikes, add_to_legend=True) # Set y-axis unit and bounds self.setYLabel(ax, ax_pltvars[0].copy(), fs, grouplabel=grouplabel) if 'bounds' in ax_pltvars[0]: ymin, ymax = ax.get_ylim() ax_min = min(ymin, *[ap['bounds'][0] for ap in ax_pltvars]) ax_max = max(ymax, *[ap['bounds'][1] for ap in ax_pltvars]) ax.set_ylim(ax_min, ax_max) # Add legend if nvars > 1 or 'gate' in ax_pltvars[0]['desc'] or ax_legend_spikes: ax.legend(fontsize=fs, loc=7, ncol=nvars // 4 + 1, frameon=False) # Set x-limits and add optional patches for ax in axes: ax.set_xlim(t.min(), t.max()) if patches != 'none': self.addPatches(ax, pulses, tplt) # Post-process figure self.postProcess(axes, tplt, fs, meta, prettify) self.wraptitle(axes[0], self.figtitle(model, meta), fs=fs) fig.tight_layout() fig.canvas.set_window_title(model.filecode(meta)) # Save figure if needed (automatic or checked) if save: filecode = model.filecode(meta) if outputdir is None: raise ValueError('output directory not specified') plt_filename = f'{outputdir}/{filecode}.{fig_ext}' plt.savefig(plt_filename) logger.info(f'Saving figure as "{plt_filename}"') plt.close() figs.append(fig) return figs if __name__ == '__main__': # example of use filepaths = OpenFilesDialog('pkl')[0] comp_plot = CompTimeSeries(filepaths, 'Qm') fig = comp_plot.render( lines=['-', '--'], 
labels=['60 kPa', '80 kPa'], patches='one', colors=['r', 'g'], xticks=[0, 100], yticks=[-80, +50], inset={'xcoords': [5, 40], 'ycoords': [-35, 45], 'xlims': [57.5, 60.5], 'ylims': [10, 35]} ) scheme_plot = GroupedTimeSeries(filepaths) figs = scheme_plot.render() plt.show() diff --git a/PySONIC/utils.py b/PySONIC/utils.py index 88e6a88..8ebcc71 100644 --- a/PySONIC/utils.py +++ b/PySONIC/utils.py @@ -1,996 +1,1096 @@ # -*- coding: utf-8 -*- # @Author: Theo Lemaire # @Email: theo.lemaire@epfl.ch # @Date: 2016-09-19 22:30:46 # @Last Modified by: Theo Lemaire -# @Last Modified time: 2020-05-04 12:02:06 +# @Last Modified time: 2020-06-03 16:54:41 ''' Definition of generic utility functions used in other modules ''' import csv from functools import wraps import operator import time from inspect import signature import os from shutil import get_terminal_size import lockfile import math import pickle import json from tqdm import tqdm import logging import tkinter as tk from tkinter import filedialog import base64 import datetime import numpy as np +import pandas as pd from scipy.optimize import brentq +from scipy.interpolate import interp1d from scipy import linalg import colorlog from pushbullet import Pushbullet # Package logger my_log_formatter = colorlog.ColoredFormatter( '%(log_color)s %(asctime)s %(message)s', datefmt='%d/%m/%Y %H:%M:%S:', reset=True, log_colors={ 'DEBUG': 'green', 'INFO': 'white', 'WARNING': 'yellow', 'ERROR': 'red', 'CRITICAL': 'red,bg_white', }, style='%') def setHandler(logger, handler): for h in logger.handlers: logger.removeHandler(h) logger.addHandler(handler) return logger def setLogger(name, formatter): handler = colorlog.StreamHandler() handler.setFormatter(formatter) logger = colorlog.getLogger(name) logger.addHandler(handler) return logger class TqdmHandler(logging.StreamHandler): def __init__(self, formatter): logging.StreamHandler.__init__(self) self.setFormatter(formatter) def emit(self, record): msg = self.format(record) tqdm.write(msg) logger = setLogger('PySONIC', my_log_formatter) def fillLine(text, char='-', totlength=None): ''' Surround a text with repetitions of a specific character in order to fill a line to a given total length. :param text: text to be surrounded :param char: surrounding character :param totlength: target number of characters in filled text line :return: filled text line ''' if totlength is None: totlength = get_terminal_size().columns - 1 ndashes = totlength - len(text) - 2 if ndashes < 2: return text else: nside = ndashes // 2 nleft, nright = nside, nside if ndashes % 2 == 1: nright += 1 return f'{char * nleft} {text} {char * nright}' # SI units prefixes si_prefixes = { 'y': 1e-24, # yocto 'z': 1e-21, # zepto 'a': 1e-18, # atto 'f': 1e-15, # femto 'p': 1e-12, # pico 'n': 1e-9, # nano 'u': 1e-6, # micro 'm': 1e-3, # mili '': 1e0, # None 'k': 1e3, # kilo 'M': 1e6, # mega 'G': 1e9, # giga 'T': 1e12, # tera 'P': 1e15, # peta 'E': 1e18, # exa 'Z': 1e21, # zetta 'Y': 1e24, # yotta } sorted_si_prefixes = sorted(si_prefixes.items(), key=operator.itemgetter(1)) def getSIpair(x, scale='lin'): ''' Get the correct SI factor and prefix for a floating point number. 
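A quick usage example of the fillLine helper defined above (assuming the package is importable): the text is centered and padded with the filler character up to the requested total length.

from PySONIC.utils import fillLine

line = fillLine('simulation start', char='-', totlength=40)
print(line)         # dashes on both sides, text centered
print(len(line))    # 40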
''' if isIterable(x): # If iterable, get a representative number of the distribution x = np.asarray(x) x = x.prod()**(1.0 / x.size) if scale == 'log' else np.mean(x) if x == 0: return 1e0, '' else: vals = [tmp[1] for tmp in sorted_si_prefixes] ix = np.searchsorted(vals, np.abs(x)) - 1 if np.abs(x) == vals[ix + 1]: ix += 1 return vals[ix], sorted_si_prefixes[ix][0] def si_format(x, precision=0, space=' '): ''' Format a float according to the SI unit system, with the appropriate prefix letter. ''' if isinstance(x, float) or isinstance(x, int) or isinstance(x, np.float) or\ isinstance(x, np.int32) or isinstance(x, np.int64): factor, prefix = getSIpair(x) return f'{x / factor:.{precision}f}{space}{prefix}' elif isinstance(x, list) or isinstance(x, tuple): return [si_format(item, precision, space) for item in x] elif isinstance(x, np.ndarray) and x.ndim == 1: return [si_format(float(item), precision, space) for item in x] else: raise ValueError(f'cannot si_format {type(x)} objects') def pow10_format(number, precision=2): ''' Format a number in power of 10 notation. ''' sci_string = f'{number:.{precision}e}' value, exponent = sci_string.split("e") value, exponent = float(value), int(exponent) val_str = f'{value} * ' if value != 1. else '' return f'{val_str}10^{{{exponent}}}' def rmse(x1, x2, axis=None): ''' Compute the root mean square error between two 1D arrays ''' return np.sqrt(((x1 - x2) ** 2).mean(axis=axis)) def rsquared(x1, x2): ''' compute the R-squared coefficient between two 1D arrays ''' residuals = x1 - x2 ss_res = np.sum(residuals**2) ss_tot = np.sum((x1 - np.mean(x1))**2) return 1 - (ss_res / ss_tot) def Pressure2Intensity(p, rho=1075.0, c=1515.0): ''' Return the spatial peak, pulse average acoustic intensity (ISPPA) associated with the specified pressure amplitude. :param p: pressure amplitude (Pa) :param rho: medium density (kg/m3) :param c: speed of sound in medium (m/s) :return: spatial peak, pulse average acoustic intensity (W/m2) ''' return p**2 / (2 * rho * c) def Intensity2Pressure(I, rho=1075.0, c=1515.0): ''' Return the pressure amplitude associated with the specified spatial peak, pulse average acoustic intensity (ISPPA). :param I: spatial peak, pulse average acoustic intensity (W/m2) :param rho: medium density (kg/m3) :param c: speed of sound in medium (m/s) :return: pressure amplitude (Pa) ''' return np.sqrt(2 * rho * c * I) def convertPKL2JSON(): for pkl_filepath in OpenFilesDialog('pkl')[0]: logger.info(f'Processing {pkl_filepath} ...') json_filepath = f'{os.path.splitext(pkl_filepath)[0]}.json' with open(pkl_filepath, 'rb') as fpkl, open(json_filepath, 'w') as fjson: data = pickle.load(fpkl) json.dump(data, fjson, ensure_ascii=False, sort_keys=True, indent=4) logger.info('All done!') def OpenFilesDialog(filetype, dirname=''): ''' Open a FileOpenDialogBox to select one or multiple file. The default directory and file type are given. :param dirname: default directory :param filetype: default file type :return: tuple of full paths to the chosen filenames ''' root = tk.Tk() root.withdraw() filenames = filedialog.askopenfilenames( filetypes=[(filetype + " files", '.' + filetype)], initialdir=dirname ) if len(filenames) == 0: raise ValueError('no input file selected') par_dir = os.path.abspath(os.path.join(filenames[0], os.pardir)) return filenames, par_dir def selectDirDialog(title='Select directory'): ''' Open a dialog box to select a directory. 
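A short worked example of the acoustic conversions defined above, I = p^2 / (2 rho c): with the default medium (rho = 1075 kg/m3, c = 1515 m/s), a 100 kPa pressure amplitude corresponds to roughly 3.1 kW/m2, and converting back recovers the pressure.

from PySONIC.utils import Pressure2Intensity, Intensity2Pressure

I = Pressure2Intensity(100e3)            # W/m2, around 3.07e3
p = Intensity2Pressure(I)                # Pa, back to ~1e5
print(f'{I:.1f} W/m2 <-> {p * 1e-3:.1f} kPa')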
:return: full path to selected directory ''' root = tk.Tk() root.withdraw() directory = filedialog.askdirectory(title=title) if directory == '': raise ValueError('no directory selected') return directory def SaveFileDialog(filename, dirname=None, ext=None): ''' Open a dialog box to save file. :param filename: filename :param dirname: initial directory :param ext: default extension :return: full path to the chosen filename ''' root = tk.Tk() root.withdraw() filename_out = filedialog.asksaveasfilename( defaultextension=ext, initialdir=dirname, initialfile=filename) if len(filename_out) == 0: raise ValueError('no output filepath selected') return filename_out def loadData(fpath, frequency=1): ''' Load dataframe and metadata dictionary from pickle file. ''' logger.info('Loading data from "%s"', os.path.basename(fpath)) with open(fpath, 'rb') as fh: frame = pickle.load(fh) df = frame['data'].iloc[::frequency] meta = frame['meta'] return df, meta def rescale(x, lb=None, ub=None, lb_new=0, ub_new=1): ''' Rescale a value to a specific interval by linear transformation. ''' if lb is None: lb = x.min() if ub is None: ub = x.max() xnorm = (x - lb) / (ub - lb) return xnorm * (ub_new - lb_new) + lb_new def expandRange(xmin, xmax, exp_factor=2): if xmin > xmax: raise ValueError('values must be provided in (min, max) order') xptp = xmax - xmin xmid = (xmin + xmax) / 2 xdev = xptp * exp_factor / 2 return (xmid - xdev, xmin + xdev) def isIterable(x): for t in [list, tuple, np.ndarray]: if isinstance(x, t): return True return False def isWithin(name, val, bounds, rel_tol=1e-9, raise_warning=True): ''' Check if a floating point number is within an interval. If the value falls outside the interval, an error is raised. If the value falls just outside the interval due to rounding errors, the associated interval bound is returned. 
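A small usage sketch of the rescale helper defined above: values are linearly mapped from their original interval (by default, their own min/max) onto a new one.

import numpy as np
from PySONIC.utils import rescale

x = np.array([-80., -55., -30.])            # e.g. membrane potentials (mV)
print(rescale(x))                           # 0, 0.5, 1
print(rescale(x, lb_new=-1., ub_new=1.))    # -1, 0, 1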
:param val: float value :param bounds: interval bounds (float tuple) :return: original or corrected value ''' if isIterable(val): return np.array([isWithin(name, v, bounds, rel_tol, raise_warning) for v in val]) if val >= bounds[0] and val <= bounds[1]: return val elif val < bounds[0] and math.isclose(val, bounds[0], rel_tol=rel_tol): if raise_warning: logger.warning( 'Rounding %s value (%s) to interval lower bound (%s)', name, val, bounds[0]) return bounds[0] elif val > bounds[1] and math.isclose(val, bounds[1], rel_tol=rel_tol): if raise_warning: logger.warning( 'Rounding %s value (%s) to interval upper bound (%s)', name, val, bounds[1]) return bounds[1] else: raise ValueError(f'{name} value ({val}) out of [{bounds[0]}, {bounds[1]}] interval') def getDistribution(xmin, xmax, nx, scale='lin'): if scale == 'log': xmin, xmax = np.log10(xmin), np.log10(xmax) return {'lin': np.linspace, 'log': np.logspace}[scale](xmin, xmax, nx) def getDistFromList(xlist): if not isinstance(xlist, list): raise TypeError('Input must be a list') if len(xlist) != 4: raise ValueError('List must contain exactly 4 arguments ([type, min, max, n])') scale = xlist[0] if scale not in ('log', 'lin'): raise ValueError('Unknown distribution type (must be "lin" or "log")') xmin, xmax = [float(x) for x in xlist[1:-1]] if xmin >= xmax: raise ValueError('Specified minimum higher or equal than specified maximum') nx = int(xlist[-1]) if nx < 2: raise ValueError('Specified number must be at least 2') return getDistribution(xmin, xmax, nx, scale=scale) def getIndex(container, value): ''' Return the index of a float / string value in a list / array :param container: list / 1D-array of elements :param value: value to search for :return: index of value (if found) ''' if isinstance(value, float): container = np.array(container) imatches = np.where(np.isclose(container, value, rtol=1e-9, atol=1e-16))[0] if len(imatches) == 0: raise ValueError(f'{value} not found in {container}') return imatches[0] elif isinstance(value, str): return container.index(value) def funcSig(func, args, kwargs): args_repr = [repr(a) for a in args] kwargs_repr = [f"{k}={v!r}" for k, v in kwargs.items()] return f'{func.__name__}({", ".join(args_repr + kwargs_repr)})' def debug(func): ''' Print the function signature and return value. ''' @wraps(func) def wrapper_debug(*args, **kwargs): print(f'Calling {funcSig(func, args, kwargs)}') value = func(*args, **kwargs) print(f"{func.__name__!r} returned {value!r}") return value return wrapper_debug def timer(func): ''' Monitor and return the runtime of the decorated function. ''' @wraps(func) def wrapper(*args, **kwargs): start_time = time.perf_counter() value = func(*args, **kwargs) end_time = time.perf_counter() run_time = end_time - start_time return value, run_time return wrapper def alignWithFuncDef(func, args, kwargs): ''' Align a set of provided positional and keyword arguments with the arguments signature in a specific function definition. 
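A usage sketch of the distribution helpers defined above: getDistribution builds a linearly or logarithmically spaced vector, and getDistFromList parses the same request from a [type, min, max, n] list (e.g. coming from a command-line argument).

from PySONIC.utils import getDistribution, getDistFromList

print(getDistribution(0., 10., 5))              # 0, 2.5, 5, 7.5, 10
print(getDistFromList(['log', 1e2, 1e5, 4]))    # 100, 1000, 10000, 100000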
:param func: function object :param args: list of provided positional arguments :param kwargs: dictionary of provided keyword arguments :return: 2-tuple with the modified arguments and ''' # Get positional and keyword arguments from function signature sig_params = {k: v for k, v in signature(func).parameters.items()} sig_args = list(filter(lambda x: x.default == x.empty, sig_params.values())) sig_kwargs = {k: v.default for k, v in sig_params.items() if v.default != v.empty} sig_nargs = len(sig_args) kwarg_keys = list(sig_kwargs.keys()) # Restrain provided positional arguments to those that are also positional in signature new_args = args[:sig_nargs] # Construct hybrid keyword arguments dictionary from: # - remaining positional arguments # - provided keyword arguments # - default keyword arguments new_kwargs = sig_kwargs for i, x in enumerate(args[sig_nargs:]): new_kwargs[kwarg_keys[i]] = x for k, v in kwargs.items(): new_kwargs[k] = v return new_args, new_kwargs def alignWithMethodDef(method, args, kwargs): args, kwargs = alignWithFuncDef(method, [None] + list(args), kwargs) return tuple(args[1:]), kwargs def logCache(fpath, delimiter='\t', out_type=float): ''' Add an extra IO memoization functionality to a function using file caching, to avoid repetitions of tedious computations with identical inputs. ''' def wrapper_with_args(func): @wraps(func) def wrapper(*args, **kwargs): # If function has history -> do not log if 'history' in kwargs: return func(*args, **kwargs) # Modify positional and keyword arguments to match function signature, if needed args, kwargs = alignWithFuncDef(func, args, kwargs) # Translate args and kwargs into string signature fsignature = funcSig(func, args, kwargs) # If entry present in log, return corresponding output if os.path.isfile(fpath): with open(fpath, 'r', newline='') as f: reader = csv.reader(f, delimiter=delimiter) for row in reader: if row[0] == fsignature: logger.debug(f'entry found in "{os.path.basename(fpath)}"') return out_type(row[1]) # Otherwise, compute output and log it into file before returning out = func(*args, **kwargs) lock = lockfile.FileLock(fpath) lock.acquire() with open(fpath, 'a', newline='') as csvfile: writer = csv.writer(csvfile, delimiter=delimiter) writer.writerow([fsignature, str(out)]) lock.release() return out return wrapper return wrapper_with_args def fileCache(root, fcode_func, ext='json'): def wrapper_with_args(func): @wraps(func) def wrapper(*args, **kwargs): # Get load and dump functions from file extension try: load_func = { 'json': json.load, 'pkl': pickle.load, 'csv': lambda f: np.loadtxt(f, delimiter=',') }[ext] dump_func = { 'json': json.dump, 'pkl': pickle.dump, 'csv': lambda x, f: np.savetxt(f, x, delimiter=',') }[ext] except KeyError: raise ValueError('Unknown file extension') # Get read and write mode (text or binary) from file extension mode = 'b' if ext == 'pkl' else '' # Get file path from root and function arguments, using fcode function if callable(fcode_func): fcode = fcode_func(*args) else: fcode = fcode_func fpath = os.path.join(os.path.abspath(root), f'{fcode}.{ext}') # If file exists, load output from it if os.path.isfile(fpath): logger.info(f'loading data from "{fpath}"') with open(fpath, 'r' + mode) as f: out = load_func(f) # Otherwise, execute function and create the file to dump the output else: logger.warning(f'reference data file not found: "{fpath}"') out = func(*args, **kwargs) logger.info(f'dumping data in "{fpath}"') lock = lockfile.FileLock(fpath) lock.acquire() with open(fpath, 'w' + mode) 
as f: dump_func(out, f) lock.release() return out return wrapper return wrapper_with_args def derivative(f, x, eps, method='central'): ''' Compute the difference formula for f'(x) with perturbation size eps. :param dfunc: derivatives function, taking an array of states and returning an array of derivatives :param x: states vector :param method: difference formula: 'forward', 'backward' or 'central' :param eps: perturbation vector (same size as states vector) :return: numerical approximation of the derivative around the fixed point ''' if isIterable(x): if not isIterable(eps) or len(eps) != len(x): raise ValueError('eps must be the same size as x') elif np.sum(eps != 0.) != 1: raise ValueError('eps must be zero-valued across all but one dimensions') eps_val = np.sum(eps) else: eps_val = eps if method == 'central': df = (f(x + eps) - f(x - eps)) / 2 elif method == 'forward': df = f(x + eps) - f(x) elif method == 'backward': df = f(x) - f(x - eps) else: raise ValueError("Method must be 'central', 'forward' or 'backward'.") return df / eps_val def jacobian(dfunc, x, rel_eps=None, abs_eps=None, method='central'): ''' Evaluate the Jacobian maatrix of a (time-invariant) system, given a states vector and derivatives function. :param dfunc: derivatives function, taking an array of n states and returning an array of n derivatives :param x: n-states vector :return: n-by-n square Jacobian matrix ''' if sum(e is not None for e in [abs_eps, rel_eps]) != 1: raise ValueError('one (and only one) of "rel_eps" or "abs_eps" parameters must be provided') # Determine vector size x = np.asarray(x) n = x.size # Initialize Jacobian matrix J = np.empty((n, n)) # Create epsilon vector if rel_eps is not None: mode = 'relative' eps_vec = rel_eps else: mode = 'absolute' eps_vec = abs_eps if not isIterable(eps_vec): eps_vec = np.array([eps_vec] * n) if mode == 'relative': eps = x * eps_vec else: eps = eps_vec # Perturb each state by epsilon on both sides, re-evaluate derivatives # and assemble Jacobian matrix ei = np.zeros(n) for i in range(n): ei[i] = 1 J[:, i] = derivative(dfunc, x, eps * ei, method=method) ei[i] = 0 return J def classifyFixedPoint(x, dfunc): ''' Characterize the stability of a fixed point by numerically evaluating its Jacobian matrix and evaluating the sign of the real part of its associated eigenvalues. :param x: n-states vector :param dfunc: derivatives function, taking an array of n states and returning an array of n derivatives ''' # Compute Jacobian numerically # print(f'x = {x}, dfunx(x) = {dfunc(x)}') eps_machine = np.sqrt(np.finfo(float).eps) J = jacobian(dfunc, x, rel_eps=eps_machine, method='forward') # Compute eigenvalues and eigenvectors eigvals, eigvecs = linalg.eig(J) logger.debug(f"eigenvalues = {[f'({x.real:.2e} + {x.imag:.2e}j)' for x in eigvals]}") # Determine fixed point stability based on eigenvalues is_neg_eigvals = eigvals.real < 0 if is_neg_eigvals.all(): # all real parts negative -> stable key = 'stable' elif is_neg_eigvals.any(): # both posivie and negative real parts -> saddle key = 'saddle' else: # all real parts positive -> unstable key = 'unstable' return eigvals, key def findModifiedEq(x0, dfunc, *args): ''' Find an equilibrium variable in a modified system by searching for its derivative root within an interval around its original equilibrium. :param x0: equilibrium value in original system. :param func: derivative function, taking the variable as first parameter. :param *args: remaining arguments needed for the derivative function. 
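A small worked example of the fixed-point machinery defined above: for a linear system the Jacobian is the system matrix itself, so classifyFixedPoint recovers its eigenvalues (up to finite-difference error) and labels the point accordingly. The toy system below is made up for illustration; its fixed point is chosen non-zero because the Jacobian is evaluated with a relative perturbation.

import numpy as np
from PySONIC.utils import classifyFixedPoint

xstar = np.array([1., 2.])                # fixed point of the toy system
A = np.array([[-1., 0.], [0., -2.]])      # system matrix = Jacobian at the fixed point
dfunc = lambda x: A.dot(x - xstar)        # dx/dt = A (x - x*)

eigvals, key = classifyFixedPoint(xstar, dfunc)
print(np.sort(eigvals.real), key)         # approximately [-2. -1.] stable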
:return: variable equilibrium value in modified system. ''' is_iterable = [isIterable(arg) for arg in args] if any(is_iterable): if not all(is_iterable): raise ValueError('mix of iterables and non-iterables') lengths = [len(arg) for arg in args] if not all(n == lengths[0] for n in lengths): raise ValueError(f'inputs are not of the same size: {lengths}') n = lengths[0] res = [] for i in range(n): x = [arg[i] for arg in args] res.append(findModifiedEq(x0, dfunc, *x)) return np.array(res) else: return brentq(lambda x: dfunc(x, *args), x0 * 1e-4, x0 * 1e3, xtol=1e-16) def swapFirstLetterCase(s): if s[0].islower(): return s.capitalize() else: return s[0].lower() + s[1:] def getPow10(x, direction='up'): ''' Get the power of 10 that is closest to a number, in either direction("down" or "up"). ''' round_method = {'up': np.ceil, 'down': np.floor}[direction] return np.power(10, round_method(np.log10(x))) def rotAroundPoint2D(x, theta, p): ''' Rotate a 2D vector around a center point by a given angle. :param x: 2D coordinates vector :param theta: rotation angle (in rad) :param p: 2D center point coordinates :return: 2D rotated coordinates vector ''' n1, n2 = x.shape if n1 != 2: if n2 == 2: x = x.T else: raise ValueError('x should be a 2-by-n vector') # Rotation matrix R = np.array([ [np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)], ]) # Broadcast center point to input vector ptile = np.tile(p, (x.shape[1], 1)).T # Subtract, rotate and add return R.dot(x - ptile) + ptile def getKey(keyfile='pushbullet.key'): dir_path = os.path.dirname(os.path.realpath(__file__)) package_root = os.path.abspath(os.path.join(dir_path, os.pardir)) fpath = os.path.join(package_root, keyfile) if not os.path.isfile(fpath): raise FileNotFoundError('pushbullet API key file not found') with open(fpath) as f: encoded_key = f.readlines()[0] return base64.b64decode(str.encode(encoded_key)).decode() def sendPushNotification(msg): try: key = getKey() pb = Pushbullet(key) dt = datetime.datetime.now() s = dt.strftime('%Y-%m-%d %H:%M:%S') pb.push_note('Code Messenger', f'{s}\n{msg}') except FileNotFoundError: logger.error(f'Could not send push notification: "{msg}"') def alert(func): ''' Run a function, and send a push notification upon completion, or if an error is raised during its execution. ''' @wraps(func) def wrapper(*args, **kwargs): try: out = func(*args, **kwargs) sendPushNotification(f'completed "{func.__name__}" execution successfully') return out except BaseException as e: sendPushNotification(f'error during "{func.__name__}" execution: {e}') raise e return wrapper def sunflower(n, radius=1, alpha=1): ''' Generate a population of uniformly distributed 2D data points in a unit circle. :param n: number of data points :param alpha: coefficient determining evenness of the boundary :return: 2D matrix of Cartesian (x, y) positions ''' nbounds = np.round(alpha * np.sqrt(n)) # number of boundary points phi = (np.sqrt(5) + 1) / 2 # golden ratio k = np.arange(1, n + 1) # index vector theta = 2 * np.pi * k / phi**2 # angle vector r = np.sqrt((k - 1) / (n - nbounds - 1)) # radius vector r[r > 1] = 1 x = r * np.cos(theta) y = r * np.sin(theta) return radius * np.vstack((x, y)) def filecode(model, *args): ''' Generate file code given a specific combination of model input parameters. 
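A quick check of the 2D rotation helper defined above: rotating the point (2, 1) by 90 degrees around the center (1, 1) should land, up to floating-point error, on (1, 2).

import numpy as np
from PySONIC.utils import rotAroundPoint2D

x = np.array([[2.], [1.]])                # 2-by-n array of coordinates
p = np.array([1., 1.])                    # rotation center
print(rotAroundPoint2D(x, np.pi / 2, p))  # ~[[1.], [2.]]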
''' # If meta dictionary was passed, generate inputs list from it if len(args) == 1 and isinstance(args[0], dict): meta = args[0].copy() if meta['simkey'] == 'ASTIM' and 'fs' not in meta: meta['fs'] = meta['model']['fs'] meta['method'] = meta['model']['method'] meta['qss_vars'] = None for k in ['simkey', 'model', 'tcomp', 'dt', 'atol']: if k in meta: del meta[k] args = list(meta.values()) # Otherwise, transform args tuple into list else: args = list(args) # If any argument is an iterable -> transform it to a continous string for i in range(len(args)): if isIterable(args[i]): args[i] = ''.join([str(x) for x in args[i]]) # Create file code by joining string-encoded inputs with underscores codes = model.filecodes(*args).values() return '_'.join([x for x in codes if x is not None]) def simAndSave(model, *args, **kwargs): ''' Simulate the model and save the results in a specific output directory. :param *args: list of arguments provided to the simulation function :param **kwargs: optional arguments dictionary :return: output filepath ''' # Extract output directory and overwrite boolean from keyword arguments. outputdir = kwargs.pop('outputdir', '.') overwrite = kwargs.pop('overwrite', True) # Set data and meta to None data, meta = None, None # Extract drive object from args drive, *other_args = args # If drive is searchable and not fully resolved if drive.is_searchable: if not drive.is_resolved: # Call simulate to perform titration out = model.simulate(*args) # If titration yields nothing -> no file produced -> return None if out is None: logger.warning('returning None') return None # Store data and meta data, meta = out # Update args list with resovled drive try: args = (meta['drive'], *other_args) except KeyError: args = (meta['source'], *other_args) # Check if a output file corresponding to sim inputs is found in the output directory # That check is performed prior to running the simulation, such that # it is not run if the file is present and overwrite is set ot false. fname = f'{model.filecode(*args)}.pkl' fpath = os.path.join(outputdir, fname) existing_file_msg = f'File "{fname}" already present in directory "{outputdir}"' existing_file = os.path.isfile(fpath) # If file exists and overwrite is set ot false -> return if existing_file and not overwrite: logger.warning(f'{existing_file_msg} -> preserving') return fpath # Run simulation if not already done (for titration cases) if data is None: data, meta = model.simulate(*args) # Raise warning if an existing file is overwritten if existing_file: logger.warning(f'{existing_file_msg} -> overwriting') # Save output file and return output filepath with open(fpath, 'wb') as fh: pickle.dump({'meta': meta, 'data': data}, fh) logger.debug('simulation data exported to "%s"', fpath) return fpath def moveItem(l, value, itarget): ''' Move a list item to a specific target index. :param l: list object :param value: value of the item to move :param itarget: target index :return: re-ordered list. 
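To make the last step of filecode concrete, here is a toy illustration of how the string-encoded, non-None input codes are joined with underscores; the dictionary below is made up, as real codes come from model.filecodes().

codes = {'simkey': 'ASTIM', 'neuron': 'TC', 'f': '500kHz', 'A': '100kPa', 'qss_vars': None}
print('_'.join(x for x in codes.values() if x is not None))   # ASTIM_TC_500kHz_100kPa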
''' # Get absolute target index if itarget < 0: itarget += len(l) assert itarget < len(l), f'target index {itarget} exceeds list size ({len(l)})' # Get index corresponding to element and delete entry from list iref = l.index(value) new_l = l.copy() del new_l[iref] # Return re-organized list return new_l[:itarget] + [value] + new_l[itarget:] def gaussian(x, mu=0., sigma=1., A=1.): return A * np.exp(-((x - mu) / sigma)**2) def isPickable(obj): try: pickle.dumps(obj) except Exception: return False return True def resolveFuncArgs(func, *args, **kwargs): ''' Return a dictionary of positional and keyword arguments upon function call, adding defaults from simfunc signature if not provided at call time. ''' bound_args = signature(func).bind(*args, **kwargs) bound_args.apply_defaults() return dict(bound_args.arguments) def getMeta(model, simfunc, *args, **kwargs): ''' Construct an informative dictionary about the model and simulation parameters. ''' # Retrieve function call arguments args_dict = resolveFuncArgs(simfunc, model, *args, **kwargs) # Construct meta dictionary meta = {'simkey': model.simkey} for k, v in args_dict.items(): if k == 'self': meta['model'] = v.meta else: meta[k] = v return meta def bounds(arr): ''' Return the bounds or a numpy array / list. ''' return (np.nanmin(arr), np.nanmax(arr)) def addColumn(df, key, arr, preceding_key=None): ''' Add a new column to a dataframe, right after a specific column. ''' df[key] = arr if preceding_key is not None: cols = df.columns.tolist()[:-1] preceding_index = cols.index(preceding_key) df = df[cols[:preceding_index + 1] + [key] + cols[preceding_index + 1:]] return df def integerSuffix(n): return 'th' if 4 <= n % 100 <= 20 else {1: 'st', 2: 'nd', 3: 'rd'}.get(n % 10, 'th') def customStrftime(fmt, dt_obj): return dt_obj.strftime(fmt).replace('{S}', str(dt_obj.day) + integerSuffix(dt_obj.day)) def friendlyLogspace(xmin, xmax, bases=None): ''' Define a "friendly" logspace between two bounds. ''' if bases is None: bases = [1, 2, 5] bases = np.asarray(bases) bounds = np.array([xmin, xmax]) logbounds = np.log10(bounds) bounds_orders = np.floor(logbounds) orders = np.arange(bounds_orders[0], bounds_orders[1] + 1) factors = np.power(10., np.floor(orders)) seq = np.hstack([bases * f for f in factors]) if xmax > seq.max(): seq = np.hstack((seq, xmax)) seq = seq[np.logical_and(seq >= xmin, seq <= xmax)] if xmin not in seq: seq = np.hstack((xmin, seq)) if xmax not in seq: seq = np.hstack((seq, xmax)) return seq def differing(d1, d2, subdkey=None, diff=None): ''' Find differences in values across two dictionaries (recursively). 
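A worked example of the friendlyLogspace helper defined above: the sequence is built from the "friendly" bases (1, 2, 5) scaled by successive powers of ten, restricted to the requested interval, and completed with both bounds.

from PySONIC.utils import friendlyLogspace

print(friendlyLogspace(3, 500))   # 3, 5, 10, 20, 50, 100, 200, 500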
:param d1: first dictionary :param d2: second dictionary :param subdkey: specific sub-dictionary attribute key for objects :param diff: existing diff list to append to :return: list of (key, value1, value2) tuples for each differing values ''' # Initilize diff list if diff is None: diff = [] # Check that the two dicts have the same structure if sorted(list(d1.keys())) != sorted(list(d2.keys())): raise ValueError('inconsistent inputs') # For each key - values triplet for k in d1.keys(): # If values are dicts themselves, loop recursively through them if isinstance(d1[k], dict): diff = differing(d1[k], d2[k], subdkey=subdkey, diff=diff) # If values are objects with a specific sub-dictionary attribute, # loop recursively through them elif hasattr(d1[k], subdkey): diff = differing(getattr(d1[k], subdkey), getattr(d2[k], subdkey), subdkey=subdkey, diff=diff) # Otherwise else: # If values differ, add the key - values triplet to the diff list if d1[k] != d2[k]: diff.append((k, d1[k], d2[k])) # Return the diff list return diff def extractCommonPrefix(labels): ''' Extract a common prefix and a list of suffixes from a list of labels. ''' prefix = os.path.commonprefix(labels) if len(prefix) == 0: return None return prefix, [s.split(prefix)[1] for s in labels] + + +class TimeSeries(pd.DataFrame): + ''' Wrapper around pandas DataFrame to store timeseries data. ''' + + time_key = 't' + stim_key = 'stimstate' + + def __init__(self, t, stim, dout): + super().__init__(data={ + self.time_key: t, + self.stim_key: stim, + **dout + }) + + @property + def time(self): + return self[self.time_key].values + + @property + def tbounds(self): + return self.time.min(), self.time.max() + + @property + def stim(self): + return self[self.stim_key].values + + @property + def inputs(self): + return [self.time_key, self.stim_key] + + @property + def outputs(self): + return list(set(self.columns.values) - set(self.inputs)) + + def interpCol(self, t, k, kind): + ''' Interpolate a column according to a new time vector. ''' + kind = 'nearest' if k == self.stim_key else 'linear' + self[k] = interp1d(self.time, self[k].values, kind=kind)(t) + + def interp1d(self, t): + ''' Interpolate the entire dataframe according to a new time vector. ''' + for k in self.outputs: + self.interpCol(t, k, 'linear') + self.interpCol(t, self.stim_key, 'nearest') + self[self.time_key] = t + + def resample(self, dt): + ''' Resample dataframe at regular time step. ''' + tmin, tmax = self.tbounds + n = int((tmax - tmin) / dt) + 1 + self.interp1d(np.linspace(tmin, tmax, n)) + + def prepend(self, t0=0): + ''' Repeat first row outputs for a preceding time. ''' + if t0 > self.time.min(): + raise ValueError('t0 greater than minimal time value') + self.loc[-1] = self.iloc[0] # repeat first row + self.index = self.index + 1 # shift index + self.sort_index(inplace=True) + self[self.time_key][0] = t0 + self[self.stim_key][0] = 0 + + def bound(self, tbounds): + ''' Restrict all columns of dataframe to indexes corresponding to time values + within specific bounds. ''' + tmin, tmax = tbounds + return self[np.logical_and(self.time >= tmin, self.time <= tmax)].reset_index(drop=True) + + def checkAgainst(self, other): + assert isinstance(other, self.__class__), 'classes do not match' + assert all(self.keys() == other.keys()), 'differing keys' + for k in self.inputs: + assert all(self[k].values == other[k].values), f'{k} vectors do not match' + + def operate(self, other, op): + ''' Generic arithmetic operator. 
'''
+        self.checkAgainst(other)
+        return self.__class__(
+            self.time, self.stim,
+            {k: getattr(self[k].values, op)(other[k].values) for k in self.outputs}
+        )
+
+    def __add__(self, other):
+        ''' Addition operator. '''
+        return self.operate(other, '__add__')
+
+    def __sub__(self, other):
+        ''' Subtraction operator. '''
+        return self.operate(other, '__sub__')
+
+    def __mul__(self, other):
+        ''' Multiplication operator. '''
+        return self.operate(other, '__mul__')
+
+    def __truediv__(self, other):
+        ''' Division operator. '''
+        return self.operate(other, '__truediv__')
diff --git a/tests/run_tests.sh b/tests/run_tests.sh
deleted file mode 100644
index ffbc880..0000000
--- a/tests/run_tests.sh
+++ /dev/null
@@ -1,3 +0,0 @@
-# run_tests.sh
-echo executing tests
-C:/ProgramData/Anaconda3/python.exe ./tests/test_basic.py -t all
\ No newline at end of file
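Finally, a hedged usage sketch of the new TimeSeries container added to PySONIC/utils.py above (array values are made up): it stores time, stimulation state and output vectors in a DataFrame, and the overloaded operators combine the output columns of two consistent series element-wise.

import numpy as np
from PySONIC.utils import TimeSeries

t = np.linspace(0., 1e-3, 11)                       # time vector (s)
stim = ((t >= 2e-4) & (t <= 6e-4)).astype(float)    # stimulus ON between 0.2 and 0.6 ms
ts1 = TimeSeries(t, stim, {'Qm': np.linspace(-80e-5, -50e-5, t.size)})
ts2 = TimeSeries(t, stim, {'Qm': np.full(t.size, -80e-5)})

print(ts1.tbounds)               # (0.0, 0.001)
print(ts1.outputs)               # ['Qm']
diff = ts1 - ts2                 # element-wise difference of the output columns
print(diff['Qm'].values.max())   # ~3e-4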