#!/usr/bin/env python3
def Average(x, dtIn, dtOut, axis=0, method='block', subsample=False):
"""
Block-average or low-pass filter x along an axis
x = array to average
dtIn = time step of the input array along axis
dtOut = required output time step
axis = axis along which to perform the averaging (default: first)
method = 'block' (block average) or 'filter' (Hann-window smoothing)
subsample = if True, subsample the result of filter averaging to dtOut
"""
import numpy as np
    if method == 'block':
        # block-average the data; the output step must be a multiple
        # of the input step
        if (dtOut % dtIn) == 0:
            SkipSamp = int(dtOut//dtIn)
        else:
            raise ValueError('BlockAverage: can only block-average if the time steps are multiples')
if SkipSamp>1:
try:
x = np.ma.array(x, mask=x.mask)
except AttributeError:
x = np.ma.array(x, mask=np.zeros(x.shape, dtype=np.uint16))
mask = x.mask
length = x.shape[axis]
Split = np.arange(0, length, SkipSamp, dtype="int64")
Denom = np.hstack((np.diff(Split),length-Split[-1]))
            inds = x.ndim * [np.newaxis]
            inds[axis] = slice(None)
            # numpy wants a tuple (not a list) for this mixed indexing
            xMean = np.add.reduceat(x, Split, axis=axis) / Denom[tuple(inds)]
try:
maskMean = np.uint16(np.add.reduceat(mask, Split, axis=axis))
except TypeError:
maskMean = np.zeros(xMean.shape, dtype=np.uint16)
return np.ma.array(xMean, mask=maskMean)
else:
return x
    elif method == 'filter':
        try:
            from scipy.signal.windows import hann
        except ImportError:  # older scipy keeps the window functions in scipy.signal
            from scipy.signal import hann
        wint = hann(int(dtOut/dtIn))
        wint /= wint.sum()
try:
x = np.ma.array(x, mask=x.mask)
except AttributeError:
x = np.ma.array(x, mask=np.zeros(x.shape, dtype=np.uint16))
mask = x.mask
lenIn = x.shape[axis]
if lenIn<wint.size:
raise ValueError('Data is too short.')
        lenOut = int(lenIn*dtIn/dtOut) + int((dtOut % dtIn) > 0)
timeIn = np.linspace(0,1,lenIn)
xm = np.ma.filled(x, np.nan)
shapeOut = np.asarray(x.shape)
if subsample:
timeOut = np.linspace(0,1,lenOut)
shapeOut[axis] = lenOut
else:
timeOut = timeIn
        xOut = np.empty(shapeOut)
iterate = list(x.shape)
iterate[axis] = 1
        for i in np.ndindex(tuple(iterate)):
            s = [slice(dim, dim + 1) for dim in i]
            s[axis] = slice(0, lenIn)
            # smooth one line of the array at a time (NaNs mark masked samples)
            tmp = np.convolve(np.ravel(xm[tuple(s)]), wint, mode='same')
            if subsample:
                s[axis] = slice(0, lenOut)
                xOut[tuple(s)] = np.reshape(
                    np.interp(timeOut, timeIn, tmp,
                              left=np.nan, right=np.nan),
                    xOut[tuple(s)].shape)
            else:
                xOut[tuple(s)] = np.reshape(tmp, xOut[tuple(s)].shape)
return np.ma.array(xOut, mask=np.isnan(xOut))
    else:
        raise ValueError("method must be either 'block' or 'filter'")
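# Usage sketch for Average (hypothetical data): block-average a 1000-sample
# series at 1 s resolution down to 10 s, i.e. one output value per 10 samples:
#   >>> import numpy as np
#   >>> x = np.arange(1000, dtype=float)
#   >>> xa = Average(x, dtIn=1, dtOut=10, method='block')
#   >>> xa.shape
#   (100,)
#   >>> float(xa[0])   # mean of samples 0..9
#   4.5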
def FourierSpectrogram(Data, dt=1, detrend=True, N=None, window=None, \
smooth=None, p=0.05, alpha=3.0):
"""
    Compute the Fourier power spectrum of a time series
    FourierSpectrogram(Data, dt=1, detrend=True, N=None, window=None,
                       smooth=None, p=0.05, alpha=3.0)
    Data: 1d array
    dt: time step of the time series
    detrend: detrend the data first
    N: number of points for zero padding (if None, next power of 2)
    window: either 'hann' or 'keiser' (a Kaiser window)
    smooth: number of points for band averaging
    p: significance level for the confidence interval
    alpha: shape parameter for the Kaiser window (2-3.5)
"""
import numpy as np
if len(Data.shape)!=1:
raise ValueError('Only takes vector input')
    if detrend:
        from scipy.signal import detrend
        tsd = detrend(Data)
    else:
        # work on a float copy so the windowing below cannot modify Data
        tsd = np.array(Data, dtype=np.float64)
    # Use Hann window
    if window == 'hann':
        try:
            from scipy.signal.windows import hann
        except ImportError:  # older scipy keeps the window functions in scipy.signal
            from scipy.signal import hann
        tsd *= hann(tsd.size)
    # Use Kaiser window
    elif window == 'keiser':
        try:
            from scipy.signal.windows import kaiser
        except ImportError:
            from scipy.signal import kaiser
        tsd *= kaiser(tsd.size, alpha)
    if N is None:
        N = 2**int(np.log2(tsd.size) + 1)
    # the factor N/Data.size compensates for the zero padding
    fft_pw = (N/float(Data.size))*np.abs(np.fft.fft(tsd, N))**2  # FFT power spectrum
freqs = np.fft.fftfreq(N, dt) # frequencies of FFT
if window=='hann':
fft_pw *= ((8./3.)**0.5)
    elif window == 'keiser':
        print('Does the Kaiser-Bessel window conserve power?')
# Normalization: Dt*abs(FFT)**2/N
# this way Df*sum(power)/N = variance
    fft_power = dt*fft_pw[:N//2]/N
    fft_power[1:] += dt*fft_pw[N//2+1:]/N
    freqs = freqs[:N//2]
    if smooth is not None:
        sm_fft_power = np.convolve(fft_power, np.ones(smooth), mode='same')
        sm_fft_power /= float(smooth)
        # significance levels for smoothed spectra
        # (p. 454 of Emery & Thomson, Data Analysis Methods in Physical Oceanography)
        from scipy.stats import chi2
        nu = 2*smooth
        if window == 'hann':
            nu *= 8./3.
        sig = np.diff([np.log10(nu/chi2.ppf(1 - p/2, nu)),
                       np.log10(nu/chi2.ppf(p/2, nu))])
    if smooth is None:
return fft_power, freqs
else:
return fft_power, freqs, sm_fft_power, sig
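# Usage sketch for FourierSpectrogram (hypothetical data): the spectrum of a
# 0.1 Hz sine sampled at dt=0.5 s peaks at the forcing frequency, and
# df*sum(power) approximates the series variance (see the normalization above):
#   >>> import numpy as np
#   >>> t = np.arange(1024)*0.5
#   >>> ts = np.sin(2*np.pi*0.1*t)
#   >>> power, freqs = FourierSpectrogram(ts, dt=0.5)
#   >>> float(freqs[np.argmax(power)])   # close to 0.1 Hz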
def AverageSpectrum(Data, dt=1, detrend=True, N=None, p=0.05, \
window=None, alpha=3.0, all=False):
"""
    Compute the spectrum averaged over several time series
    AverageSpectrum(Data, dt=1, detrend=True, N=None, p=0.05,
                    window=None, alpha=3.0, all=False)
    Data: 2d array, one time series per row
    dt: time step of the time series
    detrend: detrend the data first
    N: number of points for zero padding (if None, next power of 2)
    window: either 'hann' or 'keiser' (a Kaiser window)
    p: significance level for the confidence interval
    alpha: shape parameter for the Kaiser window (2-3.5)
    all: also output the spectrum of each individual time series
"""
import numpy as np
if len(Data.shape)!=2:
raise ValueError('Only takes 2-d array input')
Nt = Data.shape[1] # number of points in each time series
Ns = Data.shape[0] # number of time series
    if detrend:
        from scipy.signal import detrend
        tsd = detrend(Data, axis=1)
    else:
        # work on a float copy so the windowing below cannot modify Data
        tsd = np.array(Data, dtype=np.float64)
    # Use Hann window
    if window == 'hann':
        try:
            from scipy.signal.windows import hann
        except ImportError:  # older scipy keeps the window functions in scipy.signal
            from scipy.signal import hann
        tsd *= hann(Nt)
    # Use Kaiser window
    elif window == 'keiser':
        try:
            from scipy.signal.windows import kaiser
        except ImportError:
            from scipy.signal import kaiser
        tsd *= kaiser(Nt, alpha)
    if N is None:  # number of points in the Fourier transform
        N = 2**int(np.log2(Nt) + 1)
    # the factor N/Nt compensates for the zero padding
    fft_pw = (N/float(Nt))*np.abs(np.fft.fft(tsd, N, axis=1))**2  # FFT power spectrum
freqs = np.fft.fftfreq(N, dt) # frequencies of FFT
if window=='hann':
fft_pw *= ((8./3.)**0.5)
    elif window == 'keiser':
        print('Does the Kaiser-Bessel window conserve power?')
# Normalization: Dt*abs(FFT)**2/N
# this way Df*sum(power)/N = variance
    fft_power = dt*fft_pw[:, :N//2]/N
    fft_power[:, 1:] += dt*fft_pw[:, N//2+1:]/N
    sm_fft_power = fft_power.mean(axis=0)
    freqs = freqs[:N//2]
    # significance levels for the averaged spectrum
    # (p. 454 of Emery & Thomson, Data Analysis Methods in Physical Oceanography)
from scipy.stats import chi2
nu = 2*Ns
if window=='hann':
nu *= 8./3.
sig = np.diff([np.log10(nu/chi2.ppf(1-p/2,nu)), np.log10(nu/chi2.ppf(p/2,nu))])
if all:
return sm_fft_power, freqs, sig, fft_power
else:
return sm_fft_power, freqs, sig
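# Usage sketch for AverageSpectrum (hypothetical data): average the spectra of
# 8 white-noise series; more rows mean tighter chi-squared error bars:
#   >>> import numpy as np
#   >>> Data = np.random.randn(8, 512)
#   >>> spec, freqs, sig = AverageSpectrum(Data, dt=1.0)
#   >>> spec.shape, freqs.shape
#   ((512,), (512,))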
def KernelEstimate(coords, nplot=200, nBS=1000, x='lin', norm=False):
"""
Compute kernel estimation of sample
coords: input data, 1d array or 2d array JxN with J the number of coordinates
and N the number of samples in each coordinate
nplot: number of bins (Default: 200)
nBS: number of bootstrapping samples (Default: 1000)
    x: 'lin', 'log10' or 'ln', for a linear or logarithmic pdf
norm: normalize data (Default: False)
"""
import numpy as np
from scipy.stats import gaussian_kde as kernel
np.random.seed(740)
coords = np.array(coords)
if len(coords.shape)>1:
dims = coords.shape[0]
else:
dims = 1
coords = np.atleast_2d(coords)
kernel.covariance_factor = kernel.silverman_factor #set method
pdf = np.zeros((dims,nplot),dtype=np.float64)
xout = np.zeros((dims,nplot),dtype=np.float64)
BSpdf = np.zeros((dims,nplot,nBS),dtype=np.float64)
if x=='log10':
coords = np.log10(coords)
elif x=='ln':
coords = np.log(coords)
elif x=='lin':
pass
else:
raise ValueError('x can only be either "log10", "ln" or "lin"')
    # normalize if requested
    if norm:
        # keepdims is needed so the (dims, N) array broadcasts correctly
        coords = (coords - coords.mean(axis=1, keepdims=True)) \
                 / coords.std(axis=1, keepdims=True)
    nSamp = coords.shape[1]
    for dim in range(dims):
        xout[dim, :] = np.linspace(np.min(coords[dim, :]), np.max(coords[dim, :]), nplot)
        estimate = kernel(coords[dim, :])
        pdf[dim, :] = estimate(xout[dim, :])
        for k in range(nBS):
            # resample the data (not the plotting grid) with replacement
            BSvec = coords[dim, np.random.randint(0, nSamp, nSamp)]
            estimate = kernel(BSvec)
            BSpdf[dim, :, k] = estimate(xout[dim, :])
BSstd = np.std(BSpdf,axis=2,ddof=1)
return np.squeeze(xout), np.squeeze(pdf), np.squeeze(BSstd)
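# Usage sketch for KernelEstimate (hypothetical data): density estimate of a
# normal sample with bootstrap standard errors on a 100-point grid:
#   >>> import numpy as np
#   >>> sample = np.random.randn(500)
#   >>> x, pdf, err = KernelEstimate(sample, nplot=100, nBS=200)
#   >>> x.shape, pdf.shape, err.shape
#   ((100,), (100,), (100,))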
def IWw(N,f,k):
"""
IWw(N,f,k)
    Compute the intrinsic frequency of an internal wave of wavevector k
    from the dispersion relation, given buoyancy frequency N and
    Coriolis parameter f
"""
import numpy as np
k = np.array(k, dtype=np.float64)
if len(k)!=3:
raise ValueError('Wavevector must be 3d')
return np.sqrt(((k[0]**2+k[1]**2)*N**2+f**2*k[2]**2)/(k**2).sum())
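# Usage sketch for IWw (hypothetical values): with N=1e-2 s^-1, f=1e-4 s^-1
# and k=(1e-3, 0, 1e-2) m^-1, the result lies between f and N as the
# dispersion relation requires:
#   >>> IWw(1e-2, 1e-4, [1e-3, 0.0, 1e-2])   # about 1e-3 s^-1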
def select(x,y,z,xrange,yrange):
"""
select(x,y,z,xrange,yrange)
A function that selects data from array z in the range given by
the xrange and yrange.
The coordinates to which z refers are given in x and y, each a 1d array
Returns: x, y, z in the selected range
"""
    import numpy as np
    if (len(xrange) != 2) | (len(yrange) != 2):
        raise IndexError('Ranges must have length 2')
    r_x = np.nonzero((x > xrange[0]) & (x < xrange[1]))[0]
    r_y = np.nonzero((y > yrange[0]) & (y < yrange[1]))[0]
return (x[r_x], y[r_y], z[r_y][:,r_x])
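# Usage sketch for select (hypothetical grids): keep z where 2 < x < 7
# and 1 < y < 4:
#   >>> import numpy as np
#   >>> x = np.arange(10); y = np.arange(5); z = np.outer(y, x)
#   >>> xs, ys, zs = select(x, y, z, (2, 7), (1, 4))
#   >>> zs.shape
#   (2, 4)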
def ODRfit(xData,yData,xw=None,yw=None,model=None):
"""
Perform an odr fit of model
xData: vector of data points
yData: vector of measurements
xw: weights for x
yw: weights for y
    model: scipy.odr Model instance to be fitted
"""
    from scipy import odr
    my_data = odr.Data(xData, yData, we=yw, wd=xw)
my_odr = odr.ODR(my_data,model)
# fit type 2 for least squares
my_odr.set_job(fit_type=2)
fit = my_odr.run()
# print results of fit
fit.pprint()
return fit.beta, fit.sd_beta
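# Usage sketch for ODRfit (hypothetical data): since no initial guess is
# passed to ODR, the model should carry its own estimator, e.g. scipy's
# built-in straight-line model:
#   >>> import numpy as np
#   >>> from scipy.odr import unilinear
#   >>> x = np.linspace(0, 1, 50)
#   >>> y = 2.0*x + 1.0 + 0.01*np.random.randn(50)
#   >>> beta, sd_beta = ODRfit(x, y, model=unilinear)   # beta close to (2, 1)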
def xcorr2D(a, b=None, option='cor', alpha=0.05, err=None):
"""
2D Cross-correlation
a,b: array with shape (M,N). If b is not given, compute autocorrelation
option: normalization method:
cor - normalizes the sequence so that the correlations at
zero lag are identically 1.0.
cov - covariance (no normalisation)
alpha: significance level for confidence intervals
err: choose the method for error computation, either 'gauss' or an
integer number, interpreted as the number of samples for random phase
bootstrapping. If error is None (default), no confidence interval is
computed
    RETURNS:
    C: 2d cross-correlation array
    cl,cu: lower/upper confidence bounds at the given alpha, returned
           only if option='cor' and err is not None ('gauss' gives
           Fisher-z bounds, an integer gives random-phase bootstrap
           percentiles at each lag)
    lags: two vectors of lag indices in the two dimensions; the sign
          convention is b lagging a for positive lags.
"""
import numpy as np
from scipy.stats import norm
    a = np.asarray(a, dtype=np.float64)
# We want 2d arrays
assert np.size(a.shape)==2
M,N = a.shape
# We do not want length-1 arrays
assert (M>1) & (N>1)
    if b is None:
        b = a
    b = np.asarray(b, dtype=np.float64)
    # b must be a 2d array
assert np.size(b.shape)==2
assert (option in ('cov','cor'))
if (a.shape!=b.shape):
Ma,Na = a.shape; Mb,Nb = b.shape
M = max(Ma,Mb)
N = max(Na,Nb)
        if (Ma < M) | (Na < N):
            tmp = np.zeros((M, N))
            tmp[:Ma, :Na] = a
            a = tmp
        elif (Mb < M) | (Nb < N):
            tmp = np.zeros((M, N))
            tmp[:Mb, :Nb] = b
            b = tmp
# Detrend signals
from scipy.signal import detrend
a = detrend(a, axis=0)
a = detrend(a, axis=1)
b = detrend(b, axis=0)
b = detrend(b, axis=1)
# Compute cross correlation
    Mfft = int(2**np.ceil(np.log2(M)))
    Nfft = int(2**np.ceil(np.log2(N)))
c = np.real(np.fft.ifft2( \
np.conj(np.fft.fft2(a, (Mfft,Nfft))) * \
np.fft.fft2(b, (Mfft,Nfft))))
c = np.fft.fftshift(c)
if option=='cor':
# in this case, we normalise the correlation function
c /= a.std()*b.std()*M*N
        if err is None:
            return c, np.arange(Mfft) - Mfft//2, np.arange(Nfft) - Nfft//2
        if err == 'gauss':
            # Fisher z-transform, p. 253 Emery & Thomson
            Zr = 0.5*(np.log(1 + c) - np.log(1 - c))
            sz = ((N - 3)*(M - 3))**-0.5
            rn = norm()
            # ppf(alpha/2) < 0 < ppf(1-alpha/2): Zl/cl is the lower bound,
            # Zu/cu the upper one
            Zl = Zr + rn.ppf(alpha/2)*sz
            cl = (np.exp(2*Zl) - 1)/(1 + np.exp(2*Zl))
            Zu = Zr + rn.ppf(1 - alpha/2)*sz
            cu = (np.exp(2*Zu) - 1)/(1 + np.exp(2*Zu))
            return c, cl, cu, np.arange(Mfft) - Mfft//2, np.arange(Nfft) - Nfft//2
        elif isinstance(err, (int, np.integer)):
            # random-phase bootstrap: Ebisuzaki, J. Climate 1997
            fa = np.fft.fft2(a)
            # randomize the phases while keeping the amplitudes
            fr = np.zeros((err, M, N), dtype=complex)
            fr[:, 1:, 1:] = np.abs(fa[1:, 1:]) * \
                np.exp(1j*np.random.uniform(high=2*np.pi, size=(err, M-1, N-1)))
            # components that must stay real get a sqrt(2)*cos(random phase) factor
            if M % 2 == 0:
                if N % 2 == 0:
                    fr[:, M//2, N//2] = 2**0.5*np.abs(fa[M//2, N//2]) * \
                        np.cos(np.random.uniform(high=2*np.pi, size=(err)))
                else:
                    fr[:, M//2, (N-1)//2:(N+1)//2+1] = \
                        2**0.5*np.abs(fa[M//2, (N-1)//2:(N+1)//2+1]) * \
                        np.cos(np.random.uniform(high=2*np.pi, size=(err, 2)))
            else:
                if N % 2 == 0:
                    fr[:, (M-1)//2:(M+1)//2+1, N//2] = \
                        2**0.5*np.abs(fa[(M-1)//2:(M+1)//2+1, N//2]) * \
                        np.cos(np.random.uniform(high=2*np.pi, size=(err, 2)))
                else:
                    fr[:, (M-1)//2:(M+1)//2+1, (N-1)//2:(N+1)//2+1] = \
                        2**0.5*np.abs(fa[(M-1)//2:(M+1)//2+1, (N-1)//2:(N+1)//2+1]) * \
                        np.cos(np.random.uniform(high=2*np.pi, size=(err, 2, 2)))
# inverse transform
r = np.real(np.fft.ifft2(fr))
# Compute correlation of new random phase arrays
cr = np.real(np.fft.ifft2( \
np.conj(np.fft.fft2(r, (Mfft,Nfft), axes=(1,2))) * \
np.fft.fft2(b, (Mfft,Nfft))))
cr = np.fft.fftshift(cr, axes=(1,2))
            for i in range(cr.shape[0]):
                cr[i, :, :] /= r[i, :, :].std()
cr /= b.std()*M*N
            return c, \
                np.percentile(cr, alpha/2*100, axis=0), \
                np.percentile(cr, (1 - alpha/2)*100, axis=0), \
                np.arange(Mfft) - Mfft//2, \
                np.arange(Nfft) - Nfft//2
else:
raise ValueError('Error method can be either "gauss" or an integer for bootstrapping')
else:
        return c, np.arange(Mfft) - Mfft//2, np.arange(Nfft) - Nfft//2
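# Usage sketch for xcorr2D (hypothetical field): the autocorrelation of a
# detrended random field is ~1 at zero lag, i.e. at index (Mfft//2, Nfft//2):
#   >>> import numpy as np
#   >>> a = np.random.randn(32, 32)
#   >>> c, lagM, lagN = xcorr2D(a)
#   >>> round(float(c[16, 16]), 2)
#   1.0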
def nBins(array):
"""
Return number of bins according to Doane's formula
"""
from scipy.stats import skew
import numpy as np
n = array.size
g = skew(array)
    # standard error of the sample skewness
    s = np.sqrt(6.*(n - 2)/((n + 1.)*(n + 3.)))
    # Doane's formula: 1 + log2(n) + log2(1 + |g|/sigma_g)
    return int(np.round(1 + np.log2(n) + np.log2(1 + np.abs(g)/s)))
def Statistics(array,p=None, lims=None, Bins=None):
"""
Compute histogram of array, computing optimal number of bins
    p: if given, compute the histogram between the p and 100-p
       percentiles; otherwise p is guessed from the size of the array
    lims: list with two elements; if passed, compute the histogram only
          between the given extremes
    Bins: number of bins; if None, choose it with nBins()
"""
from scipy.stats import scoreatpercentile
import numpy as np
array = array[np.isfinite(array)].ravel()
    if lims is None:
        if p is None:
            p = 100.0*array.size**(-0.5)
        lim = (scoreatpercentile(array, p), scoreatpercentile(array, 100 - p))
    else:
        lim = lims
    if Bins is None:
        NumBins = nBins(array)
    else:
        NumBins = Bins
h,b = np.histogram(array, NumBins, density=True, range=lim)
return h,b
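# Usage sketch for Statistics (hypothetical data): histogram of a normal
# sample with the bin count chosen by Doane's formula:
#   >>> import numpy as np
#   >>> data = np.random.randn(2000)
#   >>> h, b = Statistics(data)
#   >>> b.size == h.size + 1
#   True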
def hist2plot(bins, hist, ptype='line'):
"""
    Given bins and pdf, return two arrays of the same size
    which can be used to plot the histogram with plot().
    bins: length N+1 array
    hist: length N array
    ptype: either 'line' or 'point'. If 'line', return data to be
           plotted as a "step" line plot, i.e. the bin extremes;
           if 'point', return the bin midpoints
"""
from numpy import zeros,diff
if ptype=='line':
x = zeros(2*bins.size)
x[0] = bins[0]
x[1:-2:2] = bins[:-1]
x[2:-1:2] = bins[1:]
x[-1] = bins[-1]
y = zeros(x.size)
y[1:-2:2] = hist
y[2:-1:2] = hist
    elif ptype == 'point':
        x = bins[:-1] + 0.5*diff(bins)
        y = hist
    else:
        raise ValueError("ptype must be either 'line' or 'point'")
    return x, y
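# Usage sketch for hist2plot: turn the Statistics output from the sketch
# above into a polyline for a step-style plot:
#   >>> x, y = hist2plot(b, h, ptype='line')
#   >>> x.size == 2*b.size
#   True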
def myCmap(name):
"""
    Return a custom colormap; valid names are
    'cube1', 'cubeYF' and 'Linear_L',
see http://mycarta.wordpress.com/2014/04/25/convert-color-palettes-to-python-matplotlib-colormaps/
"""
import numpy as np
import matplotlib
linmap = np.loadtxt('/home/cimatori/installed/MyLib/{}_0-1.csv'.format(name), \
delimiter=',')
    # LinearSegmentedColormap wants, for each channel, a list of
    # (x, y0, y1) tuples; here y0 == y1 since the map has no jumps
    x = np.linspace(0, 1, linmap.shape[0])
    r = linmap[:, 0]  # value of red at sample n
    g = linmap[:, 1]  # value of green at sample n
    b = linmap[:, 2]  # value of blue at sample n
    LinMap = {'red': list(zip(x, r, r)),
              'green': list(zip(x, g, g)),
              'blue': list(zip(x, b, b))}
    return matplotlib.colors.LinearSegmentedColormap(name, LinMap)
def plotLuminance(cmap):
"""
Plots the luminance of a given colormap
"""
    import matplotlib.cm
    import matplotlib.pyplot
    def lum(x):
        # luminance from the ITU-R BT.709 luma coefficients
        return 0.2126*x[:, 0] + 0.7152*x[:, 1] + 0.0722*x[:, 2]
    cmap = matplotlib.cm.get_cmap(cmap, 256)
x = range(cmap.N)
cs = cmap(x)[::2,:]
matplotlib.pyplot.scatter(x[::2], lum(cs), c=cs)
matplotlib.pyplot.ylabel('Luminance', fontsize='x-large')
def lognorm_mean(x,*args,**kwargs):
"""
Compute the maximum likelihood estimator of the mean
for lognormally distributed data
Zhao and Gao 1997
"""
from numpy import log,nanmean,nanvar,exp
lx = log(x)
m = nanmean(lx,*args,**kwargs)
v = nanvar(lx,*args,ddof=1,**kwargs)
return exp(m+0.5*v)
def lognorm_mean_ci(x,siglev=0.95):
"""
    Compute the confidence interval of the mean of a lognormal
    distribution at confidence level siglev (default: 95%),
    according to Cox's method as described in Zhao and Gao 1997.
    The function returns a multiplicative factor: the confidence
    interval is obtained by multiplying and dividing the mean by
    the returned value.
    """
from numpy import nanvar,sqrt,size,exp,log
from scipy.stats import norm
lx = log(x)
s2 = nanvar(lx, ddof=1)
alpha = 1.0 - siglev
Z = norm.ppf(1-(alpha/2.0))
n = float(size(x))
return exp(Z*sqrt(s2/n+(s2**2)/(2*(n-1))))
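# Usage sketch for the lognormal mean (hypothetical data): the ci value is a
# multiplicative factor, so the interval is [mean/ci, mean*ci]:
#   >>> import numpy as np
#   >>> x = np.random.lognormal(mean=0.0, sigma=0.5, size=200)
#   >>> m = lognorm_mean(x)
#   >>> ci = lognorm_mean_ci(x, siglev=0.95)
#   >>> lower, upper = m/ci, m*ci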
class GPSConverter(object):
'''
    GPS converter class which performs conversions between the
    CH1903 (LV03) and WGS84 coordinate systems.
'''
# Convert CH y/x/h to WGS height
def CHtoWGSheight(self, y, x, h):
        # Auxiliary values (% Bern)
y_aux = (y - 600000) / 1000000
x_aux = (x - 200000) / 1000000
h = (h + 49.55) - (12.60 * y_aux) - (22.64 * x_aux)
return h
# Convert CH y/x to WGS lat
def CHtoWGSlat(self, y, x):
        # Auxiliary values (% Bern)
y_aux = (y - 600000) / 1000000
x_aux = (x - 200000) / 1000000
lat = (16.9023892 + (3.238272 * x_aux)) + \
- (0.270978 * pow(y_aux, 2)) + \
- (0.002528 * pow(x_aux, 2)) + \
- (0.0447 * pow(y_aux, 2) * x_aux) + \
- (0.0140 * pow(x_aux, 3))
# Unit 10000" to 1" and convert seconds to degrees (dec)
lat = (lat * 100) / 36
return lat
# Convert CH y/x to WGS long
def CHtoWGSlng(self, y, x):
        # Auxiliary values (% Bern)
y_aux = (y - 600000) / 1000000
x_aux = (x - 200000) / 1000000
lng = (2.6779094 + (4.728982 * y_aux) + \
+ (0.791484 * y_aux * x_aux) + \
+ (0.1306 * y_aux * pow(x_aux, 2))) + \
- (0.0436 * pow(y_aux, 3))
# Unit 10000" to 1" and convert seconds to degrees (dec)
lng = (lng * 100) / 36
return lng
# Convert decimal angle (deg dec) to sexagesimal angle (dd.mmss,ss)
def DecToSexAngle(self, dec):
import numpy as np
degree = int(np.floor(dec))
minute = int(np.floor((dec - degree) * 60))
second = (((dec - degree) * 60) - minute) * 60
return degree + (float(minute) / 100) + (second / 10000)
# Convert sexagesimal angle (dd.mmss,ss) to seconds
def SexAngleToSeconds(self, dms):
        import numpy as np
        degree = np.floor(dms)
        minute = np.floor((dms - degree) * 100)
        second = (((dms - degree) * 100) - minute) * 100
        return second + (minute * 60) + (degree * 3600)
# Convert sexagesimal angle (dd.mmss) to decimal angle (degrees)
def SexToDecAngle(self, dms):
        import numpy as np
        degree = np.floor(dms)
        minute = np.floor((dms - degree) * 100)
        second = (((dms - degree) * 100) - minute) * 100
        return degree + (minute / 60) + (second / 3600)
# Convert WGS lat/long (deg dec) and height to CH h
def WGStoCHh(self, lat, lng, h):
lat = self.DecToSexAngle(lat)
lng = self.DecToSexAngle(lng)
lat = self.SexAngleToSeconds(lat)
lng = self.SexAngleToSeconds(lng)
        # Auxiliary values (% Bern)
lat_aux = (lat - 169028.66) / 10000
lng_aux = (lng - 26782.5) / 10000
h = (h - 49.55) + (2.73 * lng_aux) + (6.94 * lat_aux)
return h
# Convert WGS lat/long (deg dec) to CH x
def WGStoCHx(self, lat, lng):
lat = self.DecToSexAngle(lat)
lng = self.DecToSexAngle(lng)
lat = self.SexAngleToSeconds(lat)
lng = self.SexAngleToSeconds(lng)
        # Auxiliary values (% Bern)
lat_aux = (lat - 169028.66) / 10000
lng_aux = (lng - 26782.5) / 10000
x = ((200147.07 + (308807.95 * lat_aux) + \
+ (3745.25 * pow(lng_aux, 2)) + \
+ (76.63 * pow(lat_aux,2))) + \
- (194.56 * pow(lng_aux, 2) * lat_aux)) + \
+ (119.79 * pow(lat_aux, 3))
return x
# Convert WGS lat/long (deg dec) to CH y
def WGStoCHy(self, lat, lng):
lat = self.DecToSexAngle(lat)
lng = self.DecToSexAngle(lng)
lat = self.SexAngleToSeconds(lat)
lng = self.SexAngleToSeconds(lng)
        # Auxiliary values (% Bern)
lat_aux = (lat - 169028.66) / 10000
lng_aux = (lng - 26782.5) / 10000
y = (600072.37 + (211455.93 * lng_aux)) + \
- (10938.51 * lng_aux * lat_aux) + \
- (0.36 * lng_aux * pow(lat_aux, 2)) + \
- (44.54 * pow(lng_aux, 3))
return y
def LV03toWGS84(self, east, north, height):
'''
        Convert LV03 to WGS84. Returns a list of floats containing lat,
        long and height.
'''
d = []
d.append(self.CHtoWGSlat(east, north))
d.append(self.CHtoWGSlng(east, north))
d.append(self.CHtoWGSheight(east, north, height))
return d
def WGS84toLV03(self, latitude, longitude, ellHeight):
'''
        Convert WGS84 to LV03. Returns a list of floats containing east,
        north and height.
'''
d = []
d.append(self.WGStoCHy(latitude, longitude))
d.append(self.WGStoCHx(latitude, longitude))
d.append(self.WGStoCHh(latitude, longitude, ellHeight))
return d
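# Usage sketch for GPSConverter (hypothetical height): round-trip the Bern
# reference point; the approximate swisstopo formulas are metre-accurate:
#   >>> conv = GPSConverter()
#   >>> lat, lng, h = conv.LV03toWGS84(600000, 200000, 550.0)
#   >>> east, north, h03 = conv.WGS84toLV03(lat, lng, h)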
def es_calc(airtemp):
'''
Function to calculate saturated vapour pressure from temperature.
For T<0 C the saturation vapour pressure equation for ice is used
    according to Goff and Gratch (1946), whereas for T>=0 C that of
Goff (1957) is used.
Parameters:
- airtemp : (data-type) measured air temperature [Celsius].
Returns:
- es : (data-type) saturated vapour pressure [Pa].
References
----------
- Goff, J.A.,and S. Gratch, Low-pressure properties of water from -160 \
to 212 F. Transactions of the American society of heating and \
ventilating engineers, p. 95-122, presented at the 52nd annual \
meeting of the American society of \
heating and ventilating engineers, New York, 1946.
- Goff, J. A. Saturation pressure of water on the new Kelvin \
temperature scale, Transactions of the American \
society of heating and ventilating engineers, pp 347-354, \
presented at the semi-annual meeting of the American \
society of heating and ventilating engineers, Murray Bay, \
Quebec. Canada, 1957.
Examples
--------
>>> es_calc(30.0)
4242.725994656632
>>> x = [20, 25]
>>> es_calc(x)
array([ 2337.08019792, 3166.82441912])
'''
import numpy as np
# Test input array/value
airtemp = np.asarray(airtemp, dtype='double')
# Calculate saturated vapour pressures, distinguish between water/ice
negative = airtemp < 0
# Calculate saturation vapour pressure for ice
log_pi = - 9.09718 * (273.16 / (airtemp[negative] + 273.15) - 1.0) \
- 3.56654 * np.log10(273.16 / (airtemp[negative] + 273.15)) \
+ 0.876793 * (1.0 - (airtemp[negative] + 273.15) / 273.16) \
+ np.log10(6.1071)
# Calculate saturation vapour pressure for water
log_pw = 10.79574 * (1.0 - 273.16 / (airtemp[~negative] + 273.15)) \
- 5.02800 * np.log10((airtemp[~negative] + 273.15) / 273.16) \
+ 1.50475E-4 * (1 - np.power(10, (-8.2969 * ((airtemp[~negative] +\
273.15) / 273.16 - 1.0)))) + 0.42873E-3 * \
(np.power(10, (+4.76955 * (1.0 - 273.16\
/ (airtemp[~negative] + 273.15)))) - 1) + 0.78614
es = np.zeros_like(airtemp)
es[negative] = np.power(10, log_pi)
es[~negative] = np.power(10, log_pw)
# Convert from hPa to Pa
es = es * 100.0
return es # in Pa
