This article collects typical usage examples of Python's scipy.sqrt function. If you have been wondering how scipy.sqrt is used in practice, the curated examples below may help.
Fifteen code examples of sqrt are shown, ordered by popularity by default. You can vote up the examples you like or find useful; your votes help the system recommend better Python code examples.
Example 1: V_Multi

from math import acos, degrees
from scipy import sqrt

def V_Multi(m, n):
    # Return 90 minus the angle (in degrees) between vector m and the
    # vector from point m to point n.
    x, y, z = m[0], m[1], m[2]
    a, b, c = n[0], n[1], n[2]
    p, q, r = a - x, b - y, c - z
    return 90 - degrees(acos((x*p + y*q + z*r) /
                             (sqrt(x**2 + y**2 + z**2)*sqrt(p**2 + q**2 + r**2))))
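
A minimal sketch of a call (the sample vectors are hypothetical):

m = (1.0, 0.0, 0.0)
n = (1.0, 1.0, 0.0)
print(V_Multi(m, n))  # n - m is perpendicular to m here, so this prints 0.0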
Example 2: get_stderr_fit

import scipy

def get_stderr_fit(f, Xdata, popt, pcov):
    # Propagate the fitted-parameter covariance pcov through the model f
    # using finite-difference derivatives in each parameter.
    Y = f(Xdata, popt)
    listdY = []
    for i in range(len(popt)):
        p = popt[i]
        dp = abs(p)/1e6 + 1e-20
        popt[i] += dp
        Yi = f(Xdata, popt)
        dY = (Yi - Y)/dp
        listdY.append(dY)
        popt[i] -= dp
    listdY = scipy.array(listdY)
    # listdY is the Jacobian from the derivation; its shape is N x M
    # pcov is N x N
    left = scipy.dot(listdY.T, pcov)
    right = scipy.dot(left, listdY)
    sigma2y = right.diagonal()
    # sigma2y is the variance of the fit as a function of Xdata
    mean_sigma2y = scipy.mean(right.diagonal())
    M = Xdata.shape[0]
    N = len(popt)
    avg_stddev_data = scipy.sqrt(M*mean_sigma2y/N)
    sigmay = scipy.sqrt(sigma2y)
    return sigmay, avg_stddev_data
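
A minimal usage sketch with a hypothetical linear model and assumed fit results:

import scipy

def f(X, popt):
    return popt[0]*X + popt[1]

Xdata = scipy.linspace(0., 1., 20)
popt = scipy.array([2.0, 0.5])        # fitted parameters (assumed)
pcov = 0.01*scipy.eye(2)              # parameter covariance (assumed)
sigmay, avg_stddev = get_stderr_fit(f, Xdata, popt, pcov)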
Example 3: Rz_to_coshucosv

import scipy as sc

def Rz_to_coshucosv(R, z, delta=1.):
    """
    NAME:
       Rz_to_coshucosv
    PURPOSE:
       calculate prolate confocal cosh(u) and cos(v) coordinates from R, z, and delta
    INPUT:
       R - radius
       z - height
       delta= focus
    OUTPUT:
       (cosh(u), cos(v))
    HISTORY:
       2012-11-27 - Written - Bovy (IAS)
    """
    d12 = (z + delta)**2. + R**2.
    d22 = (z - delta)**2. + R**2.
    coshu = 0.5/delta*(sc.sqrt(d12) + sc.sqrt(d22))
    cosv = 0.5/delta*(sc.sqrt(d12) - sc.sqrt(d22))
    return (coshu, cosv)
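
A minimal call with a hypothetical sample point; d12 and d22 are the squared distances to the two foci at z = ±delta:

coshu, cosv = Rz_to_coshucosv(1.0, 0.5, delta=1.)
print(coshu, cosv)   # approximately 1.460 and 0.342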
Example 4: survival_function

from scipy import exp, log, sqrt
from scipy import stats

def survival_function(loss_ratio, **kwargs):
    """
    Static method that prepares the calculation parameters
    to be passed to stats.lognorm.sf

    :param loss_ratio: current loss ratio
    :type loss_ratio: float
    :param kwargs: convenience dictionary
    :type kwargs: :py:class:`dict` with the following keys:
        **vf** - vulnerability function as provided by
        :py:class:`openquake.shapes.VulnerabilityFunction`
        **col** - matrix column number
    """
    vuln_function = kwargs.get('vf')
    position = kwargs.get('col')
    vf_loss_ratio = vuln_function.loss_ratios[position]
    stddev = vuln_function.covs[position] * vf_loss_ratio
    variance = stddev ** 2.0
    sigma = sqrt(log((variance / vf_loss_ratio ** 2.0) + 1.0))
    mu = exp(log(vf_loss_ratio ** 2.0 /
                 sqrt(variance + vf_loss_ratio ** 2.0)))
    return stats.lognorm.sf(loss_ratio, sigma, scale=mu)
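
A minimal sketch of a call, using a hypothetical stand-in for the vulnerability function object:

class _FakeVF(object):
    # hypothetical stand-in for openquake.shapes.VulnerabilityFunction
    loss_ratios = [0.1, 0.2, 0.4]
    covs = [0.5, 0.5, 0.5]

print(survival_function(0.15, vf=_FakeVF(), col=1))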
Example 5: specular_incidence

import numpy as np
from scipy import sqrt

def specular_incidence(self, pol='TE'):
    """ Return a vector of plane wave amplitudes corresponding
        to specular incidence in the specified polarisation,
        i.e. all elements are 0 except the zeroth order.
    """
    # Element corresponding to 0th order, TE
    spec_TE = self.specular_order
    # Element corresponding to 0th order, TM
    spec_TM = self.specular_order + self.structure.num_pw_per_pol
    tot_num_pw = self.structure.num_pw_per_pol * 2
    inc_amp = np.mat(np.zeros(tot_num_pw, dtype='complex128')).T
    if 'TE' == pol:
        inc_amp[spec_TE] = 1
    elif 'TM' == pol:
        inc_amp[spec_TM] = 1
    elif 'R Circ' == pol:
        inc_amp[spec_TE] = 1/sqrt(2.)
        inc_amp[spec_TM] = +1j/sqrt(2.)
    elif 'L Circ' == pol:
        inc_amp[spec_TE] = 1/sqrt(2.)
        inc_amp[spec_TM] = -1j/sqrt(2.)
    else:
        raise NotImplementedError(
            "Must select from the currently implemented polarisations; "
            "TE, TM, R Circ, L Circ.")
    return inc_amp
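
The circular cases superpose equal TE and TM amplitudes with a ±90° phase offset; a standalone sketch of that construction (sizes and indices hypothetical):

import numpy as np
from scipy import sqrt

num_pw_per_pol = 5       # hypothetical number of plane-wave orders per polarisation
spec = 2                 # hypothetical index of the zeroth (specular) order
inc_amp = np.zeros(2*num_pw_per_pol, dtype='complex128')
inc_amp[spec] = 1/sqrt(2.)                       # TE part
inc_amp[spec + num_pw_per_pol] = 1j/sqrt(2.)     # TM part, +90 degrees for 'R Circ'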
Example 6: _getScalesDiag

import scipy as sp

def _getScalesDiag(self, termx=0):
    """
    Internal function for parameter initialization;
    uses a 2-term single-trait model to get covar params for initialization

    Args:
        termx: non-noise term that is used for initialization
    """
    assert self.P > 1, 'VarianceDecomposition:: diagonal init_method allowed only for multi trait models'
    assert self.noisPos is not None, 'VarianceDecomposition:: noise term has to be set'
    assert termx < self.n_randEffs - 1, 'VarianceDecomposition:: termx>=n_randEffs-1'
    assert self.trait_covar_type[self.noisPos] not in ['lowrank', 'block', 'fixed'], 'VarianceDecomposition:: diagonal initialization not possible for such a parametrization'
    assert self.trait_covar_type[termx] not in ['lowrank', 'block', 'fixed'], 'VarianceDecomposition:: diagonal initialization not possible for such a parametrization'
    scales = []
    res = self._getH2singleTrait(self.vd.getTerm(termx).getK())
    scaleg = sp.sqrt(res['varg'].mean())
    scalen = sp.sqrt(res['varn'].mean())
    for term_i in range(self.n_randEffs):
        if term_i == termx:
            _scales = scaleg*self.diag[term_i]
        elif term_i == self.noisPos:
            _scales = scalen*self.diag[term_i]
        else:
            _scales = 0.*self.diag[term_i]
        if self.jitter[term_i] > 0:
            _scales = sp.concatenate((_scales, sp.array([sp.sqrt(self.jitter[term_i])])))
        scales.append(_scales)
    return sp.concatenate(scales)
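
The bookkeeping at the end concatenates per-term scale vectors, appending a sqrt(jitter) entry whenever a term carries jitter; a toy illustration with hypothetical values:

import scipy as sp

diag_g = sp.ones(2)                  # hypothetical diagonal init, genetic term
diag_n = sp.ones(2)                  # hypothetical diagonal init, noise term
scaleg, scalen, jitter_n = 0.8, 0.6, 1e-4
noise_scales = sp.concatenate((scalen*diag_n, sp.array([sp.sqrt(jitter_n)])))
scales = sp.concatenate([scaleg*diag_g, noise_scales])
print(scales)   # [0.8 0.8 0.6 0.6 0.01]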
Example 7: test_periodogram_csd

import numpy as np
import numpy.testing as npt
import scipy
from nitime import algorithms as tsa, utils

def test_periodogram_csd():
    """Test corner cases of periodogram_csd"""
    arsig1, _, _ = utils.ar_generator(N=1024)
    arsig2, _, _ = utils.ar_generator(N=1024)
    tseries = np.vstack([arsig1, arsig2])
    Sk = np.fft.fft(tseries)
    f1, c1 = tsa.periodogram_csd(tseries)
    f2, c2 = tsa.periodogram_csd(tseries, Sk=Sk)
    npt.assert_equal(c1, c2)
    # Check that providing a complex signal does the right thing
    # (i.e. two-sided spectrum):
    N = 1024
    r, _, _ = utils.ar_generator(N=N)
    c, _, _ = utils.ar_generator(N=N)
    arsig1 = r + c * scipy.sqrt(-1)   # scipy.sqrt is the scimath version: sqrt(-1) == 1j
    r, _, _ = utils.ar_generator(N=N)
    c, _, _ = utils.ar_generator(N=N)
    arsig2 = r + c * scipy.sqrt(-1)
    tseries = np.vstack([arsig1, arsig2])
    f, c = tsa.periodogram_csd(tseries)
    npt.assert_equal(f.shape[0], N)   # should be N, not the one-sided N/2 + 1
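
The test leans on scipy.sqrt following numpy's scimath semantics, where a negative real input yields a complex result rather than nan; a quick check, on SciPy versions that still expose scipy.sqrt:

import scipy
import numpy as np

print(scipy.sqrt(-1))             # 1j: scipy re-exported numpy.lib.scimath.sqrt
print(np.lib.scimath.sqrt(-1))    # the same behaviour without the scipy alias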
Example 8: drazin

import numpy as np
import scipy as sp
from scipy import linalg as la

def drazin(A, tol):
    # Compute the Drazin inverse of A by repeated full-rank factorization:
    # factor CB into B*C via the SVD until CB is either (numerically) zero
    # or invertible.
    CB = A.copy()
    Bs = []
    Cs = []
    k = 1
    while not (sp.absolute(CB) < tol).all() and sp.absolute(la.det(CB)) < tol:
        U, s, Vh = la.svd(CB)
        S = sp.diag(s)
        S = S * (S > tol)
        r = sp.count_nonzero(S)
        B = sp.dot(U, sp.sqrt(S))
        C = sp.dot(sp.sqrt(S), Vh)
        B = B[:, 0:r]
        Bs.append(B)
        C = C[0:r, :]
        Cs.append(C)
        CB = sp.dot(C, B)
        k += 1
    D = sp.eye(A.shape[0])
    for B in Bs:
        D = sp.dot(D, B)
    if (sp.absolute(CB) < tol).all():
        D = sp.dot(D, CB)
    else:
        D = sp.dot(D, np.linalg.matrix_power(CB, -(k + 1)))
    for C in reversed(Cs):
        D = sp.dot(D, C)
    return D
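
A minimal check on a matrix whose Drazin inverse is easy to verify (the example matrix is hypothetical):

import scipy as sp

A = sp.array([[1., 0.],
              [0., 0.]])
D = drazin(A, tol=1e-8)
print(D)   # for this idempotent A, the Drazin inverse is A itself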
Example 9: ndot_product

import scipy as sp

def ndot_product(features1, features2=None):
    """
    Generates a kernel based on the normalized dot product.

    input:
        features1: vectors representing the rows of the matrix
        features2: vectors representing the columns of the matrix
    output:
        out: similarity matrix
    """
    features1.shape = features1.shape[0], -1
    features1 = features1/sp.sqrt((features1**2.).sum(1))[:, None]
    if features2 is None:
        features2 = features1
    else:
        features2.shape = features2.shape[0], -1
        features2 = features2/sp.sqrt((features2**2.).sum(1))[:, None]
    out = sp.dot(features1, features2.T)
    return out
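
Since each row is scaled to unit length, the result is a cosine-similarity matrix; a minimal sketch with hypothetical data:

import numpy as np
import scipy as sp

X = np.random.randn(4, 3)              # 4 hypothetical sample vectors
K = ndot_product(X)                    # 4 x 4 cosine-similarity matrix
print(sp.allclose(K.diagonal(), 1.))   # True: unit rows have self-similarity 1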
Example 10: _genBgTerm_fromXX

import scipy as SP
from scipy import linalg as LA

def _genBgTerm_fromXX(self, vTot, vCommon, XX, a=None, c=None):
    """
    Generate background term from SNPs

    Args:
        vTot: variance of Yc+Yi
        vCommon: variance of Yc
        XX: kinship matrix
        a: common scales, can be set for debugging purposes
        c: independent scales, can be set for debugging purposes
    """
    vSpecific = vTot - vCommon
    SP.random.seed(0)
    if c is None: c = SP.randn(self.P)
    XX += 1e-3 * SP.eye(XX.shape[0])
    L = LA.cholesky(XX, lower=True)
    # common effect
    R = self.genWeights(self.N, self.P)
    A = self.genTraitEffect()
    if a is not None: A[0, :] = a
    Yc = SP.dot(L, SP.dot(R, A))
    Yc *= SP.sqrt(vCommon)/SP.sqrt(Yc.var(0).mean())
    # specific (independent) effect
    R = SP.randn(self.N, self.P)
    Yi = SP.dot(L, SP.dot(R, SP.diag(c)))
    Yi *= SP.sqrt(vSpecific)/SP.sqrt(Yi.var(0).mean())
    return Yc, Yi
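
The two rescaling lines force each simulated component to have exactly the requested mean variance across traits; a standalone sketch of that trick (values hypothetical):

import numpy as np

Y = np.random.randn(1000, 3)                      # hypothetical simulated component
vTarget = 0.4
Y *= np.sqrt(vTarget) / np.sqrt(Y.var(0).mean())
print(Y.var(0).mean())                            # 0.4 by construction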
Example 11: _sampling_matrix

import scipy
import scipy.linalg

def _sampling_matrix(hessian, cutoff=0, temperature=1, step_scale=1):
    # Basically we need the SVD of the Hessian - singular values and
    # eigenvectors: hessian = u * diag(sing_vals) * vh
    u, sing_vals, vh = scipy.linalg.svd(0.5 * hessian)
    # Scroll through the singular values, find the ones whose inverses would
    # be huge, and set them to zero; also load up the array of singular
    # values that we store.
    # cutoff = (1.0/_.singVals[0])*1.0e03
    # double cutoff = _.singVals[0]*1.0e-02
    cutoff_sing_val = cutoff * max(sing_vals)
    D = 1.0/scipy.maximum(sing_vals, cutoff_sing_val)
    # Now fill in the sampling matrix ("square root" of the Hessian).
    # Note that sqrt(D[i]) is taken here whereas Kevin took sqrt(D[j]);
    # this is because vh is the transpose of his PT -JJW
    samp_mat = scipy.transpose(vh) * scipy.sqrt(D)
    # Divide the sampling matrix by an additional factor such
    # that the expected quadratic increase in cost will be about 1.
    cutoff_vals = scipy.compress(sing_vals < cutoff_sing_val, sing_vals)
    if len(cutoff_vals):
        scale = scipy.sqrt(len(sing_vals) - len(cutoff_vals)
                           + sum(cutoff_vals)/cutoff_sing_val)
    else:
        scale = scipy.sqrt(len(sing_vals))
    samp_mat /= scale
    samp_mat *= step_scale
    samp_mat *= scipy.sqrt(temperature)
    return samp_mat
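
A minimal call with a hypothetical near-singular Hessian, where the cutoff keeps the tiny singular value from blowing up the proposal scale:

import scipy

hess = scipy.array([[4., 0.],
                    [0., 1e-12]])
samp = _sampling_matrix(hess, cutoff=1e-6)
print(samp.shape)   # (2, 2) sampling matrix for MCMC-style proposals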
Example 12: test_gets_thermal_with_correlated

def test_gets_thermal_with_correlated(self):
    """Checks that the part of the freq_modes code that compensates the
    thermal for mode subtraction works."""
    self.data *= sp.sqrt(self.bw * 2)  # Makes thermal unity.
    # Need to add something correlated so the modes aren't just the
    # channels.
    correlated_overf = noise_power.generate_overf_noise(1,
                            -2, 0.5, self.dt, self.data.shape[0])
    correlated_overf += (rand.normal(size=(self.data.shape[0],))
                         * sp.sqrt((self.bw * 2) * 0.3))
    self.data += correlated_overf[:, None, None, None] / sp.sqrt(self.nf)
    Blocks = self.make_blocks()
    # Mask a channel out completely.
    for Data in Blocks:
        Data.data[:, :, :, 3] = ma.masked
    model = 'freq_modes_over_f_4'  # Take out 20% of the thermal power.
    parameters = mn.measure_noise_parameters(Blocks, [model])
    right_ans = sp.ones(self.nf)
    right_ans[3] = T_infinity**2
    for p in parameters.values():
        pars = p[model]
        thermal = pars['thermal']
        self.assertTrue(sp.allclose(thermal, right_ans, rtol=0.3))
        mean_thermal = sp.mean(thermal[right_ans == 1])
        self.assertTrue(sp.allclose(mean_thermal, 1, rtol=0.05))
        self.assertTrue(sp.allclose(pars['over_f_mode_0']['thermal'],
                                    0.3, atol=0.1))
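
The sp.sqrt(self.bw * 2) factor normalises white noise so its thermal power spectral density is unity; a standalone check of that convention (bandwidth hypothetical):

import numpy as np

bw = 0.5e6                  # hypothetical channel bandwidth in Hz
dt = 1.0/(2*bw)             # corresponding Nyquist sample spacing
x = np.random.normal(size=100000) * np.sqrt(2*bw)
print(np.var(x) * dt)       # ~1.0: unit power spectral density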
Example 13: ZYFF

import scipy

def ZYFF(Te, EIJ):
    """Computes `ZY` and `FF`, used in other functions.

    If `EIJ` is a scalar, the output has the same shape as `Te`. If `EIJ` is
    an array, the output has shape `EIJ.shape` + `Te.shape`. This should keep
    the output broadcastable with `Te`.

    Parameters
    ----------
    Te : array of float
        Electron temperature. Shape is arbitrary.
    EIJ : scalar float or array of float
        Energy difference.
    """
    # Expand the dimensions of EIJ to produce the desired output shape:
    Te = scipy.asarray(Te, dtype=float)
    EIJ = scipy.asarray(EIJ, dtype=float)
    for n in range(Te.ndim):
        EIJ = scipy.expand_dims(EIJ, axis=-1)
    ZY = EIJ / (1e3 * Te)
    FF = scipy.zeros_like(ZY)
    mask = (ZY >= 1.5)
    FF[mask] = scipy.log((ZY[mask] + 1) / ZY[mask]) - (0.36 + 0.03 * scipy.sqrt(ZY[mask] + 0.01)) / (ZY[mask] + 1)**2
    mask = ~mask
    FF[mask] = scipy.log((ZY[mask] + 1) / ZY[mask]) - (0.36 + 0.03 / scipy.sqrt(ZY[mask] + 0.01)) / (ZY[mask] + 1)**2
    return ZY, FF
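
A minimal call with hypothetical values, just to show the broadcasting behaviour:

import scipy

Te = scipy.array([1.0, 2.0, 5.0])   # hypothetical electron temperatures
ZY, FF = ZYFF(Te, 500.0)            # scalar EIJ: outputs match Te's shape
print(ZY.shape, FF.shape)           # (3,) (3,)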
Example 14: heart

import numpy as np
import scipy

def heart(scale, ndim, time):
    # Trace a heart-shaped trajectory, randomly shifted and rotated, with a
    # small diffusive noise component. Relies on diffusive() and
    # random_rot() from the surrounding module.
    percent = 1.05 + 0.5*np.random.rand()
    real_heart = int(time*percent)
    ratio = 0.5*np.random.rand()  # x/y ratio
    x = scipy.linspace(-2, 2, real_heart//2)
    y1 = scipy.sqrt(1 - (abs(x) - 1)**2)
    y2 = -3*scipy.sqrt(1 - (abs(x[::-1])/2)**0.5)
    Y = np.concatenate([y1, y2])
    X = ratio*np.concatenate([x, x[::-1]])
    shift = np.random.randint(0, real_heart)
    Y = np.roll(Y, shift)[:time]
    X = np.roll(X, shift)[:time]
    traj = np.array([X, Y, np.zeros_like(Y)]).T
    alpha = 2*3.14 * np.random.rand()
    if ndim == 2:
        traj = traj[:, :2]
    noise = diffusive(scale/800., ndim, time+1, epsilon=1e-7)
    return scale*random_rot(traj, alpha, ndim) + noise[:-1] - noise[1:]
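
The two sqrt branches trace the upper lobes and lower point of the classic heart curve; a standalone sketch that builds just the outline (no shift, rotation, or noise):

import numpy as np
import scipy

x = scipy.linspace(-2, 2, 100)
y_top = scipy.sqrt(1 - (abs(x) - 1)**2)            # upper lobes
y_bot = -3*scipy.sqrt(1 - (abs(x[::-1])/2)**0.5)   # lower point
X = np.concatenate([x, x[::-1]])
Y = np.concatenate([y_top, y_bot])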
Example 15: get_stderr_fit

import scipy

def get_stderr_fit(f, Xdata, popt, pcov):
    Y = f(Xdata, popt)
    listdY = []
    for i in range(len(popt)):
        p = popt[i]
        dp = abs(p)/1e6 + 1e-20
        popt[i] += dp
        Yi = f(Xdata, popt)
        dY = (Yi - Y)/dp
        listdY.append(dY)
        popt[i] -= dp
    listdY = scipy.array(listdY)
    # listdY is an array with N rows and M columns, N=len(popt), M=len(Xdata[0])
    # pcov is an array with N rows and N columns
    left = scipy.dot(listdY.T, pcov)
    # left is an array with M rows and N columns
    right = scipy.dot(left, listdY)
    # right is an array with M rows and M columns
    sigma2y = right.diagonal()
    # sigma2y is the variance of the fit as a function of X
    # (sigmay below is its square root, the standard error of the fit)
    mean_sigma2y = scipy.mean(right.diagonal())
    M = Xdata.shape[1]; print(M)
    N = len(popt); print(N)
    avg_stddev_data = scipy.sqrt(M*mean_sigma2y/N)
    # because if the experimental error is constant at sig_dat, then
    # mean_sigma2y = N/M*sig_dat**2
    sigmay = scipy.sqrt(sigma2y)
    return sigmay, avg_stddev_data
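
Unlike Example 2, this variant reads M from Xdata.shape[1], i.e. it expects the observations along the second axis of Xdata; a minimal sketch of that layout (model and values hypothetical):

import scipy

def f(X, popt):
    # hypothetical model over X of shape (n_vars, M)
    return popt[0]*X[0] + popt[1]

Xdata = scipy.linspace(0., 1., 20).reshape(1, 20)
popt = scipy.array([2.0, 0.5])
pcov = 0.01*scipy.eye(2)
sigmay, avg_stddev = get_stderr_fit(f, Xdata, popt, pcov)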