This article collects typical usage examples of the scipy.absolute function in Python. If you are wondering what scipy.absolute does, how to call it, or where to find real-world examples of it, the curated code samples below should help.
The following shows 15 code examples of the absolute function, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
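Before the project examples, here is a minimal standalone sketch of the function itself (my own illustration, not one of the curated examples). In older SciPy releases scipy.absolute was simply a re-export of NumPy's element-wise absolute value; newer SciPy releases deprecate or remove these NumPy aliases, so the same call is written with numpy.absolute (or the built-in abs).

import numpy

x = numpy.array([-3.0, 1.5, 3.0 - 4.0j])
print(numpy.absolute(x))   # prints [3.  1.5 5. ]; the complex entry gives sqrt(3**2 + 4**2) = 5

# In SciPy versions that still expose the alias, scipy.absolute(x) returns the identical result:
# import scipy
# print(scipy.absolute(x))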
Example 1: calculateFFT

def calculateFFT(self, duration, framerate, sample):
    """
    Calculates the FFT for a given sound wave.
    Considers only frequencies with magnitudes higher than
    a given threshold.
    """
    fft_length = int(duration * framerate)
    fft_length = get_next_power_2(fft_length)
    FFT = numpy.fft.fft(sample, n=fft_length)

    # Adjust the threshold to 10% of the highest power-spectrum value.
    threshold = 0
    power_spectra = []
    for i in range(len(FFT) // 2):
        power_spectrum = scipy.absolute(FFT[i]) * scipy.absolute(FFT[i])
        if power_spectrum > threshold:
            threshold = power_spectrum
        power_spectra.append(power_spectrum)
    threshold *= 0.1

    binResolution = float(framerate) / float(fft_length)
    frequency_power = []
    # For each bin calculate the corresponding frequency.
    for k in range(len(FFT) // 2):
        binFreq = k * binResolution
        if binFreq > self.minFreqConsidered and binFreq < self.maxFreqConsidered:
            power_spectrum = power_spectra[k]
            #dB = 10*math.log10(power_spectrum)
            if power_spectrum > threshold:
                frequency_power.append((binFreq, power_spectrum))
    return frequency_power
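The loop above builds the power spectrum bin by bin; the same quantity can be computed in one vectorized NumPy expression. A minimal sketch of that idea, written against plain NumPy rather than this project's class (the function name and arguments are mine):

import numpy

def power_spectrum_sketch(sample, fft_length):
    """Power spectrum of the first fft_length // 2 bins, plus the 10%-of-peak threshold used above."""
    FFT = numpy.fft.fft(sample, n=fft_length)
    # |X[k]|**2 for each bin, equivalent to scipy.absolute(FFT[k]) squared
    power = numpy.absolute(FFT[:fft_length // 2]) ** 2
    threshold = 0.1 * power.max()
    return power, threshold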
Example 2: _LMLgrad_lik

def _LMLgrad_lik(self, hyperparams):
    """Derivative of the marginal likelihood w.r.t. the likelihood (noise) parameters."""
    logtheta = hyperparams['covar']
    try:
        KV = self.get_covariances(hyperparams)
    except linalg.LinAlgError:
        LG.error("exception caught (%s)" % (str(hyperparams)))
        return 1E6
    # loop through all dimensions
    # logdet term:
    Kd = 2 * KV['Knoise']
    dldet = 0.5 * (Kd * KV['Si']).sum(axis=0)
    # quadratic term
    y_roti = KV['y_roti']
    dlquad = -0.5 * (y_roti * Kd * y_roti).sum(axis=0)
    if VERBOSE:
        # brute-force check of the analytic gradient, dimension by dimension
        dldet_ = SP.zeros([self.d])
        dlquad_ = SP.zeros([self.d])
        for d in xrange(self.d):
            _K = KV['K'] + SP.diag(KV['Knoise'][:, d])
            _Ki = SP.linalg.inv(_K)
            dldet_[d] = 0.5 * SP.dot(_Ki, SP.diag(Kd[:, d])).trace()
            dlquad_[d] = -0.5 * SP.dot(self.y[:, d], SP.dot(_Ki, SP.dot(SP.diag(Kd[:, d]), SP.dot(_Ki, self.y[:, d]))))
        assert (SP.absolute(dldet - dldet_) < 1E-3).all(), 'outch'
        assert (SP.absolute(dlquad - dlquad_) < 1E-3).all(), 'outch'
    LMLgrad = dldet + dlquad
    RV = {'lik': LMLgrad}
    return RV
Example 3: train_srs

def train_srs():
    # Load the training set
    print('Loading files')
    X, Y = dataIO.train_set(setsize)
    X = scipy.absolute(X)  # keep only the magnitudes of the training data
    Y = scipy.absolute(Y)
    scale = np.mean(X)
    var = np.std(X)  # note: 'var' actually holds the standard deviation
    print("Scale: " + str(scale))
    Y = (Y - scale) / var
    X = (X - scale) / var
    # Create the net
    print('Building RNN')
    rnn = DNN.DNN(512, hidden_layer, nodes, X,
                  loss=neural_network.loss_function.source_separation_loss_function,
                  activation=activation)
    # Train the net
    print('Training')
    rnn.fit(X, Y, nb_epoch=nb_epoch, batch_size=batch_size)
    # Save the net
    print('Saving')
    rnn.save()
    return rnn
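The preprocessing above is an element-wise magnitude followed by z-score scaling with statistics taken from X. A small standalone sketch of just that step (my own helper, not part of the project's dataIO or DNN modules):

import numpy as np

def standardize_magnitudes(X, Y):
    """Take magnitudes, then scale both arrays with the mean and std of X."""
    X = np.absolute(X)
    Y = np.absolute(Y)
    mean, std = X.mean(), X.std()
    return (X - mean) / std, (Y - mean) / std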
Example 4: rendGauss

def rendGauss(x, y, sx, imageBounds, pixelSize):
    fuzz = 3 * scipy.median(sx)
    roiSize = int(fuzz / pixelSize)
    fuzz = pixelSize * roiSize
    X = numpy.arange(imageBounds.x0 - fuzz, imageBounds.x1 + fuzz, pixelSize)
    Y = numpy.arange(imageBounds.y0 - fuzz, imageBounds.y1 + fuzz, pixelSize)
    #print X
    im = scipy.zeros((len(X), len(Y)), 'f')
    # Record the image resolution so points can be plotted with a minimum size
    # equal to the resolution (to avoid missing small points).
    delX = scipy.absolute(X[1] - X[0])
    for i in range(len(x)):
        ix = scipy.absolute(X - x[i]).argmin()
        iy = scipy.absolute(Y - y[i]).argmin()
        sxi = max(sx[i], delX)
        imp = Gauss2D(X[(ix - roiSize):(ix + roiSize + 1)], Y[(iy - roiSize):(iy + roiSize + 1)], 1/sxi, x[i], y[i], sxi)
        im[(ix - roiSize):(ix + roiSize + 1), (iy - roiSize):(iy + roiSize + 1)] += imp
    im = im[roiSize:-roiSize, roiSize:-roiSize]
    return im
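The core indexing trick above is scipy.absolute(X - x[i]).argmin(): find the grid index whose coordinate is closest to a point. A minimal sketch of that lookup on its own (the helper name and values are hypothetical):

import numpy as np

def nearest_bin(grid, value):
    """Index of the grid entry closest to value, as used for ix and iy above."""
    return np.absolute(grid - value).argmin()

# Example: nearest_bin(np.arange(0.0, 10.0, 0.5), 3.7) returns 7, since grid[7] == 3.5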
Example 5: dispersion_relation_extraordinary

def dispersion_relation_extraordinary(kx, ky, k, nO, nE, c):
    """Dispersion relation for the extraordinary wave.

    NOTE
    See eq. 16 in Glytsis, "Three-dimensional (vector) rigorous
    coupled-wave analysis of anisotropic grating diffraction",
    JOSA A, 7(8), 1990. Always gives positive real or negative
    imaginary kz.
    """
    if kx.shape != ky.shape or c.size != 3:
        raise ValueError('kx and ky must have the same length and c must have 3 components')
    kz = S.empty_like(kx)
    for ii in xrange(0, kx.size):
        alpha = nE**2 - nO**2
        beta = kx[ii]/k * c[0] + ky[ii]/k * c[1]
        # quadratic coefficients (in kz/k)
        C = S.array([nO**2 + c[2]**2 * alpha,
                     2. * c[2] * beta * alpha,
                     nO**2 * (kx[ii]**2 + ky[ii]**2) / k**2 + alpha * beta**2 - nO**2 * nE**2])
        # two solutions of type +x or -x, purely real or purely imaginary
        tmp_kz = k * S.roots(C)
        # keep the positive real root, or the negative imaginary one
        if S.any(S.isreal(tmp_kz)):
            kz[ii] = S.absolute(tmp_kz[0])
        else:
            kz[ii] = -1j * S.absolute(tmp_kz[0])
    return kz
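The branch selection at the end implements the rule stated in the docstring: if any root is numerically real, return its magnitude as a positive real kz, otherwise return a negative purely imaginary value. A small sketch of that rule in isolation (the helper name is mine):

import numpy as np

def pick_kz_branch(roots):
    """Positive real kz if a real root exists, otherwise negative imaginary."""
    roots = np.atleast_1d(roots)
    if np.any(np.isreal(roots)):
        return np.absolute(roots[0])
    return -1j * np.absolute(roots[0])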
Example 6: extendPWMs

def extendPWMs(pwm1, pwm2, offset2=0, fillValue=.25):
    """Extend both PWMs so that they are the same length by padding with fillValue.

    Optionally, a positive or negative offset for motif2 can be specified and both motifs will be filled.
    fillValue may be a single number or a 4-element list of nucleotide frequencies.
    Returns (extendedPwm1, extendedPwm2) as 2D lists.
    """
    # check for errors, convert pwms to lists if necessary
    if type(fillValue) != list:
        fillValue = [fillValue] * 4  # extend to 4 nucleotides
    elif len(fillValue) != 4:
        raise RuntimeError('fillValue for extendPWMs must be a single number or a 4-element list!')
    if type(pwm1) == scipy.ndarray:
        pwm1 = pwm1.tolist()
    if type(pwm2) == scipy.ndarray:
        pwm2 = pwm2.tolist()
    if offset2 < 0:
        # prepend filler to pwm1
        pwm1 = [fillValue] * scipy.absolute(offset2) + pwm1
    elif offset2 > 0:
        # prepend filler to pwm2
        pwm2 = [fillValue] * scipy.absolute(offset2) + pwm2
    # extend the pwms as necessary on the right side
    pwm1 = pwm1 + [fillValue] * (len(pwm2) - len(pwm1))
    pwm2 = pwm2 + [fillValue] * (len(pwm1) - len(pwm2))
    return pwm1, pwm2
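A hypothetical call to the function above, assuming 4-column PWMs (A, C, G, T); the values are made up for illustration:

pwm1 = [[0.7, 0.1, 0.1, 0.1],
        [0.1, 0.7, 0.1, 0.1]]
pwm2 = [[0.1, 0.1, 0.7, 0.1]]

ext1, ext2 = extendPWMs(pwm1, pwm2, offset2=2, fillValue=0.25)
# Both results now have length 3:
# ext2 == [[0.25, 0.25, 0.25, 0.25], [0.25, 0.25, 0.25, 0.25], [0.1, 0.1, 0.7, 0.1]]
# ext1 == [[0.7, 0.1, 0.1, 0.1], [0.1, 0.7, 0.1, 0.1], [0.25, 0.25, 0.25, 0.25]]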
Example 7: getAxis

def getAxis(self, X, Y):
    """
    Return the proper axis limits for the plots.
    """
    out = []
    mM = [(min(X), max(X)), (min(Y), max(Y))]
    for i, j in mM:
        # YJC: check whether the values are negative; if so, return 0
        if j < 0 or i < 0:
            return 0
        log_i = scipy.log10(i)
        d, I = scipy.modf(log_i)
        if log_i < 0:
            add = 0.5 * (scipy.absolute(d) < 0.5)
        else:
            add = 0.5 * (scipy.absolute(d) > 0.5)
        m = scipy.floor(log_i) + add
        out.append(10**m)
        log_j = scipy.log10(j)
        d, I = scipy.modf(log_j)
        if log_j < 0:
            add = -0.5 * (scipy.absolute(d) > 0.5)
        else:
            add = -0.5 * (scipy.absolute(d) < 0.5)
        m = scipy.ceil(log_j) + add
        out.append(10**m)
    return tuple(out)
Example 8: process_maps
def process_maps(aper_map, data_map1, data_map2, args):
    r"""
    subtracts the data maps and then calculates percentiles of the result
    before outputting a final map to file.
    """
    #
    # creating resultant map from clone of aperture map
    result = aper_map.clone()
    result.data_map = data_map1 - data_map2
    result.data_vector = sp.ravel(result.data_map)
    result.infile = args.out_name
    result.outfile = args.out_name
    #
    print('Percentiles of data_map1 - data_map2')
    output_percentile_set(result, args)
    #
    # checking if data is to be normalized and/or absolute
    if args.post_abs:
        result.data_map = sp.absolute(result.data_map)
        result.data_vector = sp.absolute(result.data_vector)
    #
    if args.post_normalize:
        result.data_map = result.data_map / sp.amax(sp.absolute(result.data_map))
        result.data_vector = sp.ravel(result.data_map)
    #
    return result
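The two post-processing options above are an element-wise absolute value and a scaling by the largest magnitude, so the normalized result lies in [-1, 1]. A minimal sketch of those steps on a bare array (the function and flag names here are mine, not the script's CLI):

import numpy as np

def postprocess(diff, take_abs=False, normalize=False):
    """Optionally take |diff| and/or scale it by its largest magnitude."""
    if take_abs:
        diff = np.absolute(diff)
    if normalize:
        diff = diff / np.amax(np.absolute(diff))
    return diff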
Example 9: selectTraits

def selectTraits(self, phenoMAF=None, corrMin=None, nUnique=False):
    """
    Use only a subset of traits.
    Filter out all individuals that have missing values for the selected ones.
    """
    self.idx_samples = SP.ones(self.n_s, dtype=bool)
    # filter out samples with missing (NaN) values
    self.idx_samples[SP.isnan(self.Y[:, self.idx_traits]).any(1)] = False
    # filter out phenotypes that are not diverse enough
    if phenoMAF is not None:
        expr_mean = self.Y[self.idx_samples].mean(0)
        expr_std = self.Y[self.idx_samples].std(0)
        z_scores = SP.absolute(self.Y[self.idx_samples] - expr_mean) / SP.sqrt(expr_std)
        self.idx_traits[(z_scores > 1.5).mean(0) < phenoMAF] = False
    # use only sufficiently correlated phenotypes
    if corrMin is not None and self.Y.shape[1] > 1:
        corr = SP.corrcoef(self.Y[self.idx_samples].T)
        corr -= SP.eye(corr.shape[0])
        self.idx_traits[SP.absolute(corr).max(0) < corrMin] = False
    # filter out phenotypes with too few unique values (e.g. binary traits)
    if nUnique and self.Y.shape[1] > 1:
        for i in range(self.Y.shape[1]):
            if len(SP.unique(self.Y[self.idx_samples][:, i])) <= nUnique:
                self.idx_traits[i] = False
    LG.debug('number of traits (before filtering): %d' % self.n_t)
    LG.debug('number of traits (after filtering): %d' % self.idx_traits.sum())
    LG.debug('number of samples (before filtering): %d' % self.n_s)
    LG.debug('number of samples (after filtering): %d' % self.idx_samples.sum())
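The corrMin filter above keeps a trait only if its strongest absolute correlation with some other trait is large enough. A standalone sketch of that mask (my own helper, operating on a plain samples-by-traits array):

import numpy as np

def correlated_trait_mask(Y, corr_min):
    """Boolean mask of columns whose best absolute off-diagonal correlation reaches corr_min."""
    corr = np.corrcoef(Y.T)
    corr -= np.eye(corr.shape[0])   # remove self-correlation from consideration
    return np.absolute(corr).max(axis=0) >= corr_min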
Example 10: calculateFFT

def calculateFFT(self, duration, framerate, sample):
    """
    Calculates the FFT for a given sound wave.
    Considers only frequencies with magnitudes higher than
    a given threshold.
    """
    fft_length = int(duration * framerate)
    # For the FFT to work much faster take the length that is a power of 2.
    fft_length = get_next_power_2(fft_length)
    FFT = numpy.fft.fft(sample, n=fft_length)

    # Adjust the threshold: highest spectral peak method.
    threshold = 0
    power_spectra = []
    frequency_bin_with_max_spectrum = 0
    for i in range(len(FFT) // 2):
        power_spectrum = scipy.absolute(FFT[i]) * scipy.absolute(FFT[i])
        if power_spectrum > threshold:
            threshold = power_spectrum
            frequency_bin_with_max_spectrum = i
        power_spectra.append(power_spectrum)
    max_power_spectrum = threshold
    threshold *= 0.1

    binFrequencies = []
    magnitudes = []
    binResolution = float(framerate) / float(fft_length)
    sum_of_significant_spectra = 0
    # For each bin calculate the corresponding frequency.
    for k in range(len(FFT)):
        binFreq = k * binResolution
        # Truncate the FFT so we consider only audible frequencies.
        if binFreq > self.maxFreqConsidered:
            FFT = FFT[:k]
            break
        elif binFreq > self.minFreqConsidered:
            # Consider only the frequencies
            # with magnitudes higher than the threshold.
            power_spectrum = power_spectra[k]
            if power_spectrum > threshold:
                magnitudes.append(power_spectrum)
                binFrequencies.append(binFreq)
                # Sum all significant power spectra
                # except the max power spectrum.
                if power_spectrum != max_power_spectrum:
                    sum_of_significant_spectra += power_spectrum

    significant_freq = 0.0
    if max_power_spectrum > sum_of_significant_spectra:
        significant_freq = frequency_bin_with_max_spectrum * binResolution

    # Max. frequency considered after truncating.
    # maxFreq = rate without truncating.
    maxFreq = len(FFT) / duration
    return (FFT, binFrequencies, maxFreq, magnitudes, significant_freq)
Example 11: testGaussianPValue
def testGaussianPValue(self):
    for typePair in [(None, "float32"), ("tomo", None)]:
        mtype = typePair[0]
        dtype = typePair[1]
        mean = 32000.0
        stdd = 1000.0
        noisDds = mango.data.gaussian_noise(shape=(105, 223, 240), mean=mean, stdd=stdd, mtype=mtype, dtype=dtype)
        pvalDds = mango.fmm.gaussian_pvalue(
            noisDds,
            mean=mean,
            stdd=stdd,
            sidedness=mango.fmm.PValueSidednessType.RIGHT_SIDEDNESS
        )
        alpha = 0.05
        count = sp.sum(sp.where(pvalDds.asarray() <= alpha, 1, 0))
        if (pvalDds.mpi.comm != None):
            count = pvalDds.mpi.comm.allreduce(count)
        expCount = sp.product(noisDds.shape) * alpha
        count = float(count)
        relErr = sp.absolute(expCount - float(count)) / sp.absolute(max(expCount, count))
        rootLogger.info("relErr = %s" % relErr)
        self.assertTrue(relErr < 0.10)
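The pass/fail criterion above is a relative error computed with sp.absolute. As a standalone formula (the helper name is mine):

import numpy as np

def relative_error(expected, observed):
    """|expected - observed| / |max(expected, observed)|, as in the test above."""
    return np.absolute(expected - observed) / np.absolute(max(expected, observed))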
Example 12: testMomentOfInertiaRotatedEllipsoid
def testMomentOfInertiaRotatedEllipsoid(self):
    img = mango.zeros(shape=self.imgShape*2, mtype="tomo", origin=(0, 0, 0))
    img.md.setVoxelSize((1, 1, 1))
    img.md.setVoxelSizeUnit("mm")
    c = (sp.array(img.origin) + img.origin + img.shape - 1) * 0.5
    r = sp.array(img.shape - 1) * 0.25
    mango.data.fill_ellipsoid(img, centre=c, radius=r, fill=512)
    rMatrix = (
        mango.image.rotation_matrix(-25, 2).dot(
            mango.image.rotation_matrix(10, 1).dot(
                mango.image.rotation_matrix(45, 0)
            )
        )
    )
    img = mango.image.affine_transform(img, rMatrix, offset=c - img.origin, interptype=mango.image.InterpolationType.CATMULL_ROM_CUBIC_SPLINE)
    #mango.io.writeDds("tomoMoiRotEllipsoid.nc", img)
    pmoi, pmoi_axes, com = mango.image.moment_of_inertia(img)
    rootLogger.info("rmtx = \n%s" % (rMatrix,))
    rootLogger.info("pmoi = \n%s" % (pmoi,))
    rootLogger.info("pmoi_axes = \n%s" % (pmoi_axes,))
    rootLogger.info("c = %s, com = %s" % (c, com))
    self.assertTrue(sp.all(sp.absolute(c - com) <= 1.0e-10))
    self.assertLess(pmoi[0], pmoi[1])
    self.assertLess(pmoi[1], pmoi[2])
    self.assertTrue(sp.all(sp.absolute(pmoi_axes[:, 0] - rMatrix[:, 2]) <= 1.0e-3))
    self.assertTrue(sp.all(sp.absolute(pmoi_axes[:, 1] - rMatrix[:, 1]) <= 1.0e-3))
    self.assertTrue(sp.all(sp.absolute(pmoi_axes[:, 2] - rMatrix[:, 0]) <= 1.0e-3))
Example 13: cut
def cut(self):
    average = sp.sum(sp.absolute(self.data)) / sp.size(self.data)
    head = sp.nonzero(sp.absolute(self.data) > average)[0][5]
    bottom = sp.nonzero(sp.absolute(self.data) > average)[0][-1]
    self.data = self.data[head:bottom]
    self.duration_list = self.duration_list[head:bottom]
    self.duration = self.duration_list[-1] - self.duration_list[0]
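The method above trims the signal to the region whose amplitude exceeds the mean absolute value, skipping the first few hits. A sketch of the same idea as a free function (the names and the skip count are mine):

import numpy as np

def trim_to_signal(data, skip=5):
    """Keep samples between the (skip+1)-th and last above-average excursions."""
    average = np.absolute(data).mean()
    hits = np.nonzero(np.absolute(data) > average)[0]
    return data[hits[skip]:hits[-1]]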
Example 14: domain_length
def domain_length(self, face_1, face_2):
    r'''
    Calculate the distance between two faces of the network.

    Parameters
    ----------
    face_1 and face_2 : array_like
        Lists of pores belonging to opposite faces of the network

    Returns
    -------
    The length of the domain in the specified direction

    Notes
    -----
    - Does not yet check if input faces are perpendicular to each other
    '''
    # Ensure given points are coplanar before proceeding
    if misc.iscoplanar(self['pore.coords'][face_1]) and misc.iscoplanar(self['pore.coords'][face_2]):
        # Find distance between given faces
        x = self['pore.coords'][face_1]
        y = self['pore.coords'][face_2]
        Ds = misc.dist(x, y)
        L = sp.median(sp.amin(Ds, axis=0))
    else:
        logger.warning('The supplied pores are not coplanar. Length will be approximate.')
        f1 = self['pore.coords'][face_1]
        f2 = self['pore.coords'][face_2]
        distavg = [0, 0, 0]
        distavg[0] = sp.absolute(sp.average(f1[:, 0]) - sp.average(f2[:, 0]))
        distavg[1] = sp.absolute(sp.average(f1[:, 1]) - sp.average(f2[:, 1]))
        distavg[2] = sp.absolute(sp.average(f1[:, 2]) - sp.average(f2[:, 2]))
        L = max(distavg)
    return L
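When the faces are not coplanar, the fallback above reduces to the largest per-axis separation of the two face centroids. A compact sketch of that fallback (a hypothetical helper on plain coordinate arrays):

import numpy as np

def centroid_separation(coords_1, coords_2):
    """Largest per-axis distance between the centroids of two point clouds."""
    gap = np.absolute(coords_1.mean(axis=0) - coords_2.mean(axis=0))
    return gap.max()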
Example 15: drazin

def drazin(A, tol):
    CB = A.copy()
    Bs = []
    Cs = []
    k = 1
    # Repeated full-rank factorizations (via the SVD) until CB is either
    # numerically zero or numerically nonsingular.
    while not (sp.absolute(CB) < tol).all() and sp.absolute(la.det(CB)) < tol:
        U, s, Vh = la.svd(CB)
        S = sp.diag(s)
        S = S * (S > tol)
        r = sp.count_nonzero(S)
        B = sp.dot(U, sp.sqrt(S))
        C = sp.dot(sp.sqrt(S), Vh)
        B = B[:, 0:r]
        Bs.append(B)
        C = C[0:r, :]
        Cs.append(C)
        CB = sp.dot(C, B)
        k += 1
    D = sp.eye(A.shape[0])
    for B in Bs:
        D = sp.dot(D, B)
    if (sp.absolute(CB) < tol).all():
        D = sp.dot(D, CB)
    else:
        D = sp.dot(D, np.linalg.matrix_power(CB, -(k + 1)))
    for C in reversed(Cs):
        D = sp.dot(D, C)
    return D
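The loop condition above combines two tolerance tests built on sp.absolute: "CB is numerically zero" and "CB is numerically singular". A sketch of those two predicates in isolation (the helper names are mine):

import numpy as np
from scipy import linalg as la

def is_numerically_zero(M, tol):
    """Every entry of M is smaller than tol in magnitude."""
    return (np.absolute(M) < tol).all()

def is_numerically_singular(M, tol):
    """The determinant of M is smaller than tol in magnitude."""
    return np.absolute(la.det(M)) < tol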