This article collects and summarizes typical usage examples of the numpy.choose function in Python. If you are wondering what numpy.choose does, how to call it, or what real code that uses it looks like, the selected examples below should help.
Fifteen code examples of the choose function are shown below, sorted by popularity by default.
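Before the examples, here is a minimal sketch (not taken from any of the projects below) of what numpy.choose does: an index array selects, element by element, from a sequence of choice arrays, and a boolean condition works as a 0/1 index.

import numpy as np

idx = np.array([0, 1, 1, 0])           # index array: which choice to take at each position
low = np.array([10, 20, 30, 40])       # choice 0
high = np.array([100, 200, 300, 400])  # choice 1

# picks low where idx == 0 and high where idx == 1
print(np.choose(idx, (low, high)))     # [ 10 200 300  40]

# a boolean condition acts as a 0/1 index, giving an element-wise "where"
x = np.array([-2.0, 3.0, -0.5, 7.0])
print(np.choose(x < 0, (x, -x)))       # absolute value: [2.  3.  0.5 7. ]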
Example 1: reflectivity
def reflectivity(self, Q, L=1):
"""
Compute the Fresnel reflectivity at the given Q/wavelength.
"""
# If Q < 0, then we are going from substrate into incident medium.
# In that case we must negate the change in scattering length density
# and ignore the absorption.
drho = self.rho-self.Vrho
S = 4*pi*choose(Q<0,(-drho,drho)) \
+ 2j*pi/L*choose(Q<0,(self.Vmu,self.mu))
kz = abs(Q)/2
f = sqrt(kz**2 - S) # fresnel coefficient
# Compute reflectivity amplitude, with adjustment for roughness
amp = (kz-f)/(kz+f) * exp(-2*self.sigma**2*kz*f)
# Note: we do not need to check for a divide by zero.
# Qc^2 = 16 pi rho. Since rho is non-zero then Qc is non-zero.
# For mu = 0:
# * If |Qz| < Qc then f has an imaginary component, so |Qz|+f != 0.
# * If |Qz| > Qc then |Qz| > 0 and f > 0, so |Qz|+f != 0.
# * If |Qz| = Qc then |Qz| != 0 and f = 0, so |Qz|+f != 0.
# For mu != 0:
# * f has an imaginary component, so |Q|+f != 0.
R = real(amp*conj(amp))
return R
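For readers who want to run the calculation outside the class, here is a self-contained sketch of the same Fresnel computation; the parameter values passed at the bottom are made up, and rho, Vrho, mu, Vmu, sigma merely stand in for the instance attributes used above.

import numpy as np

def fresnel_reflectivity(Q, rho, Vrho, mu, Vmu, sigma, L=1.0):
    # the sign of Q selects between incident-side and substrate-side parameters
    drho = rho - Vrho
    S = 4 * np.pi * np.choose(Q < 0, (-drho, drho)) \
        + 2j * np.pi / L * np.choose(Q < 0, (Vmu, mu))
    kz = np.abs(Q) / 2
    f = np.sqrt(kz**2 - S)                               # Fresnel coefficient
    amp = (kz - f) / (kz + f) * np.exp(-2 * sigma**2 * kz * f)
    return np.real(amp * np.conj(amp))

Q = np.linspace(-0.2, 0.2, 5)
print(fresnel_reflectivity(Q, rho=2.07e-6, Vrho=0.0, mu=0.0, Vmu=0.0, sigma=5.0))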
Example 2: rgb_to_hsv
def rgb_to_hsv(r, g, b):
maxc = np.maximum(r, np.maximum(g, b))
minc = np.minimum(r, np.minimum(g, b))
v = maxc
minc_eq_maxc = np.equal(minc, maxc)
# compute the difference, but reset zeros to ones to avoid division by zero later.
ones = np.ones_like(r)
maxc_minus_minc = np.choose(minc_eq_maxc, (maxc-minc, ones))
s = (maxc-minc) / np.maximum(ones,maxc)
rc = (maxc-r) / maxc_minus_minc
gc = (maxc-g) / maxc_minus_minc
bc = (maxc-b) / maxc_minus_minc
maxc_is_r = np.equal(maxc, r)
maxc_is_g = np.equal(maxc, g)
maxc_is_b = np.equal(maxc, b)
h = np.zeros_like(r)
h = np.choose(maxc_is_b, (h, gc-rc+4.0))
h = np.choose(maxc_is_g, (h, rc-bc+2.0))
h = np.choose(maxc_is_r, (h, bc-gc))
h = np.mod(h/6.0, 1.0)
return (h, s, v)
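A quick usage sketch for the function above; the pixel arrays are made up, and the function simply operates element-wise on equally shaped channel arrays.

import numpy as np

r = np.array([1.0, 0.2, 0.0])   # made-up red channel
g = np.array([0.0, 0.8, 0.0])   # made-up green channel
b = np.array([0.0, 0.4, 0.5])   # made-up blue channel

h, s, v = rgb_to_hsv(r, g, b)
print(h)        # pure red -> 0.0, pure blue -> ~0.667
print(s, v)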
Example 3: _daylength_processor
def _daylength_processor(self, timearray) :
"""computes daylength
Performs a simple subtraction of sunset-sunrise time. There are three
potential cases:
* if positive, we have the daylength, stop
* if negative, we have the negative of nightlength, add one.
* if zero, the sun is either always up or always down. compute the
solar altitude to find out.
"""
# sunset - sunrise
daylength = timearray[:,2] - timearray[:,1]
# handle negative case
daylength = np.choose(daylength<(0*u.min), [daylength, daylength+1*u.sday]) * u.sday
# any "always up" or "always down"?
no_rise_set = np.abs(timearray[:,2]-timearray[:,1]) < 1*u.min
# hardcode sunrise == transit == sunset
timearray[no_rise_set,1] = timearray[no_rise_set,0]
timearray[no_rise_set,2] = timearray[no_rise_set,0]
if np.any(no_rise_set) :
dec, H, alt = self._calc_event_body_params(timearray[no_rise_set,:], self.obs_location[no_rise_set])
daylength[no_rise_set] = np.choose(alt[:,1]>0*u.deg, [0, 1]) * u.day
return daylength
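The negative branch above is the usual wrap-around fix for event times; a unit-free sketch of the same idea, with times expressed as made-up fractions of a day:

import numpy as np

sunrise = np.array([0.25, 0.80, 0.30])   # made-up event times, fraction of a day
sunset  = np.array([0.75, 0.20, 0.90])

diff = sunset - sunrise
# where the raw difference is negative, the interval wrapped past midnight: add one day
daylength = np.choose(diff < 0, (diff, diff + 1.0))
print(daylength)   # [0.5 0.4 0.6]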
Example 4: calc_fritch
def calc_fritch(data):
"""Calculate the fritch index from tmin data (C)"""
growing_threshold=5
startdate=np.zeros(data.shape[1:])+999
enddate=np.zeros(data.shape[1:])
curwarmdays=np.zeros(data.shape[1:])
for doy in range(data.shape[0]):
warmdays=np.where(data[doy,...]>growing_threshold)
colddays=np.where(data[doy,...]<=growing_threshold)
if len(warmdays[0])>0:
curwarmdays[warmdays]+=1
if len(colddays[0])>0:
curwarmdays[colddays]=0
growing=np.where(curwarmdays==5)
if len(growing[0])>0:
startdate[growing]=np.choose((doy-5)<startdate[growing],(startdate[growing],doy-5))
enddate[growing]=np.choose(doy>enddate[growing],(enddate[growing],doy))
growing_season=enddate-startdate
growing_season[growing_season<0]=0
return growing_season
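A quick way to exercise the function is with synthetic tmin data; the 30-day, 2x2-cell array below is made up and simply warms by one degree per day.

import numpy as np

days = np.arange(30, dtype=float)
# shape (day, y, x): every grid cell sees tmin rise from -5 C to +24 C
data = np.broadcast_to((days - 5.0)[:, None, None], (30, 2, 2)).copy()

print(calc_fritch(data))   # growing-season length (days) per grid cell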
Example 5: _read_particles
def _read_particles(self):
if not os.path.exists(self.particle_filename): return
with open(self.particle_filename, 'r') as f:
lines = f.readlines()
self.num_stars = int(lines[0].strip().split(' ')[0])
for num, line in enumerate(lines[1:]):
particle_position_x = float(line.split(' ')[1])
particle_position_y = float(line.split(' ')[2])
particle_position_z = float(line.split(' ')[3])
coord = [particle_position_x, particle_position_y, particle_position_z]
# for each particle, determine which grids contain it
# copied from object_finding_mixin.py
mask = np.ones(self.num_grids)
for i in range(len(coord)):
np.choose(np.greater(self.grid_left_edge.d[:,i],coord[i]), (mask,0), mask)
np.choose(np.greater(self.grid_right_edge.d[:,i],coord[i]), (0,mask), mask)
ind = np.where(mask == 1)
selected_grids = self.grids[ind]
# in orion, particles always live on the finest level.
# so, we want to assign the particle to the finest of
# the grids we just found
if len(selected_grids) != 0:
grid = sorted(selected_grids, key=lambda grid: grid.Level)[-1]
ind = np.where(self.grids == grid)[0][0]
self.grid_particle_count[ind] += 1
self.grids[ind].NumberOfParticles += 1
# store the position in the *.sink file for fast access.
try:
self.grids[ind]._particle_line_numbers.append(num + 1)
except AttributeError:
self.grids[ind]._particle_line_numbers = [num + 1]
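Note that the two np.choose calls in the loop pass mask as the third positional argument, which is np.choose's out parameter, so the mask is zeroed in place; a minimal sketch of that pattern with a made-up coordinate and grid edges:

import numpy as np

mask = np.ones(5)
left_edges = np.array([0.0, 1.0, 2.0, 3.0, 4.0])
coord = 2.5

# zero out mask entries whose left edge lies to the right of the coordinate;
# passing mask as the third argument makes np.choose write the result in place
np.choose(np.greater(left_edges, coord), (mask, 0), mask)
print(mask)   # [1. 1. 1. 0. 0.]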
Example 6: hideNoData
def hideNoData(inBand,outBand):
noData = inBand.GetNoDataValue()
for y in range(inBand.YSize):
inLine = inBand.ReadAsArray(0,y,inBand.XSize,1,inBand.XSize,1)
outLine = numpy.choose(numpy.equal(inLine,noData),(inLine,0))
outLine = numpy.choose(numpy.not_equal(inLine,noData),(outLine,0xFF))
outBand.WriteArray(outLine,0,y)
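Per scan line the function turns nodata pixels into 0 and everything else into 0xFF; a plain-numpy sketch of that double select, with a made-up nodata value and without the GDAL band handling:

import numpy as np

no_data = -9999
in_line = np.array([[12, -9999, 7, -9999, 3]])

out_line = np.choose(np.equal(in_line, no_data), (in_line, 0))          # nodata -> 0
out_line = np.choose(np.not_equal(in_line, no_data), (out_line, 0xFF))  # valid -> 255
print(out_line)   # [[255   0 255   0 255]]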
Example 7: set_in_region
def set_in_region(a, b, alpha=1.0, beta=1.0, mask=None, out=None):
"""set `ret = alpha * a + beta * b` where mask is True"""
alpha = np.asarray(alpha, dtype=a.dtype)
beta = np.asarray(beta, dtype=a.dtype)
a_dat = a.data if isinstance(a, viscid.field.Field) else a
b_dat = b.data if isinstance(b, viscid.field.Field) else b
b = None
if _HAS_NUMEXPR:
vals = ne.evaluate("alpha * a_dat + beta * b_dat")
else:
vals = alpha * a_dat + beta * b_dat
a_dat = b_dat = None
if out is None:
out = field.empty_like(a)
if mask is None:
out.data[...] = vals
else:
if hasattr(mask, "nr_comps") and mask.nr_comps:
mask = mask.as_centered(a.center).as_layout(a.layout)
try:
out.data[...] = np.choose(mask, [out.data, vals])
except ValueError:
out.data[...] = np.choose(mask.data.reshape(list(mask.sshape) + [1]),
[out.data, vals])
return out
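The masked branch blends the existing data with the new values through np.choose; a plain-numpy sketch of that blend, leaving out the viscid field and layout handling:

import numpy as np

out = np.zeros(6)
vals = np.arange(6, dtype=float) * 10
mask = np.array([0, 1, 1, 0, 1, 0])

# keep `out` where the mask is 0, take `vals` where the mask is 1
out[...] = np.choose(mask, [out, vals])
print(out)   # [ 0. 10. 20.  0. 40.  0.]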
Example 8: quad_eqn
def quad_eqn(l, m, t, aa, bb, cc):
"""
solves the following equations for m and l:
m = (-bb +- sqrt(bb^2 - 4*aa*cc)) / (2*aa)
l = (x - a1 - a3*m) / (a2 + a4*m)
"""
if len(aa) == 0:
return
k = bb * bb - 4 * aa * cc
k = np.ma.masked_less(k, 0)
det = np.ma.sqrt(k)
m1 = (-bb - det) / (2 * aa)
l1 = (x[t] - a[0][t] - a[2][t] *
m1) / (a[1][t] + a[3][t] * m1)
m2 = (-bb + det) / (2 * aa)
l2 = (x[t] - a[0][t] - a[2][t] *
m2) / (a[1][t] + a[3][t] * m2)
t1 = np.logical_or(l1 < 0, l1 > 1)
t2 = np.logical_or(m1 < 0, m1 > 1)
t3 = np.logical_or(t1, t2)
m[t] = np.choose(t3, (m1, m2))
l[t] = np.choose(t3, (l1, l2))
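The last two lines pick, per element, whichever quadratic root falls inside [0, 1]; a minimal sketch of that root selection with made-up coefficients:

import numpy as np

aa = np.array([1.0, 1.0])
bb = np.array([-3.0, -0.5])
cc = np.array([2.0, -0.5])

det = np.sqrt(bb * bb - 4 * aa * cc)
m1 = (-bb - det) / (2 * aa)
m2 = (-bb + det) / (2 * aa)

# fall back to the second root wherever the first one is out of range
out_of_range = np.logical_or(m1 < 0, m1 > 1)
m = np.choose(out_of_range, (m1, m2))
print(m1, m2, m)   # [ 1.  -0.5] [2. 1.] [1. 1.]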
Example 9: stitch
def stitch(record1, record2):
seq1 = array([record1.seq.tostring()])
seq2 = array([reverse_complement(record2.seq.tostring())])
seq1.dtype = '|S1'
seq2.dtype = '|S1'
quals1 = array(record1.letter_annotations['phred_quality'])
quals2 = array(record2.letter_annotations['phred_quality'][::-1])
log10p_consensus_1 = log1p(-power(10, -quals1 / 10.)) / log(10)
log10p_consensus_2 = log1p(-power(10, -quals2 / 10.)) / log(10)
log10p_error_1 = -log10(3) - (quals1 / 10.)
log10p_error_2 = -log10(3) - (quals2 / 10.)
min_overlap = 1
max_overlap = max(len(record1), len(record2))
overlaps = {}
for overlap in range(1, max_overlap):
s1 = seq1[-overlap:]
s2 = seq2[:overlap]
q1 = quals1[-overlap:]
q2 = quals2[:overlap]
lpc1 = log10p_consensus_1[-overlap:]
lpc2 = log10p_consensus_2[:overlap]
lpe1 = log10p_error_1[-overlap:]
lpe2 = log10p_error_2[:overlap]
consensus = choose(q1 < q2, [s1, s2])
score = sum(choose(consensus == s1, [lpe1, lpc1])) + sum(choose(consensus == s2, [lpe2, lpc2])) + len(consensus) * log10(4) * 2 # last term is null hypothesis, p=1/4
consensus.dtype = '|S%i' % len(consensus)
overlaps[overlap] = (consensus[0],score)
return overlaps
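The consensus call inside the loop is an element-wise pick of the higher-quality base; a minimal sketch with made-up bases and Phred scores (plain character arrays here rather than SeqRecord objects):

import numpy as np

s1 = np.array(list("ACGT"))        # bases from read 1 over the overlap
s2 = np.array(list("ACCT"))        # bases from read 2 over the overlap
q1 = np.array([30, 10, 15, 40])    # made-up quality scores for read 1
q2 = np.array([20, 35, 30, 12])    # made-up quality scores for read 2

# at each position keep the base from whichever read has the higher quality
consensus = np.choose(q1 < q2, [s1, s2])
print("".join(consensus))          # ACCT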
Example 10: rgb_to_hsv
def rgb_to_hsv( r,g,b ):
maxc = numpy.maximum(r,numpy.maximum(g,b))
minc = numpy.minimum(r,numpy.minimum(g,b))
v = maxc
minc_eq_maxc = numpy.equal(minc,maxc)
# compute the difference, but reset zeros to ones to avoid division by zero later.
ones = numpy.ones((r.shape[0],r.shape[1]))
maxc_minus_minc = numpy.choose( minc_eq_maxc, (maxc-minc,ones) )
s = (maxc-minc) / numpy.maximum(ones,maxc)
rc = (maxc-r) / maxc_minus_minc
gc = (maxc-g) / maxc_minus_minc
bc = (maxc-b) / maxc_minus_minc
maxc_is_r = numpy.equal(maxc,r)
maxc_is_g = numpy.equal(maxc,g)
maxc_is_b = numpy.equal(maxc,b)
h = numpy.zeros((r.shape[0],r.shape[1]))
h = numpy.choose( maxc_is_b, (h,4.0+gc-rc) )
h = numpy.choose( maxc_is_g, (h,2.0+rc-bc) )
h = numpy.choose( maxc_is_r, (h,bc-gc) )
h = numpy.mod(h/6.0,1.0)
hsv = numpy.asarray([h,s,v])
return hsv
Example 11: __init__
def __init__(self, template_img_with_alpha):
# result must be 0 where template is transparent
# sum of result must be 0
# scale of result doesn't matter
# scale and bias of input shouldn't matter
assert template_img_with_alpha.shape[2] == 4
self._orig = template_img_with_alpha.astype(float)
return
opaque = template_img_with_alpha[:, :, 3] >= 128
opaque3 = numpy.dstack([opaque]*3)
#opaque_pixels = [pixel[:3].astype(int)
# for row in template_img_with_alpha
# for pixel in row
# if is_opaque(pixel)]
mean = numpy.sum(numpy.choose(opaque3, [0, template_img_with_alpha[:, :, :3]]), axis=(0, 1))/numpy.sum(opaque)
#print mean
res = numpy.choose(opaque3, [0, template_img_with_alpha[:, :, :3] - mean])
#res = numpy.array([
# [pixel[:3] - mean if is_opaque(pixel) else [0, 0, 0] for pixel in row]
#for row in template_img_with_alpha])
#cv2.imshow('normalize(res)', normalize(res))
self._template = res # floating point 3-channel image
Example 12: _partial_transpose_sparse
def _partial_transpose_sparse(rho, mask):
"""
Implement the partial transpose using the CSR sparse matrix.
"""
data = sp.lil_matrix((rho.shape[0], rho.shape[1]), dtype=complex)
for m in range(len(rho.data.indptr) - 1):
n1 = rho.data.indptr[m]
n2 = rho.data.indptr[m + 1]
psi_A = state_index_number(rho.dims[0], m)
for idx, n in enumerate(rho.data.indices[n1:n2]):
psi_B = state_index_number(rho.dims[1], n)
m_pt = state_number_index(
rho.dims[1], np.choose(mask, [psi_A, psi_B]))
n_pt = state_number_index(
rho.dims[0], np.choose(mask, [psi_B, psi_A]))
data[m_pt, n_pt] = rho.data.data[n1 + idx]
return Qobj(data.tocsr(), dims=rho.dims)
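The two np.choose calls swap the row and column quantum numbers of exactly the subsystems flagged by mask; a minimal sketch of that index swap with made-up state labels:

import numpy as np

psi_A = np.array([0, 1, 2])   # made-up row state labels, one per subsystem
psi_B = np.array([3, 4, 5])   # made-up column state labels
mask = np.array([0, 1, 0])    # transpose only the middle subsystem

row = np.choose(mask, [psi_A, psi_B])
col = np.choose(mask, [psi_B, psi_A])
print(row, col)   # [0 4 2] [3 1 5]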
Example 13: sparsify
def sparsify(a, p=0.25):
"""
SPARSIFY Randomly set matrix elements to zero.
S = SPARSIFY(A, P) is A with elements randomly set to zero
(S = S' if A is square and A = A', i.e. symmetry is preserved).
Each element has probability P of being zeroed.
Thus on average 100*P percent of the elements of A will be zeroed.
Default: P = 0.25.
Note added in porting: by inspection only, it appears that the m*lab
version may have a bug where it always returns zeros on the diagonal
for a symmetric matrix... can anyone confirm?
"""
if p < 0 or p > 1:
raise Higham('Second parameter must be between 0 and 1 inclusive.')
m, n = a.shape
if (a == a.T).all():
# Preserve symmetry
d = np.choose(nrnd.rand(m) > p, (np.zeros(m), np.diag(a)))
a = np.triu(a, 1) * (nrnd.rand(m, n) > p)
a = a + a.T
a = a + np.diag(d)
else:
# Unsymmetric case
a = np.choose(nrnd.rand(m, n) > p, (np.zeros((m, n)), a))
return a
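A quick usage sketch, assuming numpy.random is imported as nrnd as in the excerpt; the seed and the input matrix are arbitrary:

import numpy as np
import numpy.random as nrnd

nrnd.seed(0)
a = np.ones((4, 4))
s = sparsify(a, p=0.5)
print(s)
print((s == s.T).all())   # symmetry is preserved for a symmetric input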
Example 14: setFluxoCalSolo
def setFluxoCalSolo(self):
self.mask1 = self.ndvi < 0
#-----#------#
self.fluxoCalSolo = numpy.choose(self.mask1, (((self.temperaturaSuperficie - 273.15) * (0.0038 + (0.0074 * self.albedoSuperficie))\
* (1.0 - (0.98 * numpy.power(self.ndvi,4)))) * self.saldoRadiacao, self.valores.G * self.saldoRadiacao))
#-----#------#
self.mask1 = None
self.fluxoCalSolo = numpy.choose(self.mask, (self.valores.noValue, self.fluxoCalSolo))
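A plain-numpy sketch of the two-stage selection above (an empirical soil heat flux where NDVI is non-negative, a fixed fraction of net radiation where it is negative, then a nodata fill); the arrays, the stand-in formula, and the constants are all made up:

import numpy as np

ndvi = np.array([0.6, -0.1, 0.3, 0.0])
net_radiation = np.array([500.0, 480.0, 510.0, 0.0])
valid = np.array([True, True, True, False])   # hypothetical valid-pixel mask
no_value = -9999.0
g_fraction = 0.5                              # hypothetical G/Rn ratio for negative NDVI

formula = 0.1 * net_radiation * (1.0 - ndvi)  # stand-in for the empirical expression above
flux = np.choose(ndvi < 0, (formula, g_fraction * net_radiation))
flux = np.choose(valid, (no_value, flux))
print(flux)   # [ 2.000e+01  2.400e+02  3.570e+01 -9.999e+03]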
Example 15: updateDistribution
def updateDistribution(self, tree, dataMatrix, mislabels, distribution, beta):
probs = tree.predictProbabilities(dataMatrix.iloc[mislabels[:,0]])
probsIncorrect = np.choose(mislabels[:,1], probs.T)
probsCorrect = np.choose(mislabels[:,2], probs.T)
power = (0.5 * sum(distribution * (1 + probsCorrect - probsIncorrect)))
distribution = distribution * (np.power(beta,power))
distribution = helpers.toProbDistribution(distribution)
return distribution
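The two np.choose calls above pull out, for every misclassified sample, the probability assigned to one particular class; a minimal sketch of that per-row column selection with made-up probabilities:

import numpy as np

# made-up class probabilities for 3 samples over 4 classes
probs = np.array([[0.10, 0.60, 0.20, 0.10],
                  [0.30, 0.30, 0.20, 0.20],
                  [0.05, 0.05, 0.80, 0.10]])
labels = np.array([1, 0, 2])   # one class index per sample

# pick the labelled class's probability for each row
print(np.choose(labels, probs.T))   # [0.6 0.3 0.8]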