This article collects typical usage examples of the numpy.greater function in Python. If you have been wondering how exactly to use numpy.greater, what it is good for, or where to see it in action, the curated code examples below should help.
Fifteen code examples of the greater function are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
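Before the examples, a quick refresher on the function itself: np.greater(a, b) compares two arrays elementwise (with broadcasting) and returns a boolean array; it is the functional form of the > operator. A minimal sketch:
import numpy as np

a = np.array([1, 4, 9])
b = np.array([2, 4, 3])

# Elementwise comparison; returns a boolean array.
print(np.greater(a, b))   # [False False  True]
print(a > b)              # the operator form gives the same result

# Broadcasting against a scalar works the same way.
print(np.greater(a, 3))   # [False  True  True]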
Example 1: prune_outside_window
def prune_outside_window(boxlist, window):
    """Prunes bounding boxes that fall outside a given window.

    This function prunes bounding boxes that even partially fall outside the
    given window. See also ClipToWindow, which only prunes bounding boxes that
    fall completely outside the window and clips any bounding boxes that
    partially overflow.

    Args:
        boxlist: a BoxList holding M_in boxes.
        window: a numpy array of size 4, representing [ymin, xmin, ymax, xmax]
            of the window.

    Returns:
        pruned_corners: a tensor with shape [M_out, 4] where M_out <= M_in.
        valid_indices: a tensor with shape [M_out] indexing the valid bounding
            boxes in the input tensor.
    """
    y_min, x_min, y_max, x_max = np.array_split(boxlist.get(), 4, axis=1)
    win_y_min = window[0]
    win_x_min = window[1]
    win_y_max = window[2]
    win_x_max = window[3]
    coordinate_violations = np.hstack([np.less(y_min, win_y_min),
                                       np.less(x_min, win_x_min),
                                       np.greater(y_max, win_y_max),
                                       np.greater(x_max, win_x_max)])
    valid_indices = np.reshape(
        np.where(np.logical_not(np.max(coordinate_violations, axis=1))), [-1])
    return gather(boxlist, valid_indices), valid_indices
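As a quick illustration of the masking pattern above (not part of the original library code), here is a minimal self-contained sketch with made-up boxes and a window; the BoxList/gather helpers are replaced by plain arrays:
import numpy as np

boxes = np.array([[0.1, 0.1, 0.4, 0.4],    # fully inside
                  [0.0, -0.2, 0.3, 0.3],   # spills past xmin
                  [0.5, 0.5, 1.2, 0.9]])   # spills past ymax
window = np.array([0.0, 0.0, 1.0, 1.0])    # [ymin, xmin, ymax, xmax]

y_min, x_min, y_max, x_max = np.array_split(boxes, 4, axis=1)
violations = np.hstack([np.less(y_min, window[0]),
                        np.less(x_min, window[1]),
                        np.greater(y_max, window[2]),
                        np.greater(x_max, window[3])])
# A box is kept only if no coordinate violates the window.
valid = np.logical_not(violations.any(axis=1))
print(boxes[valid])   # only the first box survives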
Example 2: get_rt_change_deriv
def get_rt_change_deriv(kin_sig, bins, d_vel_thres=0., fs=60):
    '''
    input:
        kin_sig: trials x time array corresponding to velocity of the cursor
        bins: time axis corresponding to the columns of kin_sig
        d_vel_thres: threshold on the derivative of kin_sig; the reaction
            time (RT) is the first bin where the derivative exceeds it
        fs: sampling rate of kin_sig in Hz
    output:
        kin_feat: a trl x 2 array:
            column1 = RT in units of "bins" indices
            column2 = RT in units of time (bins[column1])
    '''
    ntrials = kin_sig.shape[0]
    kin_feat = np.zeros((ntrials, 2))

    # Iterate through trials
    for trl in range(ntrials):
        spd = kin_sig[trl, :]
        dt = 1. / fs
        d_spd = np.diff(spd, axis=0) / dt

        # First bin where the velocity derivative exceeds the threshold
        if len(np.ravel(np.nonzero(np.greater(d_spd, d_vel_thres)))) == 0:
            bin_rt = 0
        else:
            bin_rt = np.ravel(np.nonzero(np.greater(d_spd, d_vel_thres)))[0]

        kin_feat[trl, 0] = bin_rt + 1          # index of 'RT'
        kin_feat[trl, 1] = bins[bin_rt + 1]    # actual time of 'RT'
    return kin_feat
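The core trick here, finding the first index where a derivative exceeds a threshold, is easy to isolate. A minimal sketch with made-up numbers (not from the original codebase):
import numpy as np

fs = 60.0                                   # samples per second
spd = np.array([0.0, 0.01, 0.02, 0.5, 1.2, 1.1])
d_spd = np.diff(spd) * fs                   # velocity derivative

above = np.nonzero(np.greater(d_spd, 5.0))[0]
onset = above[0] if above.size else None    # first super-threshold bin
print(onset)                                # -> 2 (the 0.02 -> 0.5 jump)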
Example 3: _read_particles
def _read_particles(self):
    if not os.path.exists(self.particle_filename):
        return
    with open(self.particle_filename, 'r') as f:
        lines = f.readlines()
        self.num_stars = int(lines[0].strip().split(' ')[0])
        for num, line in enumerate(lines[1:]):
            particle_position_x = float(line.split(' ')[1])
            particle_position_y = float(line.split(' ')[2])
            particle_position_z = float(line.split(' ')[3])
            coord = [particle_position_x, particle_position_y, particle_position_z]
            # for each particle, determine which grids contain it
            # copied from object_finding_mixin.py
            mask = np.ones(self.num_grids)
            for i in range(len(coord)):
                np.choose(np.greater(self.grid_left_edge.d[:, i], coord[i]), (mask, 0), mask)
                np.choose(np.greater(self.grid_right_edge.d[:, i], coord[i]), (0, mask), mask)
            ind = np.where(mask == 1)
            selected_grids = self.grids[ind]
            # in orion, particles always live on the finest level.
            # so, we want to assign the particle to the finest of
            # the grids we just found
            if len(selected_grids) != 0:
                grid = sorted(selected_grids, key=lambda grid: grid.Level)[-1]
                ind = np.where(self.grids == grid)[0][0]
                self.grid_particle_count[ind] += 1
                self.grids[ind].NumberOfParticles += 1
                # store the position in the *.sink file for fast access.
                try:
                    self.grids[ind]._particle_line_numbers.append(num + 1)
                except AttributeError:
                    self.grids[ind]._particle_line_numbers = [num + 1]
Example 4: pickBreakpointV2
def pickBreakpointV2(response, x1, predictor):
    # Candidate breakpoints spanning the predictor's range.
    bpChoices = geneBpChoices(min(predictor), max(predictor), 20)
    results = np.zeros((len(bpChoices) - 1, 2))
    print(bpChoices)
    for i in range(len(bpChoices) - 1):
        print(i)
        # Hinge terms: zero below the candidate breakpoint, linear above it.
        x2star = (predictor - bpChoices[i]) * np.greater(predictor, bpChoices[i])
        x1star = x1 * np.greater(predictor, bpChoices[i])
        tempPredictor = np.array(list(zip(x1, x1star, predictor, x2star)))
        tempmodel = ols.ols(response, tempPredictor, 'y',
                            ['F1F2', 'F1F2star', 'dist', 'diststar'])
        results[i, 0] = i
        results[i, 1] = tempmodel.R2
    # Pick the breakpoint with the highest R2.
    optBP = int(results[np.argmax(results, axis=0)[1], 0])
    print('Optimal Index:', optBP)
    print('Optimal changepoint: ', bpChoices[optBP],
          ' exp value: ', np.exp(bpChoices[optBP]),
          ' with R2 = ', results[optBP, 1])
    x1star = x1 * np.greater(predictor, bpChoices[optBP])
    x2star = (predictor - bpChoices[optBP]) * np.greater(predictor, bpChoices[optBP])
    optPredictor = np.array(list(zip(x1, x1star, predictor, x2star)))
    optmodel = ols.ols(response, optPredictor, 'y',
                       ['F1F2', 'F1F2star', 'dist', 'diststar'])
    print(results, optmodel.b)
    print(optmodel.summary())
    return results
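The hinge term (predictor - bp) * np.greater(predictor, bp) is what makes the fit piecewise-linear. A tiny standalone sketch of that construction, with synthetic data and plain numpy least squares instead of the ols wrapper used above:
import numpy as np

rng = np.random.default_rng(1)
x = np.linspace(0, 10, 50)
# True model: slope 1 before the breakpoint at 4, slope 3 after it.
y = np.where(x < 4, x, 4.0 + 3.0 * (x - 4)) + rng.normal(0, 0.1, 50)

bp = 4.0
hinge = (x - bp) * np.greater(x, bp)        # zero left of bp, linear after
X = np.column_stack([np.ones_like(x), x, hinge])
beta, *_ = np.linalg.lstsq(X, y, rcond=None)
print(beta)   # ~[0, 1, 2]: slope 1 before bp, slope 1 + 2 = 3 after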
Example 5: evaluate_MI
def evaluate_MI(fname, threshold=0.95):
    CUT = slice(0, 1000)
    with open(fname, 'rb') as f:
        result = pickle.load(f)
    phase_phase_coherence = result['phase x phase data']
    phase_phase_CMI = result['phase CMI data']
    surrCoherence = result['phase x phase surrs'][CUT, ...]
    surrCMI = result['phase CMI surrs'][CUT, ...]
    phase_amp_condMI = result['phase amp CMI data']
    surrPhaseAmpCMI = result['phase amp CMI surrs'][CUT, ...]

    res_phase_coh = np.zeros_like(phase_phase_coherence)
    res_phase_cmi = np.zeros_like(res_phase_coh)
    res_phase_amp_CMI = np.zeros_like(res_phase_coh)

    # For each cell, the score is the fraction of surrogates the data value exceeds.
    for i in range(res_phase_coh.shape[0]):
        for j in range(res_phase_coh.shape[1]):
            res_phase_coh[i, j] = np.sum(np.greater(phase_phase_coherence[i, j], surrCoherence[:, i, j])) / float(surrCoherence.shape[0])
            res_phase_cmi[i, j] = np.sum(np.greater(phase_phase_CMI[i, j], surrCMI[:, i, j])) / float(surrCMI.shape[0])
            res_phase_amp_CMI[i, j] = np.sum(np.greater(phase_amp_condMI[i, j], surrPhaseAmpCMI[:, i, j])) / float(surrPhaseAmpCMI.shape[0])

    res_phase_coh_thr = np.zeros_like(res_phase_coh, dtype=int)
    res_phase_coh_thr[np.where(res_phase_coh > threshold)] = 1
    res_phase_cmi_thr = np.zeros_like(res_phase_cmi, dtype=int)
    res_phase_cmi_thr[np.where(res_phase_cmi > threshold)] = 1
    res_phase_amp_CMI_thr = np.zeros_like(res_phase_amp_CMI, dtype=int)
    res_phase_amp_CMI_thr[np.where(res_phase_amp_CMI > threshold)] = 1
    return res_phase_coh_thr, res_phase_cmi_thr, res_phase_amp_CMI_thr
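The pattern in this example, scoring each cell by the fraction of surrogates the observed statistic beats and then thresholding at 0.95, is a standard one-sided surrogate significance test. A minimal standalone sketch with synthetic numbers (not from the original analysis):
import numpy as np

rng = np.random.default_rng(0)
observed = 2.5                          # observed statistic for one cell
surrogates = rng.normal(size=1000)      # null distribution from surrogates

# Empirical score: fraction of surrogates below the observed value.
score = np.sum(np.greater(observed, surrogates)) / float(surrogates.shape[0])
significant = score > 0.95              # one-sided test at the 5% level
print(score, significant)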
Example 6: analyzeFrame
def analyzeFrame(bgrFrame):
    mutex.acquire()
    if lowerBound and upperBound:
        hsvFrame = cv2.cvtColor(bgrFrame, cv2.COLOR_BGR2HSV)
        centeredBox = hsvFrame[topLeft[1]:bottomLeft[1], topLeft[0]:topRight[0], :]
        boxFlat = centeredBox.reshape([-1, 3])
        numBroken = 0
        # Doing it this way removes worry of checkInBounds changing while
        # analyzing an individual frame, i.e., it won't take effect until
        # the next frame.
        if boundType == 'in':
            for i in range(boxFlat.shape[0]):
                isGreaterLower = numpy.all(numpy.greater(boxFlat[i], lowerBound))
                isLessUpper = numpy.all(numpy.less(boxFlat[i], upperBound))
                if isGreaterLower and isLessUpper:
                    numBroken = numBroken + 1
        else:
            for i in range(boxFlat.shape[0]):
                isLessLower = numpy.all(numpy.less(boxFlat[i], lowerBound))
                isGreaterUpper = numpy.all(numpy.greater(boxFlat[i], upperBound))
                if isLessLower and isGreaterUpper:
                    numBroken = numBroken + 1
        if (numBroken / area) >= threshold:
            sys.stderr.write('Exceeded\n')
            sys.stderr.flush()
    mutex.release()
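The per-pixel loop above can be vectorized: np.greater broadcasts the bound across the whole flattened box at once. A sketch of the equivalent in-bounds count, with synthetic data standing in for the HSV frame:
import numpy as np

rng = np.random.default_rng(3)
boxFlat = rng.integers(0, 256, size=(1000, 3))   # fake HSV pixels
lowerBound = np.array([30, 40, 40])
upperBound = np.array([90, 255, 255])

inside = np.logical_and(np.all(np.greater(boxFlat, lowerBound), axis=1),
                        np.all(np.less(boxFlat, upperBound), axis=1))
numBroken = int(inside.sum())   # same count as the 'in' branch of the loop
print(numBroken)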
Example 7: computeSTA
def computeSTA(spike_file, tdt_signal, channel, t_start, t_stop):
    '''
    Compute the spike-triggered average (STA) for a specific channel over a
    designated time window [t_start, t_stop].

    spike_file should be the result of plx = plexfile.openFile('filename.plx')
    and spike_file = plx.spikes[:].data
    tdt_signal should be the array of time-stamped values just for this channel
    '''
    channel_spikes = [entry for entry in spike_file
                      if (t_start <= entry[0] <= t_stop) & (entry[1] == channel)]
    units = [spike[2] for spike in channel_spikes]
    unit_vals = set(units)   # number of units
    unit_vals.discard(0)     # value 0 marks units flagged as noise events
    unit_sta = dict()

    tdt_times = np.ravel(tdt_signal.times)
    tdt_data = np.ravel(tdt_signal)

    for unit in unit_vals:
        spike_times = [spike[0] for spike in channel_spikes if (spike[2] == unit)]
        # look 1 s back in time until 1 s forward in time from the spike
        start_avg = [(time - 1) for time in spike_times]
        stop_avg = [(time + 1) for time in spike_times]
        # reference epoch length taken from the first spike
        epoch = np.logical_and(np.greater(tdt_times, start_avg[0]), np.less(tdt_times, stop_avg[0]))
        epoch_inds = np.ravel(np.nonzero(epoch))
        len_epoch = len(epoch_inds)
        sta = np.zeros(len_epoch)
        num_spikes = len(spike_times)
        for i in range(0, num_spikes):
            epoch = np.logical_and(np.greater(tdt_times, start_avg[i]), np.less(tdt_times, stop_avg[i]))
            epoch_inds = np.ravel(np.nonzero(epoch))
            if len(epoch_inds) == len_epoch:
                sta += tdt_data[epoch_inds]
        unit_sta[unit] = sta / float(num_spikes)

    return unit_sta
Example 8: count_lower_neighbors
def count_lower_neighbors(data):
    from numpy import zeros, greater, add, subtract, int8
    size_minus_2 = [s - 2 for s in data.shape]
    compare = zeros(size_minus_2, int)
    count = zeros(size_minus_2, int)
    offsets = ((-1,-1,-1), (-1,-1,0), (-1,-1,1),
               (-1,0,-1),  (-1,0,0),  (-1,0,1),
               (-1,1,-1),  (-1,1,0),  (-1,1,1),
               (0,-1,-1),  (0,-1,0),  (0,-1,1),
               (0,0,-1),              (0,0,1),
               (0,1,-1),   (0,1,0),   (0,1,1),
               (1,-1,-1),  (1,-1,0),  (1,-1,1),
               (1,0,-1),   (1,0,0),   (1,0,1),
               (1,1,-1),   (1,1,0),   (1,1,1))
    xsize, ysize, zsize = data.shape

    # Compare each interior voxel against each of its 26 neighbors and
    # accumulate how many of them are strictly lower.
    for xo, yo, zo in offsets:
        greater(data[1:-1, 1:-1, 1:-1],
                data[xo+1:xsize-1+xo, yo+1:ysize-1+yo, zo+1:zsize-1+zo],
                compare)
        add(compare, count, count)

    subtract(count, 13, count)
    return count.astype(int8)
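A detail worth noting here is the three-argument ufunc form: greater(a, b, compare) writes the boolean result into a preallocated buffer (bool casts safely to int), and add(compare, count, count) accumulates in place, so no fresh temporary is allocated on each of the 26 iterations. A tiny sketch of the same pattern:
import numpy as np

a = np.array([3, 1, 4, 1, 5])
b = np.array([2, 2, 2, 2, 2])
compare = np.zeros(5, dtype=int)
count = np.zeros(5, dtype=int)

np.greater(a, b, compare)      # writes True/False into the int buffer as 1/0
np.add(compare, count, count)  # in-place accumulation: count += compare
print(count)                   # [1 0 1 0 1]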
Example 9: _get_plottable
def _get_plottable(self):
    # If log scale is set, only positive data will be returned.
    x, y = self._x, self._y
    try:
        logx = self.get_transform().get_funcx().get_type() == LOG10
    except RuntimeError:
        logx = False  # non-separable
    try:
        logy = self.get_transform().get_funcy().get_type() == LOG10
    except RuntimeError:
        logy = False  # non-separable
    if not logx and not logy:
        return x, y
    if self._logcache is not None:
        waslogx, waslogy, xcache, ycache = self._logcache
        if logx == waslogx and logy == waslogy:
            return xcache, ycache
    if logx:
        indx = npy.greater(x, 0)
    else:
        indx = npy.ones(len(x))
    if logy:
        indy = npy.greater(y, 0)
    else:
        indy = npy.ones(len(y))
    ind, = npy.nonzero(npy.logical_and(indx, indy))
    x = npy.take(x, ind)
    y = npy.take(y, ind)
    self._logcache = logx, logy, x, y
    return x, y
Example 10: _getinvisible
def _getinvisible(self):
    if self.invisible is not None:
        inv = self.invisible
    else:
        inv = np.zeros(len(self.atoms))
    if self.invisibilityfunction:
        inv = np.logical_or(inv, self.invisibilityfunction(self.atoms))
    r = self._getpositions()
    if len(r) > len(inv):
        # This will happen in parallel simulations due to ghost atoms.
        # They are invisible. Hmm, this may cause trouble.
        i2 = np.ones(len(r))
        i2[:len(inv)] = inv
        inv = i2
        del i2
    if self.cut["xmin"] is not None:
        inv = np.logical_or(inv, np.less(r[:, 0], self.cut["xmin"]))
    if self.cut["xmax"] is not None:
        inv = np.logical_or(inv, np.greater(r[:, 0], self.cut["xmax"]))
    if self.cut["ymin"] is not None:
        inv = np.logical_or(inv, np.less(r[:, 1], self.cut["ymin"]))
    if self.cut["ymax"] is not None:
        inv = np.logical_or(inv, np.greater(r[:, 1], self.cut["ymax"]))
    if self.cut["zmin"] is not None:
        inv = np.logical_or(inv, np.less(r[:, 2], self.cut["zmin"]))
    if self.cut["zmax"] is not None:
        inv = np.logical_or(inv, np.greater(r[:, 2], self.cut["zmax"]))
    return inv
Example 11: sample_3d_pdf
def sample_3d_pdf(self, pdf, points, xlim, ylim, zlim):
    logger.info("Sampling FD distribution for {0} particles.".format(points.shape[0]))

    # Create CDF in axis 0 direction by summing in axis 1, then cumsum:
    F = pdf.sum(2).sum(1).cumsum()
    F /= F.max()
    x = np.interp(points[:, 0], F, np.arange(F.shape[0]))
    xi = np.around(x).astype(int)  # For indexing

    F2 = pdf.sum(2).cumsum(axis=1)
    F2 /= F2.max(axis=1).reshape((-1, 1)).repeat(F2.shape[1], axis=1)
    yi = np.greater(F2[xi, :], points[:, 1].reshape((-1, 1))).argmax(axis=1)
    y = yi - (F2[xi, yi] - points[:, 1]) / (F2[xi, yi] - F2[xi, yi - 1])  # Interpolation

    F3 = pdf.cumsum(axis=2)
    F3 /= F3.max(axis=2).reshape((F3.shape[0], F3.shape[1], 1)).repeat(F3.shape[2], axis=2)
    zi = np.greater(F3[xi, yi, :], points[:, 2].reshape((-1, 1))).argmax(axis=1)
    z = zi - (F3[xi, yi, zi] - points[:, 2]) / (F3[xi, yi, zi] - F3[xi, yi, zi - 1])  # Interpolation

    px = xlim[0] + x * (xlim[1] - xlim[0]) / pdf.shape[0]
    py = ylim[0] + y * (ylim[1] - ylim[0]) / pdf.shape[1]
    pz = zlim[0] + z * (zlim[1] - zlim[0]) / pdf.shape[2]
    p = np.hstack((px.reshape((-1, 1)), py.reshape((-1, 1)), pz.reshape((-1, 1))))
    return p
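The np.greater(...).argmax(axis=1) idiom in this example is inverse-transform sampling: the first CDF entry that exceeds a uniform draw is the sampled bin. A one-dimensional sketch with a made-up distribution:
import numpy as np

rng = np.random.default_rng(2)
pdf = np.array([0.1, 0.2, 0.4, 0.2, 0.1])    # discrete pdf over 5 bins
cdf = pdf.cumsum()
cdf /= cdf[-1]

u = rng.random(10000).reshape(-1, 1)
# First bin whose CDF exceeds the uniform draw -> inverse-transform sample.
samples = np.greater(cdf, u).argmax(axis=1)
print(np.bincount(samples) / 10000.0)        # ~ [0.1, 0.2, 0.4, 0.2, 0.1]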
Example 12: date_start_surcote
def date_start_surcote(self, data, trimesters_tot, trim_maj_tot, age_min_retirement):
    ''' Determines the individual date from which the pension bonus (surcote)
    applies, i.e., the person has reached the legal retirement age and has
    contributed the target number of quarters.
    Note: for now we could return only the year. '''
    agem = data.info_ind['agem']
    # TODO: do something better with datesim
    datesim = self.dateleg.liam
    P = reduce(getattr, self.param_name.split('.'), self.P)
    if P.surcote.exist == 0:
        # No surcote scheme exists
        return [2100 * 100 + 1] * len(trim_maj_tot)
    else:
        # 1. Build the boolean matrix indicating, per year, whether the
        #    surcote applies according to the quarters criterion
        n_trim = array(P.plein.n_trim)
        cumul_trim = trimesters_tot.cumsum(axis=1)
        trim_limit = array((n_trim - nan_to_num(trim_maj_tot)))
        years_surcote_trim = greater(cumul_trim.T, trim_limit).T
        nb_years = years_surcote_trim.shape[1]
        # 2. Build the boolean matrix indicating, per year, whether the
        #    surcote applies according to the age criterion
        age_by_year = array([array(agem) - 12 * i for i in reversed(range(nb_years))])
        years_surcote_age = greater(age_by_year, array(age_min_retirement)).T
        # 3. Count the number of years meeting both criteria
        years_surcote = years_surcote_trim * years_surcote_age
        nb_years_surcote = years_surcote.sum(axis=1)
        start_surcote = [datesim - nb_year * 100
                         if nb_year > 0 else 2100 * 100 + 1
                         for nb_year in nb_years_surcote]
        return start_surcote
Example 13: chkoverlap
def chkoverlap(par0, par1, nphi=100):
    """
    Check for overlap between two ellipses.
    """
    phiLIST = np.linspace(0., 2 * np.pi, nphi)
    x0, y0 = phi2xy_ellipse(phiLIST, **par0)
    r0 = np.sqrt(x0**2 + y0**2)
    x1, y1 = phi2xy_ellipse(phiLIST, **par1)
    r1 = np.sqrt(x1**2 + y1**2)
    # Overlap unless one ellipse is strictly inside the other at every angle.
    return not (np.all(np.greater(r0, r1)) or np.all(np.greater(r1, r0)))
Example 14: __gt__
def __gt__(a, b):
    try:
        return np.greater(a.v, b.v)
    except AttributeError:
        if isinstance(a, Measurement):
            return np.greater(a.v, b)
        else:
            return np.greater(a, b.v)
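To see how this operator hook behaves, here is a minimal self-contained sketch of a Measurement-like class; the real class presumably carries more state, and only the .v attribute is assumed here for illustration:
import numpy as np

class Measurement:
    """Toy stand-in: wraps a value array in .v (assumption for illustration)."""
    def __init__(self, v):
        self.v = np.asarray(v)

    def __gt__(a, b):
        try:
            return np.greater(a.v, b.v)        # Measurement > Measurement
        except AttributeError:
            if isinstance(a, Measurement):
                return np.greater(a.v, b)      # Measurement > scalar/array
            else:
                return np.greater(a, b.v)      # scalar/array > Measurement

m1 = Measurement([1.0, 3.0])
m2 = Measurement([2.0, 2.0])
print(m1 > m2)    # [False  True]
print(m1 > 2.5)   # [False  True]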
Example 15: valid_na_data
def valid_na_data(ij):
    """Pull out the k-values of an ijk array that are positive and have
    indices in the vicinity of North America."""
    x = ij_to_ll(ij)
    imask = np.logical_and(np.greater(x[:, 0], -150), np.greater(-50, x[:, 0]))
    jmask = np.logical_and(np.greater(x[:, 1], 20), np.greater(70, x[:, 1]))
    kmask = np.greater(x[:, 2], 0)
    xmask = np.logical_and(np.logical_and(imask, jmask), kmask)
    return x[:, 2][xmask]