This article collects typical usage examples of the numpy.less function in Python. If you have been wondering what numpy.less does, how to call it, or what it looks like in real code, the curated examples below should help.
Fifteen code examples of the less function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code samples.
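Before the collected examples, here is a minimal sketch of numpy.less itself: it is the functional form of the < operator, comparing two array-like inputs elementwise (with normal NumPy broadcasting) and returning a boolean array.

import numpy as np

a = np.array([1.0, 2.0, 3.0])
b = np.array([2.0, 2.0, 2.0])
print(np.less(a, b))        # [ True False False], elementwise a < b
print(np.less(a, 2.0))      # scalars are broadcast: [ True False False]
print(np.less(a, b).all())  # combine with .any()/.all() for aggregate tests: False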
Example 1: on_epoch_end
def on_epoch_end(self, epoch, logs={}):
    global DROPOUT_RATES
    assert hasattr(self.model.optimizer, 'lr'), \
        'Optimizer must have a "lr" attribute.'
    current = logs.get('val_loss')
    if not np.less(current, self.previous):
        if self.wait > self.patience:
            self.wait = 0.0
            lr = self.model.optimizer.get_config()["lr"]
            print(lr, type(lr))
            if self.verbose > 0:
                print("decreasing learning rate %f to %f" % (lr, lr / 1.01))
            K.set_value(self.model.optimizer.lr, lr / self.lr_divide)
            K.set_value(self.model.drop)
        else:
            self.wait += 1
            print("increasing dropout rates: " + ",".join([str(i) for i in DROPOUT_RATES]))
            for i, j in enumerate(DROPOUT_RATES):
                DROPOUT_RATES[i] = j * 1.05
            print("new dropout rates: " + ",".join([str(i) for i in DROPOUT_RATES]))
    else:
        self.wait = 0.0
        if np.less(current, self.best_loss):
            lr = self.model.optimizer.get_config()["lr"]
            print(lr, type(lr))
            K.set_value(self.model.optimizer.lr, lr * 1.01)
            print("increasing learning rate from %f to %f" % (lr, lr * 1.01))
            print("decreasing dropout rates: " + ",".join([str(i) for i in DROPOUT_RATES]))
            for i, j in enumerate(DROPOUT_RATES):
                DROPOUT_RATES[i] = j / 1.05
            print("new dropout rates: " + ",".join([str(i) for i in DROPOUT_RATES]))
        elif self.verbose > 0:
            print("learning rate is good for now")
    self.previous = current
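Note that in this callback np.less is applied to plain Python floats (the current and previous validation losses); on scalars it returns a numpy.bool_ that behaves like an ordinary boolean in the surrounding if statements. A tiny illustration with made-up values:

import numpy as np

previous, current = 0.52, 0.47
improved = np.less(current, previous)
print(improved, type(improved))   # True <class 'numpy.bool_'>
if not improved:
    print("no improvement this epoch")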
Example 2: computePeakPowerPerChannel
def computePeakPowerPerChannel(lfp, Fs, stim_freq, t_start, t_end, freq_window):
    '''
    Input:
        - lfp: dictionary with one entry per channel of array of lfp samples
        - Fs: sample frequency in Hz
        - stim_freq: frequency to notch out when normalizing spectral power
        - t_start: time window start in units of sample number
        - t_end: time window end in units of sample number
        - freq_window: frequency band over which to look for peak power, should be of form [f_low, f_high]
    Output:
        - peak_power: an array of length equal to the number of channels, containing the peak power of each
          channel in the designated frequency band
    '''
    channels = lfp.keys()
    f_low = freq_window[0]
    f_high = freq_window[1]
    counter = 0
    peak_power = np.zeros(len(channels))
    for chann in channels:
        lfp_snippet = lfp[chann][t_start:t_end]
        num_timedom_samples = lfp_snippet.size
        freq, Pxx_den = signal.welch(lfp_snippet, Fs, nperseg=512, noverlap=256)
        # normalize by the power outside the +/- 3 Hz notch around stim_freq
        norm_freq = np.append(np.ravel(np.nonzero(np.less(freq, stim_freq - 3))),
                              np.ravel(np.nonzero(np.greater(freq, stim_freq + 3))))
        total_power_Pxx_den = np.sum(Pxx_den[norm_freq])
        Pxx_den = Pxx_den / total_power_Pxx_den
        freq_band = np.less(freq, f_high) & np.greater(freq, f_low)
        freq_band_ind = np.ravel(np.nonzero(freq_band))
        peak_power[counter] = np.max(Pxx_den[freq_band_ind])
        counter += 1
    return peak_power
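As a rough usage sketch, the function above could be called with a synthetic two-channel LFP dictionary; the channel containing a 25 Hz oscillation should show a much larger normalized peak in the 15-35 Hz band. The values here (Fs, stim_freq, the window) are made-up illustrations, and the snippet assumes numpy and scipy.signal are imported as np and signal, as the function expects.

import numpy as np
from scipy import signal

Fs = 1000.0
t = np.arange(0, 4.0, 1.0 / Fs)
lfp = {0: np.sin(2 * np.pi * 25 * t) + 0.1 * np.random.randn(t.size),  # 25 Hz oscillation + noise
       1: np.random.randn(t.size)}                                     # noise only
peak = computePeakPowerPerChannel(lfp, Fs, stim_freq=60, t_start=0, t_end=2000, freq_window=[15, 35])
print(peak)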
Example 3: _getinvisible
def _getinvisible(self):
    if self.invisible is not None:
        inv = self.invisible
    else:
        inv = np.zeros(len(self.atoms))
    if self.invisibilityfunction:
        inv = np.logical_or(inv, self.invisibilityfunction(self.atoms))
    r = self._getpositions()
    if len(r) > len(inv):
        # This will happen in parallel simulations due to ghost atoms.
        # They are invisible. Hmm, this may cause trouble.
        i2 = np.ones(len(r))
        i2[:len(inv)] = inv
        inv = i2
        del i2
    if self.cut["xmin"] is not None:
        inv = np.logical_or(inv, np.less(r[:, 0], self.cut["xmin"]))
    if self.cut["xmax"] is not None:
        inv = np.logical_or(inv, np.greater(r[:, 0], self.cut["xmax"]))
    if self.cut["ymin"] is not None:
        inv = np.logical_or(inv, np.less(r[:, 1], self.cut["ymin"]))
    if self.cut["ymax"] is not None:
        inv = np.logical_or(inv, np.greater(r[:, 1], self.cut["ymax"]))
    if self.cut["zmin"] is not None:
        inv = np.logical_or(inv, np.less(r[:, 2], self.cut["zmin"]))
    if self.cut["zmax"] is not None:
        inv = np.logical_or(inv, np.greater(r[:, 2], self.cut["zmax"]))
    return inv
Example 4: _numpy
def _numpy(self, data, weights, shape):
    q = self.quantity(data)
    self._checkNPQuantity(q, shape)
    self._checkNPWeights(weights, shape)
    weights = self._makeNPWeights(weights, shape)
    newentries = weights.sum()
    import numpy
    selection = numpy.isnan(q)
    numpy.bitwise_not(selection, selection)
    subweights = weights.copy()
    subweights[selection] = 0.0
    self.nanflow._numpy(data, subweights, shape)
    # avoid nan warning in calculations by flinging the nans elsewhere
    numpy.bitwise_not(selection, selection)
    q = numpy.array(q, dtype=numpy.float64)
    q[selection] = float("-inf")
    weights = weights.copy()
    weights[selection] = 0.0
    selection = numpy.empty(q.shape, dtype=numpy.bool_)
    for threshold, sub in self.bins:
        numpy.less(q, threshold, selection)
        subweights[:] = weights
        subweights[selection] = 0.0
        sub._numpy(data, subweights, shape)
    # no possibility of exception from here on out (for rollback)
    self.entries += float(newentries)
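The binning loop above uses the three-argument form numpy.less(q, threshold, selection): the third argument is the ufunc out array, so the boolean result is written into a preallocated buffer instead of a new array being allocated on every iteration. A minimal sketch of that pattern:

import numpy as np

q = np.array([0.5, 1.5, 2.5, 3.5])
selection = np.empty(q.shape, dtype=bool)   # reused output buffer
for threshold in (1.0, 2.0, 3.0):
    np.less(q, threshold, selection)        # same result as q < threshold, written in place
    print(threshold, int(selection.sum()), "values below threshold")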
Example 5: testCompRSS
def testCompRSS():
    x1 = np.array([1, 2, 3])
    y1 = np.array([1, 2, 3])
    mod1 = lsr.LeastSquare(x1, y1)
    try:
        mod1.compRSS(x1, estimator="NormalFunction")
        print "FAILED to check input arguments!"
    except ValueError:
        print "check input arguments CORRECT!"
    try:
        mod1.compRSS(estimator='NormalFunction')
        print "FAILED to catch non-initialization error"
    except ValueError:
        print "check field variable initialization CORRECT!"
    mod1.normFunc()
    rssNF = mod1.compRSS(estimator='NormalFunction')
    epson = 1e-6
    if np.less(abs(rssNF), epson).all():
        print "compute RSS through normal function CORRECT!"
    else:
        print "FAILED to compute RSS correctly through normal function!"
    mod1.gradientDescent(step=0.05, iteration=150)
    rssGD = mod1.compRSS()
    if np.less(abs(rssGD), epson).all():
        print "compute RSS through gradient descent CORRECT!"
    else:
        print "FAILED to compute RSS correctly through gradient descent!"
def computeSTA(spike_file,tdt_signal,channel,t_start,t_stop):
'''
Compute the spike-triggered average (STA) for a specific channel overa designated time window
[t_start,t_stop].
spike_file should be the results of plx = plexfile.openFile('filename.plx') and spike_file = plx.spikes[:].data
tdt_signal should be the array of time-stamped values just for this channel
'''
channel_spikes = [entry for entry in spike_file if (t_start <= entry[0] <= t_stop)&(entry[1]==channel)]
units = [spike[2] for spike in channel_spikes]
unit_vals = set(units) # number of units
unit_vals.remove(0) # value 0 are units marked as noise events
unit_sta = dict()
tdt_times = np.ravel(tdt_signal.times)
tdt_data = np.ravel(tdt_signal)
for unit in unit_vals:
spike_times = [spike[0] for spike in channel_spikes if (spike[2]==unit)]
start_avg = [(time - 1) for time in spike_times] # look 1 s back in time until 1 s forward in time from spike
stop_avg = [(time + 1) for time in spike_times]
epoch = np.logical_and(np.greater(tdt_times,start_avg[0]),np.less(tdt_times,stop_avg[0]))
epoch_inds = np.ravel(np.nonzero(epoch))
len_epoch = len(epoch_inds)
sta = np.zeros(len_epoch)
num_spikes = len(spike_times)
for i in range(0,num_spikes):
epoch = np.logical_and(np.greater(tdt_times,start_avg[i]),np.less(tdt_times,stop_avg[i]))
epoch_inds = np.ravel(np.nonzero(epoch))
if (len(epoch_inds) == len_epoch):
sta += tdt_data[epoch_inds]
unit_sta[unit] = sta/float(num_spikes)
return unit_sta
Example 7: prune_outside_window
def prune_outside_window(boxlist, window):
    """Prunes bounding boxes that fall outside a given window.

    This function prunes bounding boxes that even partially fall outside the given
    window. See also ClipToWindow which only prunes bounding boxes that fall
    completely outside the window, and clips any bounding boxes that partially
    overflow.

    Args:
      boxlist: a BoxList holding M_in boxes.
      window: a numpy array of size 4, representing [ymin, xmin, ymax, xmax]
        of the window.

    Returns:
      pruned_corners: a tensor with shape [M_out, 4] where M_out <= M_in.
      valid_indices: a tensor with shape [M_out] indexing the valid bounding boxes
        in the input tensor.
    """
    y_min, x_min, y_max, x_max = np.array_split(boxlist.get(), 4, axis=1)
    win_y_min = window[0]
    win_x_min = window[1]
    win_y_max = window[2]
    win_x_max = window[3]
    coordinate_violations = np.hstack([np.less(y_min, win_y_min),
                                       np.less(x_min, win_x_min),
                                       np.greater(y_max, win_y_max),
                                       np.greater(x_max, win_x_max)])
    valid_indices = np.reshape(
        np.where(np.logical_not(np.max(coordinate_violations, axis=1))), [-1])
    return gather(boxlist, valid_indices), valid_indices
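To make the coordinate_violations logic concrete, here is a standalone sketch of the same test on plain arrays, without the BoxList and gather helpers the function assumes from its surrounding library; the box and window values are invented for illustration.

import numpy as np

boxes = np.array([[0.1, 0.1, 0.4, 0.4],     # fully inside the window
                  [0.3, 0.3, 0.9, 0.9],     # spills past ymax/xmax
                  [-0.1, 0.2, 0.5, 0.6]])   # starts above the window's ymin
window = np.array([0.0, 0.0, 0.8, 0.8])     # [ymin, xmin, ymax, xmax]
y_min, x_min, y_max, x_max = np.array_split(boxes, 4, axis=1)
violations = np.hstack([np.less(y_min, window[0]),
                        np.less(x_min, window[1]),
                        np.greater(y_max, window[2]),
                        np.greater(x_max, window[3])])
valid = np.logical_not(violations.any(axis=1))
print(np.where(valid)[0])                   # [0]: only the first box survives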
Example 8: incident
def incident(lat, day, hour, tilt, direction, attenuate=False):
    """
    incident(lat, day, hour, tilt, direction) computes the normalized
    incident solar radiation of the beam on a panel with normal tilt
    relative to vertical and oriented at angle direction relative to
    true north.

        incident ~ cos X = cos(tilt)*cos(alt) + sin(tilt)*sin(alt)*cos(dir-az)

    The optional attenuate factor accounts for attenuation through the
    atmosphere, typically used in conjunction with computing radiation
    onto an object.
    """
    zen = zenith(lat, day, hour)
    az = azimuth(lat, day, hour)
    zrad = np.radians(zen)
    trad = np.radians(tilt)
    drad = np.radians(direction - az)
    vert = np.where(np.less(zen, 90), np.cos(trad) * np.cos(zrad), 0)
    hor = np.where(np.less(zen, 90), np.sin(trad) * np.sin(zrad) * np.cos(drad), 0)
    cosX = np.maximum(0, hor + vert)
    if (attenuate):
        if (attenuate == True): tau = 0.1
        else: tau = attenuate
        return cosX * np.exp(-tau / np.cos(zrad))
    else:
        return cosX
Example 9: _findRobots
def _findRobots(self):
    """ Finds the robots among the edges found
    """
    ## for each right edge find the next closest left edge; such an edge pair could be a robot
    self.Robots = list()
    if len(self.RightEdges) == 0 or len(self.LeftEdges) == 0:
        return
    for rightedge in self.RightEdges:
        leftedge = self.LeftEdges[0]
        i = 1
        while leftedge < rightedge:
            if i >= len(self.LeftEdges):
                break
            leftedge = self.LeftEdges[i]
            i = i + 1
        ## now calculate the distance between the two edges
        distance = self.__calculateDistanceBetweenEdges(leftedge, rightedge)
        if distance > self.MINIMUM_NAO_WIDTH and distance < self.MAXIMUM_NAO_WIDTH:
            x = self.CartesianData[0, rightedge:leftedge+1]
            y = self.CartesianData[1, rightedge:leftedge+1]
            r = self.PolarData[0, rightedge:leftedge+1]
            c = numpy.less(r, 409.5)
            x = numpy.compress(c, x)
            y = numpy.compress(c, y)
            robotx = self.__averageObjectDistance(x)
            roboty = self.__averageObjectDistance(y)
            c = numpy.logical_and(numpy.less(numpy.fabs(x - robotx), self.MAXIMUM_NAO_WIDTH),
                                  numpy.less(numpy.fabs(y - roboty), self.MAXIMUM_NAO_WIDTH))
            x = numpy.compress(c, x)
            y = numpy.compress(c, y)
            robotr = math.sqrt(robotx**2 + roboty**2)
            robotbearing = math.atan2(roboty, robotx)
            self.Robots.append(Robot(robotx, roboty, robotr, robotbearing, x, y))
Example 10: finite_diff_array
def finite_diff_array(fx, x, ix, order, window, out=None):  # pragma: no cover
    """Fornberg finite difference method for array of points `ix`.
    """
    fx = fx.astype(np.float64)
    w = window[0]
    if w < 0:  # use whole window
        for i, z in enumerate(ix):
            out[i] = diff_fornberg(fx, x, z, order[0])
    else:
        forward_limit = (x[0] + w / 2)
        forward_win = x[0] + w
        backward_limit = (x[-1] - w / 2)
        backward_win = x[-1] - w
        for i, z in enumerate(ix):
            if z < forward_limit:  # use forward diff
                bm = np.less(x, forward_win)
            elif z > backward_limit:  # backward diff
                bm = np.greater(x, backward_win)
            else:  # central diff
                bm = np.less(np.abs(x - z), w)
            wx = x[bm]
            wfx = fx[bm]
            out[i] = diff_fornberg(wfx, wx, z, order[0])
def generateErrors(L,p):
# Generate errors on each edge independently with probability p
edgesX = np.less(np.random.rand(L,L),p) # Errors on horizontal edges
edgesY = np.less(np.random.rand(L,L),p) # Errors on vertical edges
n = np.sum(edgesX) + np.sum(edgesY)
## print 'n = %d'%n
A = findSyndromes(edgesX,edgesY,L)
## print 'lattice'
## printLattice(A,[],edgesX,edgesY,L)
pairsA = findPairs(A,edgesX,edgesY,L)
correctErrorsA(edgesX,edgesY,pairsA,L)
A = findSyndromes(edgesX,edgesY,L)
## print 'correctedLattice1'
## printLattice(A,[],edgesX,edgesY,L)
pairsA = findPairs(A,edgesX,edgesY,L)
correctErrorsA(edgesX,edgesY,pairsA,L)
## A = findSyndromes(edgesX,edgesY,L)
## B = findSyndromesZ(edgesX,edgesY,L)
## print 'correctedLattice2'
## printLattice(A,[],edgesX,edgesY,L)
## print logicalX(edgesX,L)
## print logicalZ(edgesY,L)
return logicalX(edgesX,L)&logicalZ(edgesY,L)
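The first two lines of generateErrors show a common idiom: comparing uniform random numbers against a probability p with np.less produces an independent Bernoulli(p) sample for every entry. A quick self-contained check of that idiom:

import numpy as np

rng = np.random.default_rng(0)
p = 0.1
errors = np.less(rng.random((1000, 1000)), p)   # each entry is True with probability p
print(errors.mean())                            # empirical error rate, close to 0.1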
Example 12: analyzeFrame
def analyzeFrame(bgrFrame):
    mutex.acquire()
    if lowerBound and upperBound:
        hsvFrame = cv2.cvtColor(bgrFrame, cv2.COLOR_BGR2HSV)
        centeredBox = hsvFrame[topLeft[1]:bottomLeft[1], topLeft[0]:topRight[0], :]
        boxFlat = centeredBox.reshape([-1, 3])
        numBroken = 0
        # Doing it this way removes the worry of checkInBounds changing while analyzing an individual frame,
        # i.e., it won't take effect until the next frame.
        if boundType == 'in':
            for i in xrange(0, (boxFlat.shape)[0]):
                isGreaterLower = numpy.all(numpy.greater(boxFlat[i], lowerBound))
                isLessUpper = numpy.all(numpy.less(boxFlat[i], upperBound))
                if isGreaterLower and isLessUpper:
                    numBroken = numBroken + 1
        else:
            for i in xrange(0, (boxFlat.shape)[0]):
                isLessLower = numpy.all(numpy.less(boxFlat[i], lowerBound))
                isGreaterUpper = numpy.all(numpy.greater(boxFlat[i], upperBound))
                if isLessLower and isGreaterUpper:
                    numBroken = numBroken + 1
        if (numBroken / area) >= threshold:
            sys.stderr.write('Exceeded\n')
            sys.stderr.flush()
    mutex.release()
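The per-pixel Python loops above are easy to read but slow; the same "in bounds" count can be obtained in one vectorized pass, since np.less and np.greater broadcast the 3-element bound against every pixel row. The sketch below illustrates that variant with made-up data and bounds; it is not a drop-in replacement for the function above.

import numpy as np

boxFlat = np.random.randint(0, 256, size=(10000, 3))   # stand-in for the flattened HSV box
lowerBound = np.array([20, 100, 100])
upperBound = np.array([40, 255, 255])
inBounds = np.all(np.greater(boxFlat, lowerBound) & np.less(boxFlat, upperBound), axis=1)
numBroken = int(inBounds.sum())                         # pixels strictly inside the bounds
print(numBroken)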
Example 13: mat
def mat(I, viewOutput=True):
    stretch = 0
    scale = 1
    npeaks = 1
    mi = imorlet(stretch, scale, 0, npeaks)
    Gx = cv2.filter2D(I, -1, mi)
    mi = imorlet(stretch, scale, 90, npeaks)
    Gy = cv2.filter2D(I, -1, mi)
    Gmag = np.sqrt(Gx*Gx + Gy*Gy)
    Gmag = Gmag / np.max(Gmag)
    Gdir = np.arctan2(Gy, Gx) / np.pi * 180                 # -180 to 180
    Gdir[np.less(Gdir, 0)] = Gdir[np.less(Gdir, 0)] + 360   # 0 to 360
    H = Gdir
    S = np.ones(np.shape(H))
    V = Gmag
    if viewOutput:
        nr, nc = np.shape(I)
        HSV = np.zeros([nr, nc, 3]).astype('float32')
        HSV[:, :, 0] = H
        HSV[:, :, 1] = S
        HSV[:, :, 2] = V
        BGR = cv2.cvtColor(HSV, cv2.COLOR_HSV2BGR)
        return Gmag, Gdir, BGR
    return Gmag, Gdir
Example 14: alphabar
def alphabar(s, bw, bh, ori_deg, R=1.0, G=1.0, B=1.0):
    """Generate a bar into existing sprite using the alpha channel.

    This fills the sprite with 'color' and then puts a [bw x bh] transparent
    bar of the specified orientation in the alpha channel.

    :param s: Sprite()
    :param bw,bh: (pixels) bar width and height
    :param ori_deg: (degrees) bar orientation
    :param R,G,B: (either R is colortriple or R,G,B are 0-1 values)
    :return: nothing (works in place)
    """
    R, G, B = (np.array(unpack_rgb(None, R, G, B)) * 255.0).astype(int)
    r, t = genpolar(s.w, s.h, degrees=True)
    t += ori_deg
    x = r * np.cos(t)
    y = r * np.sin(t)
    s.fill((R, G, B))
    mask = np.where(np.less(abs(x), (bw / 2.0)) * np.less(np.abs(y), (bh / 2.0)),
                    255, 0)
    s.alpha[::] = mask[::].astype(np.uint8)
Example 15: apply
def apply(self, pict):
    # get min diff & centroid assigned
    min_diff = np.multiply(np.ones_like(pict, 'float64'), -1)
    assigned = np.zeros_like(pict, 'uint8')
    new_bg = np.multiply(np.ones_like(pict, 'uint8'), 255)
    for i in range(self.K):
        # get diff
        cur_diff = np.multiply(np.ones_like(pict, 'float64'), ((pict - self.centroids[i]) ** 2))
        assigned = np.where(np.logical_or(np.equal(min_diff, -1), np.less(cur_diff, min_diff)), i, assigned)
        min_diff = np.where(np.logical_or(np.equal(min_diff, -1), np.less(cur_diff, min_diff)), cur_diff, min_diff)
    # update the centroids and weight
    for i in range(self.K):
        update_centroids = np.multiply(
            np.ones_like(pict, 'float64'),
            (np.add(self.centroids[i], self.alpha * np.subtract(pict, self.centroids[i])))
        )
        self.centroids[i] = np.where(np.equal(assigned, i), update_centroids, self.centroids[i])
        self.w[i] = np.where(np.equal(assigned, i), np.add(np.multiply((1. - self.alpha), self.w[i]), self.alpha),
                             np.multiply((1. - self.alpha), self.w[i]))
        new_bg = np.where(np.logical_and(np.equal(assigned, i), np.greater(self.w[i], 1. / self.K)), 0, new_bg)
    return new_bg