This article collects typical usage examples of the numpy.not_equal function in Python. If you are struggling with questions like: How exactly is Python's not_equal used? How do I call not_equal? Where can I find usage examples? Then the hand-picked code samples here may help.
The following presents 15 code examples of the not_equal function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code samples.
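Before the examples, a minimal sketch of what numpy.not_equal does: it is the elementwise ufunc behind the != operator, it returns a boolean array, and, like any ufunc, it accepts an out argument, an idiom several of the examples below rely on.

import numpy as np

a = np.array([1, 2, 3, 4])
b = np.array([1, 0, 3, 0])

print(np.not_equal(a, b))   # [False  True False  True]
print(a != b)               # the operator form calls the same ufunc

# Like any ufunc, not_equal can write into a preallocated boolean array.
out = np.empty(4, dtype=bool)
np.not_equal(a, b, out=out)
print(out)                  # [False  True False  True]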
Example 1: parseArgs
def parseArgs(data, targetClass, otherClass = None, **args) :
    '''parse arguments for a feature scoring function'''
    if 'feature' in args :
        feature = args['feature']
    else :
        feature = None
    if 'Y' in args :
        Y = args['Y']
        if otherClass is None :
            otherI = numpy.nonzero(numpy.not_equal(Y, targetClass))[0]
        else :
            otherI = numpy.nonzero(numpy.equal(Y, otherClass))[0]
        targetClassSize = numpy.sum(numpy.equal(Y, targetClass))
    else :
        Y = None
        if otherClass is None :
            otherI = numpy.nonzero(numpy.not_equal(data.labels.Y, targetClass))[0]
        else :
            otherI = data.labels.classes[otherClass]
        targetClassSize = len(data.labels.classes[targetClass])
    otherClassSize = len(otherI)
    return Y, targetClassSize, otherClassSize, otherI, feature
Example 2: node_can_drain
def node_can_drain(self, the_node):
    """Check if a node has drainage away from the current lake/depression.

    Parameters
    ----------
    the_node : int
        The node to test.
    nodes_this_depression : array_like of int
        Nodes that form a pit.

    Returns
    -------
    boolean
        ``True`` if the node can drain. Otherwise, ``False``.
    """
    nbrs = self._node_nbrs[the_node]
    not_bad = nbrs != LOCAL_BAD_INDEX_VALUE
    not_too_high = self._elev[nbrs] < self._elev[the_node]
    not_current_lake = np.not_equal(self.flood_status[nbrs], _CURRENT_LAKE)
    not_flooded = np.not_equal(self.flood_status[nbrs], _FLOODED)
    all_probs = np.logical_and(
        np.logical_and(not_bad, not_too_high),
        np.logical_and(not_current_lake, not_flooded))
    if np.any(all_probs):
        return True
    else:
        return False
Example 3: best_grid
def best_grid(wavelengths1, wavelengths2, key):
    """
    Return the best wavelength grid on which to regrid two arrays.

    Considering the two wavelength grids passed as parameters, this function
    computes the best new grid that will be used to regrid the two spectra
    before combining them. We do not use np.unique as it is much slower than
    finding the unique elements by hand.

    Parameters
    ----------
    wavelengths1, wavelengths2: array of floats
        The wavelength grids to be 'regridded'.
    key: tuple
        Key used to cache the results.

    Returns
    -------
    new_grid: array of floats
        Array containing all the wavelengths found in the input arrays.
    """
    if key in best_grid_cache:
        return best_grid_cache[key]
    wl = np.concatenate((wavelengths1, wavelengths2))
    wl.sort(kind='mergesort')
    flag = np.ones(len(wl), dtype=bool)
    np.not_equal(wl[1:], wl[:-1], out=flag[1:])
    best_grid_cache[key] = wl[flag]
    return wl[flag]
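The sort-then-compare-neighbors idiom above (also used in Examples 8 and 9) deduplicates a sorted array in one pass; a minimal standalone sketch:

import numpy as np

wl = np.concatenate(([1.0, 2.0, 3.0], [2.0, 3.0, 4.0]))
wl.sort(kind='mergesort')
flag = np.ones(len(wl), dtype=bool)
# flag[i] is True where wl[i] differs from its predecessor; flag[0] stays True.
np.not_equal(wl[1:], wl[:-1], out=flag[1:])
print(wl[flag])   # [1. 2. 3. 4.]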
Example 4: compute_distances
def compute_distances(self, x1, x2):
    """
    The method uses a function implemented in Cython. Data (`x1` and `x2`)
    is accompanied by two tables. One is a 2-d table in which elements of
    `x1` (`x2`) are replaced by 0's and 1's. The other is a vector
    indicating rows (or columns) with nan values.

    The function in Cython uses a fast loop without any conditions to
    compute distances between rows without missing values, and a slower
    loop for those with missing values.
    """
    nonzeros1 = np.not_equal(x1, 0).view(np.int8)
    if self.axis == 1:
        nans1 = _distance.any_nan_row(x1)
        if x2 is None:
            nonzeros2, nans2 = nonzeros1, nans1
        else:
            nonzeros2 = np.not_equal(x2, 0).view(np.int8)
            nans2 = _distance.any_nan_row(x2)
        return _distance.jaccard_rows(
            nonzeros1, nonzeros2,
            x1, x1 if x2 is None else x2,
            nans1, nans2,
            self.ps,
            x2 is not None)
    else:
        nans1 = _distance.any_nan_row(x1.T)
        return _distance.jaccard_cols(
            nonzeros1, x1, nans1, self.ps)
Example 5: average_without_padding
def average_without_padding(x, ids, padding_id, cuda=False, eps=1e-8):
    if cuda:
        mask = Variable(torch.from_numpy(np.not_equal(ids, padding_id).astype(int)[:, :, np.newaxis])).float().cuda().permute(1, 2, 0).expand_as(x)
    else:
        mask = Variable(torch.from_numpy(np.not_equal(ids, padding_id).astype(int)[:, :, np.newaxis])).float().permute(1, 2, 0).expand_as(x)
    s = torch.sum(x * mask, dim=2) / (torch.sum(mask, dim=2) + eps)
    return s
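The mask construction above is easier to follow in plain NumPy. A minimal sketch of the same masked-average idea; the shapes and the padding id of 0 are illustrative assumptions, not part of the original:

import numpy as np

ids = np.array([[5, 7], [9, 0], [0, 0]])   # (seq_len, batch); 0 plays the padding id here
x = np.random.rand(2, 4, 3)                # (batch, dim, seq_len)

mask = np.not_equal(ids, 0).astype(float)  # 1.0 for real tokens, 0.0 for padding
mask = mask.T[:, np.newaxis, :]            # reshape to (batch, 1, seq_len) for broadcasting
s = (x * mask).sum(axis=2) / (mask.sum(axis=2) + 1e-8)
print(s.shape)                             # (2, 4): one averaged vector per batch element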
Example 6: get_calipso_phase_inner
def get_calipso_phase_inner(features, qual_min=CALIPSO_QUAL_VALUES['medium'],
                            max_layers=1, same_phase_in_top_three_lay=True):
    """
    Returns Calipso cloud phase.

    Pixels with quality lower than *qual_min* are masked out.
    Screen out pixels with more than *max_layers* layers.
    """
    if same_phase_in_top_three_lay:
        phase1 = get_bits(features[:, 0], CALIPSO_PHASE_BITS, shift=True)
        phase2 = get_bits(features[:, 1], CALIPSO_PHASE_BITS, shift=True)
        phase3 = get_bits(features[:, 2], CALIPSO_PHASE_BITS, shift=True)
        two_layer_pixels = features[:, 2] > 1
        three_layer_pixels = features[:, 3] > 1
        lay1_lay2_differ = np.logical_and(two_layer_pixels,
                                          np.not_equal(phase1, phase2))
        lay2_lay3_differ = np.logical_and(three_layer_pixels,
                                          np.not_equal(phase2, phase3))
        varying_phases_in_top_3lay = np.logical_or(lay1_lay2_differ,
                                                   lay2_lay3_differ)
    # Reduce to single layer, masking any multilayer pixels
    features = np.ma.array(features[:, 0],
                           mask=(features[:, max_layers:] > 1).any(axis=-1))
    if same_phase_in_top_three_lay:
        features = np.ma.array(features,
                               mask=varying_phases_in_top_3lay)
    phase = get_bits(features, CALIPSO_PHASE_BITS, shift=True)
    qual = get_bits(features, CALIPSO_QUAL_BITS, shift=True)
    # Don't care about pixels with lower than *qual_min* quality
    return np.ma.array(phase, mask=qual < qual_min)
Example 7: test_prelu_param_updates
def test_prelu_param_updates(self):
    x_train, _, y_train, _ = simple_classification()
    prelu_layer1 = layers.PRelu(20, alpha=0.25)
    prelu_layer2 = layers.PRelu(1, alpha=0.25)
    gdnet = algorithms.GradientDescent(
        [
            layers.Input(10),
            prelu_layer1,
            prelu_layer2,
        ]
    )

    prelu1_alpha_before_training = prelu_layer1.alpha.get_value()
    prelu2_alpha_before_training = prelu_layer2.alpha.get_value()

    gdnet.train(x_train, y_train, epochs=10)

    prelu1_alpha_after_training = prelu_layer1.alpha.get_value()
    prelu2_alpha_after_training = prelu_layer2.alpha.get_value()

    self.assertTrue(all(np.not_equal(
        prelu1_alpha_before_training,
        prelu1_alpha_after_training,
    )))
    self.assertTrue(all(np.not_equal(
        prelu2_alpha_before_training,
        prelu2_alpha_after_training,
    )))
Example 8: scoreDuplicates
def scoreDuplicates(records, data_model, pool, threshold=0):
    record, records = peek(records)
    id_type = idType(record)
    score_dtype = [('pairs', id_type, 2), ('score', 'f4', 1)]

    record_chunks = grouper(records, 100000)
    scoring_function = ScoringFunction(data_model,
                                       threshold,
                                       score_dtype)

    results = [pool.apply_async(scoring_function,
                                (chunk,))
               for chunk in record_chunks]

    for r in results:
        r.wait()

    scored_pairs = numpy.concatenate([r.get() for r in results])

    scored_pairs.sort()
    flag = numpy.ones(len(scored_pairs), dtype=bool)
    numpy.not_equal(scored_pairs[1:],
                    scored_pairs[:-1],
                    out=flag[1:])

    return scored_pairs[flag]
Example 9: merge
def merge(a, b):
    # http://stackoverflow.com/questions/12427146/combine-two-arrays-and-sort
    c = np.concatenate((a, b))
    c.sort(kind='mergesort')
    flag = np.ones(len(c), dtype=bool)
    np.not_equal(c[1:], c[:-1], out=flag[1:])
    return c[flag]
Example 10: _calc_errors
def _calc_errors(truth, prediction, class_number=1):
    tp = np.sum(np.equal(truth, class_number) * np.equal(prediction, class_number))
    tn = np.sum(np.not_equal(truth, class_number) * np.not_equal(prediction, class_number))
    fp = np.sum(np.not_equal(truth, class_number) * np.equal(prediction, class_number))
    fn = np.sum(np.equal(truth, class_number) * np.not_equal(prediction, class_number))
    return tp, tn, fp, fn
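A hypothetical call with toy arrays, to make the four confusion-matrix counts concrete (assumes the definition above is in scope):

import numpy as np

truth = np.array([1, 1, 0, 0, 1])
prediction = np.array([1, 0, 0, 1, 1])
tp, tn, fp, fn = _calc_errors(truth, prediction, class_number=1)
print(tp, tn, fp, fn)   # 2 1 1 1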
Example 11: oht_model
def oht_model(gw, oro, fsns, flns, shfl, lhfl):
    """parameters; must be dimensioned as specified:
    gwi  : gaussian weights (lat)
    oroi : orography data array (lat,lon)
           requires that lat and lon are attached coordinates of oro
           and that oro and the following variables are 2D arrays (lat,lon).
    fsnsi: net shortwave flux at surface (lat,lon)
    flnsi: net longwave flux at surface (lat,lon)
    shfli: sensible heat flux at surface (lat,lon)
    lhfli: latent heat flux at surface (lat,lon)
    """
    re = 6.371e6           # radius of earth
    coef = re**2 / 1.e15   # scaled by PW
    heat_storage = 0.3     # W/m^2 adjustment for ocean heat storage

    nlat = oro.shape[0]
    nlon = oro.shape[1]
    dlon = 2. * pi / nlon  # dlon in radians
    lat = latAxis(oro)
    i65n = numpy.where(lat[:] >= 65)[0][0]    # assumes that lat[i+1] > lat[i]
    i65s = numpy.where(lat[:] <= -65)[0][-1]  # assumes that lat[i+1] > lat[i]

    # get the mask for the ocean basins
    basins_mask = ocean_mask(oro)  # returns 2D array (lat,lon)

    # compute net surface energy flux
    netflux = fsns - flns - shfl - lhfl - heat_storage

    # compute the net flux for the basins
    netflux_basin = numpy.ma.empty((3, nlat, nlon))
    netflux_basin[0, :, :] = netflux[:, :]
    netflux_basin[1, :, :] = netflux[:, :]
    netflux_basin[2, :, :] = netflux[:, :]
    netflux_basin[:, :, :] = numpy.ma.masked  # to make sure the mask array gets created
    netflux_basin._mask[0, :, :] = numpy.not_equal(basins_mask, 1)  # False on Pacific
    netflux_basin._mask[1, :, :] = numpy.not_equal(basins_mask, 2)  # False on Atlantic
    netflux_basin._mask[2, :, :] = numpy.not_equal(basins_mask, 3)  # False on Indian

    # sum flux over the longitudes in each basin
    heatflux = numpy.ma.sum(netflux_basin, axis=2)

    # compute implied heat transport in each basin
    oft = cdms2.createVariable(numpy.ma.masked_all((4, nlat)))
    oft.setAxisList([cdms2.createAxis([0, 1, 2, 3], id='basin number'), lat])
    # Dimension 0 of oft is the basin number (0: Pacific, 1: Atlantic,
    # 2: Indian, 3: total); dimension 1 is latitude.
    for n in range(3):
        for j in range(i65n, i65s - 1, -1):  # start sum at most northern point
            # ...assumes that lat[i+1] > lat[i]
            oft[n, j] = -coef * dlon * numpy.ma.sum(heatflux[n, j:i65n+1] * gw[j:i65n+1])

    # compute total implied ocean heat transport at each latitude
    # as the sum over the basins at that latitude
    for j in range(i65n, i65s - 1, -1):
        oft[3, j] = numpy.ma.sum(oft[0:3, j])

    return oft  # 2D array (4, lat)
Example 12: shrink_hyperrect
def shrink_hyperrect(x0, x1, L, R):
    """Shrink the hyperrectangle bounds (L, R) toward the current point x0:
    in dimensions where the rejected sample x1 is >= x0 the upper bound R
    moves down to x1, in the remaining dimensions the lower bound L moves
    up to x1."""
    L_or_R = (x1 >= x0)                 # modifications to R
    R[L_or_R] = x1[L_or_R]
    np.not_equal(L_or_R, True, L_or_R)  # in-place logical not; modifications to L
    L[L_or_R] = x1[L_or_R]
    return L, R
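A hypothetical call with illustrative values (assumes the definition above is in scope):

import numpy as np

x0 = np.array([0.5, 0.5])   # current point
x1 = np.array([0.8, 0.2])   # rejected sample
L = np.array([0.0, 0.0])    # lower corner of the hyperrectangle
R = np.array([1.0, 1.0])    # upper corner
L, R = shrink_hyperrect(x0, x1, L, R)
print(L, R)                 # [0.  0.2] [0.8 1. ]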
Example 13: _numpy
def _numpy(self, data, weights, shape):
    q = self.quantity(data)
    self._checkNPQuantity(q, shape)
    self._checkNPWeights(weights, shape)
    weights = self._makeNPWeights(weights, shape)
    newentries = weights.sum()

    import numpy

    selection = numpy.isnan(q)
    numpy.bitwise_not(selection, selection)
    subweights = weights.copy()
    subweights[selection] = 0.0
    self.nanflow._numpy(data, subweights, shape)

    # avoid nan warning in calculations by flinging the nans elsewhere
    numpy.bitwise_not(selection, selection)
    q = numpy.array(q, dtype=numpy.float64)
    q[selection] = self.high
    weights = weights.copy()
    weights[selection] = 0.0

    numpy.greater_equal(q, self.low, selection)
    subweights[:] = weights
    subweights[selection] = 0.0
    self.underflow._numpy(data, subweights, shape)

    numpy.less(q, self.high, selection)
    subweights[:] = weights
    subweights[selection] = 0.0
    self.overflow._numpy(data, subweights, shape)

    if all(isinstance(value, Count) and value.transform is identity for value in self.values) and numpy.all(numpy.isfinite(q)) and numpy.all(numpy.isfinite(weights)):
        # Numpy defines histograms as including the upper edge of the last bin only, so drop that
        weights[q == self.high] = 0.0
        h, _ = numpy.histogram(q, self.num, (self.low, self.high), weights=weights)

        for hi, value in zip(h, self.values):
            value.fill(None, float(hi))
    else:
        q = numpy.array(q, dtype=numpy.float64)
        numpy.subtract(q, self.low, q)
        numpy.multiply(q, self.num, q)
        numpy.divide(q, self.high - self.low, q)
        numpy.floor(q, q)
        q = numpy.array(q, dtype=int)

        for index, value in enumerate(self.values):
            numpy.not_equal(q, index, selection)
            subweights[:] = weights
            subweights[selection] = 0.0
            value._numpy(data, subweights, shape)

    # no possibility of exception from here on out (for rollback)
    self.entries += float(newentries)
Example 14: _build_y
def _build_y(self, X, y, sample_weight, trim_duplicates=True):
    """Build the y_ IsotonicRegression."""
    check_consistent_length(X, y, sample_weight)
    X, y = [check_array(x, ensure_2d=False) for x in [X, y]]

    y = as_float_array(y)
    self._check_fit_data(X, y, sample_weight)

    # Determine increasing if auto-determination requested
    if self.increasing == 'auto':
        self.increasing_ = check_increasing(X, y)
    else:
        self.increasing_ = self.increasing

    # If sample_weight is passed, remove zero-weight values and clean
    # the order
    if sample_weight is not None:
        sample_weight = check_array(sample_weight, ensure_2d=False)
        mask = sample_weight > 0
        X, y, sample_weight = X[mask], y[mask], sample_weight[mask]
    else:
        sample_weight = np.ones(len(y))

    order = np.lexsort((y, X))
    X, y, sample_weight = [astype(array[order], np.float64, copy=False)
                           for array in [X, y, sample_weight]]
    unique_X, unique_y, unique_sample_weight = _make_unique(
        X, y, sample_weight)

    # Store _X_ and _y_ to maintain backward compat during the deprecation
    # period of X_ and y_
    self._X_ = X = unique_X
    self._y_ = y = isotonic_regression(unique_y, unique_sample_weight,
                                       self.y_min, self.y_max,
                                       increasing=self.increasing_)

    # Handle the left and right bounds on X
    self.X_min_, self.X_max_ = np.min(X), np.max(X)

    if trim_duplicates:
        # Remove unnecessary points for faster prediction
        keep_data = np.ones((len(y),), dtype=bool)
        # Aside from the 1st and last point, remove points whose y values
        # are equal to both the point before and the point after it.
        keep_data[1:-1] = np.logical_or(
            np.not_equal(y[1:-1], y[:-2]),
            np.not_equal(y[1:-1], y[2:])
        )
        return X[keep_data], y[keep_data]
    else:
        # The ability to turn off trim_duplicates is only used to make it
        # easier to unit test that removing duplicates in y does not have
        # any impact on the resulting interpolation function (besides
        # prediction speed).
        return X, y
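A quick standalone illustration of the trimming mask above: in a flat run of y values only the run's endpoints are kept, which leaves the interpolation function unchanged.

import numpy as np

y = np.array([0., 1., 1., 1., 2.])
keep_data = np.ones(len(y), dtype=bool)
# A middle point survives only if it differs from a neighbor on either side.
keep_data[1:-1] = np.logical_or(np.not_equal(y[1:-1], y[:-2]),
                                np.not_equal(y[1:-1], y[2:]))
print(keep_data)   # [ True  True False  True  True]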
Example 15: LabelPerimeter
def LabelPerimeter(L, Connectivity=4):
    """Converts a label or binary mask image to a binary perimeter image.

    Uses 4-neighbor or 8-neighbor shifts to detect pixels whose values do
    not agree with their neighbors.

    Parameters
    ----------
    L : array_like
        A label or binary mask image.
    Connectivity : double or int
        Neighborhood connectivity to evaluate. Valid values are 4 or 8.
        Default value = 4.

    Returns
    -------
    Mask : array_like
        A binary image where object perimeter pixels have value 1, and
        non-perimeter pixels have value 0.

    See Also
    --------
    EmbedBounds
    """
    # initialize temporary variables
    Mask = np.zeros(L.shape)
    Temp = np.zeros(L.shape)

    # check left-right neighbors
    Temp[:, 0:-2] = np.not_equal(L[:, 0:-2], L[:, 1:-1])
    Temp[:, 1:-1] = np.logical_or(Temp[:, 1:-1], Temp[:, 0:-2])
    Mask = np.logical_or(Mask, Temp)

    # check up-down neighbors
    Temp[0:-2, :] = np.not_equal(L[0:-2, :], L[1:-1, :])
    Temp[1:-1, :] = np.logical_or(Temp[1:-1, :], Temp[0:-2, :])
    Mask = np.logical_or(Mask, Temp)

    # additional calculations if Connectivity == 8
    if Connectivity == 8:
        # slope +1 diagonal shift
        Temp[1:-1, 0:-2] = np.not_equal(L[0:-2, 1:-1], L[1:-1, 0:-2])
        Temp[0:-2, 1:-1] = np.logical_or(Temp[0:-2, 1:-1], Temp[1:-1, 0:-2])
        Mask = np.logical_or(Mask, Temp)

        # slope -1 diagonal shift
        Temp[1:-1, 1:-1] = np.not_equal(L[0:-2, 0:-2], L[1:-1, 1:-1])
        Temp[0:-2, 0:-2] = np.logical_or(Temp[0:-2, 0:-2], Temp[1:-1, 1:-1])
        Mask = np.logical_or(Mask, Temp)

    # generate label-valued output
    return Mask.astype(np.uint32) * L