This article collects typical usage examples of the numpy.any method in Python. If you have been wondering how numpy.any works, how to call it, or what real uses of numpy.any look like, the curated code examples below may help. You can also explore further usage examples for the numpy module that this method belongs to.
The following shows 15 code examples of numpy.any, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
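Before diving into the examples, here is a minimal sketch of basic numpy.any behaviour itself (standard NumPy, inputs invented for this note): it returns True if at least one element evaluates to True, and it accepts an axis argument for per-row or per-column tests.

import numpy as np

a = np.array([[0, 0, 3],
              [0, 0, 0]])
print(np.any(a))           # True: at least one element is non-zero
print(np.any(a, axis=0))   # [False False  True]: per-column test
print(np.any(a > 2))       # True: commonly used on boolean arrays from comparisons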
Example 1: add_intercept
# Required module: import numpy [as alias]
# Or: from numpy import any [as alias]
def add_intercept(self, X):
    """Add a column of 1's to the data as the last feature."""
    # Data shape
    N, D = X.shape
    # Check whether an intercept column (a column of 1's) is already present
    if np.any(np.sum(X, axis=0) == N):
        # Report
        print('Intercept is not the last feature. Swapping...')
        # Find which column contains the intercept
        intercept_index = np.argwhere(np.sum(X, axis=0) == N)
        # Drop the existing intercept column
        X = X[:, np.setdiff1d(np.arange(D), intercept_index)]
    # Append a column of 1's as the last feature
    X = np.hstack((X, np.ones((N, 1))))
    # Return data with intercept and the incremented dimensionality
    return X, D+1
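As a quick illustration of the check used above (data invented here, not from the original project): a column of all 1's sums to N, so np.any(np.sum(X, axis=0) == N) is True exactly when some column looks like an intercept.

import numpy as np

N = 5
X = np.hstack((np.ones((N, 1)), np.random.rand(N, 2)))  # intercept placed as the first column
print(np.any(np.sum(X, axis=0) == N))                   # True: one column sums to N
print(np.argwhere(np.sum(X, axis=0) == N).ravel())      # [0]: index of that column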
Example 2: is_pos_def
# Required module: import numpy [as alias]
# Or: from numpy import any [as alias]
def is_pos_def(self, A):
    """
    Check for positive definiteness.

    Parameters
    ----------
    A : array
        Square symmetric matrix.

    Returns
    -------
    bool
        Whether the matrix is positive-definite.
        Warning: returns False for arrays containing inf or NaN.
    """
    # Check for valid numbers
    if np.any(np.isnan(A)) or np.any(np.isinf(A)):
        return False
    else:
        return np.all(np.real(np.linalg.eigvals(A)) > 0)
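A self-contained sketch of the same check, written as a free function so it can be run directly (inputs invented here):

import numpy as np

def is_pos_def(A):
    """Return True only for matrices whose eigenvalues all have positive real parts."""
    if np.any(np.isnan(A)) or np.any(np.isinf(A)):
        return False
    return np.all(np.real(np.linalg.eigvals(A)) > 0)

print(is_pos_def(np.eye(3)))                            # True
print(is_pos_def(np.array([[1., 2.], [2., 1.]])))       # False: eigenvalues are 3 and -1
print(is_pos_def(np.array([[np.nan, 0.], [0., 1.]])))   # False: NaN is caught by np.any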
Example 3: set_grid_data
# Required module: import numpy [as alias]
# Or: from numpy import any [as alias]
def set_grid_data(self):
    """Populate the attrs that hold grid data."""
    if self._grid_data_is_set:
        return
    self._set_mult_grid_attr()
    if not np.any(getattr(self, 'sfc_area', None)):
        try:
            sfc_area = _grid_sfc_area(self.lon, self.lat, self.lon_bounds,
                                      self.lat_bounds)
        except AttributeError:
            sfc_area = _grid_sfc_area(self.lon, self.lat)
        self.sfc_area = sfc_area
    try:
        self.levs_thick = utils.vertcoord.level_thickness(self.level)
    except AttributeError:
        self.level = None
        self.levs_thick = None
    self._grid_data_is_set = True
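The `not np.any(getattr(self, 'sfc_area', None))` guard above treats a missing attribute, a None value, and an all-zero array the same way: in each case the surface area gets (re)computed. A small sketch of that behaviour with a made-up class:

import numpy as np

class Grid(object):
    pass

g = Grid()
print(not np.any(getattr(g, 'sfc_area', None)))  # True: attribute missing, so compute it
g.sfc_area = np.zeros(3)
print(not np.any(g.sfc_area))                    # True: present but all zeros, recompute
g.sfc_area = np.array([1.0, 2.0, 3.0])
print(not np.any(g.sfc_area))                    # False: real data, skip the computation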
Example 4: query
# Required module: import numpy [as alias]
# Or: from numpy import any [as alias]
def query(self, coords, order=1):
    """
    Returns the map value at the specified location(s) on the sky.

    Args:
        coords (`astropy.coordinates.SkyCoord`): The coordinates to query.
        order (Optional[int]): Interpolation order to use. Defaults to `1`,
            for linear interpolation.

    Returns:
        A float array containing the map value at every input coordinate.
        The shape of the output will be the same as the shape of the
        coordinates stored by `coords`.
    """
    out = np.full(len(coords.l.deg), np.nan, dtype='f4')

    for pole in self.poles:
        m = (coords.b.deg >= 0) if pole == 'ngp' else (coords.b.deg < 0)
        if np.any(m):
            data, w = self._data[pole]
            x, y = w.wcs_world2pix(coords.l.deg[m], coords.b.deg[m], 0)
            out[m] = map_coordinates(data, [y, x], order=order, mode='nearest')

    return out
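The `if np.any(m):` test in the loop above is a common guard: skip the interpolation entirely when the boolean mask selects no coordinates. A stripped-down sketch of that pattern with made-up latitudes (not the dustmaps API):

import numpy as np

b = np.array([10.0, -5.0, 30.0, -60.0])   # hypothetical galactic latitudes in degrees
out = np.full(b.shape, np.nan, dtype='f4')

for pole, m in [('ngp', b >= 0), ('sgp', b < 0)]:
    if np.any(m):                          # only process a hemisphere that has sources
        out[m] = 1.0 if pole == 'ngp' else -1.0

print(out)  # [ 1. -1.  1. -1.]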
Example 5: fetch
# Required module: import numpy [as alias]
# Or: from numpy import any [as alias]
def fetch(clobber=False):
    """
    Downloads the 3D dust map of Leike & Ensslin (2019).

    Args:
        clobber (Optional[bool]): If ``True``, any existing file will be
            overwritten, even if it appears to match. If ``False`` (the
            default), ``fetch()`` will attempt to determine if the dataset
            already exists. This determination is not 100% robust against data
            corruption.
    """
    dest_dir = fname_pattern = os.path.join(data_dir(), 'leike_ensslin_2019')
    fname = os.path.join(dest_dir, 'simple_cube.h5')

    # Check if the file already exists
    md5sum = 'f54e01c253453117e3770575bed35078'
    if (not clobber) and fetch_utils.check_md5sum(fname, md5sum):
        print('File appears to exist already. Call `fetch(clobber=True)` '
              'to force overwriting of existing file.')
        return

    # Download from the server
    url = 'https://zenodo.org/record/2577337/files/simple_cube.h5?download=1'
    fetch_utils.download_and_verify(url, md5sum, fname)
Example 6: _compute_gradients
# Required module: import numpy [as alias]
# Or: from numpy import any [as alias]
def _compute_gradients(self, loss_fn, x, unused_optim_state):
    """Compute a new value of `x` to minimize `loss_fn`.

    Args:
        loss_fn: a callable that takes `x`, a batch of images, and returns
            a batch of loss values. `x` will be optimized to minimize
            `loss_fn(x)`.
        x: A list of Tensors, the values to be updated. This is analogous
            to the `var_list` argument in standard TF Optimizer.
        unused_optim_state: A (possibly nested) dict, containing any state
            info needed for the optimizer.

    Returns:
        new_x: A list of Tensors, the same length as `x`, which are updated
        new_optim_state: A dict, with the same structure as `optim_state`,
            which have been updated.
    """
    # Assumes `x` is a list containing a single tensor that represents a
    # batch of images
    assert len(x) == 1 and isinstance(x, list), \
        'x should be a list and contain only one image tensor'
    x = x[0]
    loss = reduce_mean(loss_fn(x), axis=0)
    return tf.gradients(loss, x)
Example 7: _prepro_cpg
# Required module: import numpy [as alias]
# Or: from numpy import any [as alias]
def _prepro_cpg(self, states, dists):
    """Preprocess the state and distance of neighboring CpG sites."""
    prepro_states = []
    prepro_dists = []
    for state, dist in zip(states, dists):
        nan = state == dat.CPG_NAN
        if np.any(nan):
            # Impute missing states by sampling from the observed mean rate
            state[nan] = np.random.binomial(1, state[~nan].mean(),
                                            nan.sum())
            dist[nan] = self.cpg_max_dist
        dist = np.minimum(dist, self.cpg_max_dist) / self.cpg_max_dist
        prepro_states.append(np.expand_dims(state, 1))
        prepro_dists.append(np.expand_dims(dist, 1))
    prepro_states = np.concatenate(prepro_states, axis=1)
    prepro_dists = np.concatenate(prepro_dists, axis=1)
    if self.cpg_wlen:
        center = prepro_states.shape[2] // 2
        delta = self.cpg_wlen // 2
        tmp = slice(center - delta, center + delta)
        prepro_states = prepro_states[:, :, tmp]
        prepro_dists = prepro_dists[:, :, tmp]
    return (prepro_states, prepro_dists)
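The `if np.any(nan):` block above imputes missing CpG states by sampling from the observed methylation rate. A minimal sketch of just that step (marker value and data invented here):

import numpy as np

CPG_NAN = -1                                  # assumed missing-value marker
state = np.array([1, 0, CPG_NAN, 1, CPG_NAN])
nan = state == CPG_NAN
if np.any(nan):                               # only sample when something is missing
    state[nan] = np.random.binomial(1, state[~nan].mean(), nan.sum())
print(state)                                  # the -1 entries are replaced by 0/1 draws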
Example 8: test_convert_docs
# Required module: import numpy [as alias]
# Or: from numpy import any [as alias]
def test_convert_docs(self):
    _, df = convert_docs(self.test_pool, include_stress=False)
    test_energies = df[df['dtype'] == 'energy']['y_orig']
    self.assertFalse(np.any(test_energies - self.test_energies))
    test_forces = df[df['dtype'] == 'force']['y_orig']
    for force1, force2 in zip(test_forces, np.array(self.test_forces).ravel()):
        self.assertEqual(force1, force2)

    _, df = convert_docs(self.test_pool, include_stress=True)
    test_energies = df[df['dtype'] == 'energy']['y_orig']
    self.assertFalse(np.any(test_energies - self.test_energies))
    test_forces = df[df['dtype'] == 'force']['y_orig']
    for force1, force2 in zip(test_forces, np.array(self.test_forces).ravel()):
        self.assertEqual(force1, force2)
    test_stresses = df[df['dtype'] == 'stress']['y_orig']
    for stress1, stress2 in zip(test_stresses, np.array(self.test_stresses).ravel()):
        self.assertEqual(stress1, stress2)
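The `assertFalse(np.any(...))` calls above rely on a compact equality idiom: for numeric arrays, np.any(a - b) is False exactly when every element-wise difference is zero. A tiny sketch:

import numpy as np

a = np.array([1.0, 2.0, 3.0])
b = a.copy()
print(np.any(a - b))      # False: all differences are exactly zero
b[1] += 0.5
print(np.any(a - b))      # True: at least one element differs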
Example 9: point_on_segment
# Required module: import numpy [as alias]
# Or: from numpy import any [as alias]
def point_on_segment(ac, b, atol=1e-8):
    '''
    point_on_segment((a,c), b) yields True if point b is on segment (a,c) and False otherwise.
    Note that this differs from point_in_segment in that a point equal to one of the endpoints
    is considered 'on' but not 'in' the segment.
    The option atol can be given and is used only to test for difference from 0; by default it
    is 1e-8.
    '''
    (a,c) = ac
    abc = [np.asarray(u) for u in (a,b,c)]
    if any(len(u.shape) > 1 for u in abc): (a,b,c) = [np.reshape(u,(len(u),-1)) for u in abc]
    else: (a,b,c) = abc
    vab = b - a
    vbc = c - b
    vac = c - a
    dab = np.sqrt(np.sum(vab**2, axis=0))
    dbc = np.sqrt(np.sum(vbc**2, axis=0))
    dac = np.sqrt(np.sum(vac**2, axis=0))
    return np.isclose(dab + dbc - dac, 0, atol=atol)
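A short usage sketch of the function above (points invented here): b is on segment (a, c) whenever the distances satisfy |ab| + |bc| = |ac| up to atol.

import numpy as np

a, c = np.array([0.0, 0.0]), np.array([2.0, 2.0])
print(point_on_segment((a, c), np.array([1.0, 1.0])))  # True:  the midpoint lies on the segment
print(point_on_segment((a, c), a))                     # True:  an endpoint counts as 'on'
print(point_on_segment((a, c), np.array([3.0, 3.0])))  # False: beyond the segment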
Example 10: point_in_segment
# Required module: import numpy [as alias]
# Or: from numpy import any [as alias]
def point_in_segment(ac, b, atol=1e-8):
    '''
    point_in_segment((a,c), b) yields True if point b is in segment (a,c) and False otherwise.
    Note that this differs from point_on_segment in that a point equal to one of the endpoints
    is considered 'on' but not 'in' the segment.
    The option atol can be given and is used only to test for difference from 0; by default it
    is 1e-8.
    '''
    (a,c) = ac
    abc = [np.asarray(u) for u in (a,b,c)]
    if any(len(u.shape) > 1 for u in abc): (a,b,c) = [np.reshape(u,(len(u),-1)) for u in abc]
    else: (a,b,c) = abc
    vab = b - a
    vbc = c - b
    vac = c - a
    dab = np.sqrt(np.sum(vab**2, axis=0))
    dbc = np.sqrt(np.sum(vbc**2, axis=0))
    dac = np.sqrt(np.sum(vac**2, axis=0))
    return (np.isclose(dab + dbc - dac, 0, atol=atol) &
            ~np.isclose(dac - dab, 0, atol=atol) &
            ~np.isclose(dac - dbc, 0, atol=atol))
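A companion sketch showing how point_in_segment differs from point_on_segment at the endpoints (points invented here):

import numpy as np

a, c = np.array([0.0, 0.0]), np.array([2.0, 0.0])
print(point_in_segment((a, c), np.array([1.0, 0.0])))  # True:  strictly between the endpoints
print(point_in_segment((a, c), a))                     # False: an endpoint is 'on', not 'in'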
Example 11: should_step_get_rejected
# Required module: import numpy [as alias]
# Or: from numpy import any [as alias]
def should_step_get_rejected(self, standardError):
    """
    Given a standardError, return whether to keep or reject the new
    standardError according to the constraint's rejectProbability.
    In addition, if the flexible flag is set to True, the total number of
    atoms not satisfying the constraint definition must be decreasing or at
    least remain the same.

    :Parameters:
        #. standardError (number): The standard error to compare with the
           Constraint's standard error.

    :Returns:
        #. result (boolean): True to reject the step, False to accept it.
    """
    if self.__flexible:
        # Compute whether the step should get rejected as a RigidConstraint
        return super(_DistanceConstraint, self).should_step_get_rejected(standardError)
    else:
        cond = self.activeAtomsDataAfterMove["number"] > self.activeAtomsDataBeforeMove["number"]
        if np.any(cond):
            return True
        return False
Example 12: optimize
# Required module: import numpy [as alias]
# Or: from numpy import any [as alias]
def optimize(self, sess, feed_dict):
    reg_input, reg_weight, old_values, targets = sess.run(
        [self.inputs, self.regression_weight, self.values, self.targets],
        feed_dict=feed_dict)

    intended_values = targets * self.mix_frac + old_values * (1 - self.mix_frac)

    # taken from rllab: ridge regression with an increasing regularization
    # coefficient until the solution contains no NaNs
    reg_coeff = 1e-5
    for _ in range(5):
        best_fit_weight = np.linalg.lstsq(
            reg_input.T.dot(reg_input) +
            reg_coeff * np.identity(reg_input.shape[1]),
            reg_input.T.dot(intended_values))[0]
        if not np.any(np.isnan(best_fit_weight)):
            break
        reg_coeff *= 10

    if len(best_fit_weight.shape) == 1:
        best_fit_weight = np.expand_dims(best_fit_weight, -1)

    sess.run(self.update_regression_weight,
             feed_dict={self.new_regression_weight: best_fit_weight})
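The retry loop above uses np.any(np.isnan(...)) to detect when the regularized least-squares solve went numerically wrong and to back off to a larger regularization coefficient. A standalone sketch of that pattern (design matrix and targets invented here):

import numpy as np

A = np.array([[1.0, 1.0],
              [1.0, 1.0 + 1e-12]])           # nearly singular design matrix
b = np.array([1.0, 2.0])

reg_coeff = 1e-5
for _ in range(5):
    w = np.linalg.lstsq(A.T.dot(A) + reg_coeff * np.identity(2),
                        A.T.dot(b), rcond=None)[0]
    if not np.any(np.isnan(w)):              # accept the first numerically sane solution
        break
    reg_coeff *= 10
print(w)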
Example 13: _testDecoder
# Required module: import numpy [as alias]
# Or: from numpy import any [as alias]
def _testDecoder(self,
                 height=64,
                 width=64,
                 channels=4,
                 batch_norm_params=None,
                 decoder=models.small_decoder):
    codes = tf.to_float(np.random.rand(32, 100))
    with self.test_session() as sess:
        output = decoder(
            codes,
            height=height,
            width=width,
            channels=channels,
            batch_norm_params=batch_norm_params)
        sess.run(tf.global_variables_initializer())
        output_np = sess.run(output)
        self.assertEqual(output_np.shape, (32, height, width, channels))
        self.assertTrue(np.any(output_np))
        self.assertTrue(np.all(np.isfinite(output_np)))
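The two assertions at the end combine np.any and np.isfinite into a basic sanity check on a network output: not everything is zero, and nothing is NaN or inf. The same check outside of TensorFlow (array invented here):

import numpy as np

output_np = np.random.rand(2, 8, 8, 4).astype('f4')  # stand-in for a decoder output
print(np.any(output_np))                # True unless every element is exactly zero
print(np.all(np.isfinite(output_np)))   # True: no NaN or inf anywhere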
Example 14: validate_and_fill_geometry
# Required module: import numpy [as alias]
# Or: from numpy import any [as alias]
def validate_and_fill_geometry(geom=None, tooclose=0.1, copy=True):
    """Check `geom` for overlapping atoms. Return flattened coordinates."""
    # np.float was removed in NumPy 1.24; the builtin float is equivalent here
    npgeom = np.array(geom, copy=copy, dtype=float).reshape((-1, 3))

    # Upper triangular pairwise check
    metric = tooclose ** 2
    tooclose_inds = []
    for x in range(npgeom.shape[0]):
        diffs = npgeom[x] - npgeom[x + 1:]
        dists = np.einsum("ij,ij->i", diffs, diffs)

        # Record issues
        if np.any(dists < metric):
            indices = np.where(dists < metric)[0]
            tooclose_inds.extend([(x, y, dist) for y, dist in zip(indices + x + 1, dists[indices] ** 0.5)])

    if tooclose_inds:
        raise ValidationError(
            """Following atoms are too close: {}""".format([(i, j, dist) for i, j, dist in tooclose_inds])
        )

    return {"geom": npgeom.reshape((-1))}
Example 15: test_forward
# Required module: import numpy [as alias]
# Or: from numpy import any [as alias]
def test_forward(self, batch, return_masks=True):
    """
    Test method: a wrapper around the forward pass of the network that uses no ground-truth
    information. Prepares input data for processing and stores outputs in a dictionary.
    :param batch: dictionary containing 'data'
    :param return_masks: boolean. If True, full-resolution masks are returned for all proposals (speed trade-off).
    :return: results_dict: dictionary with keys:
        'boxes': list over batch elements. Each batch element is a list of boxes, and each box is a dictionary:
            [[{box_0}, ... {box_n}], [{box_0}, ... {box_n}], ...]
        'seg_preds': pixel-wise class predictions (b, 1, y, x, (z)) with values in [0, n_classes]
    """
    img = batch['data']
    img = torch.from_numpy(img).float().cuda()
    _, _, _, detections, detection_masks = self.forward(img)
    results_dict = get_results(self.cf, img.shape, detections, detection_masks, return_masks=return_masks)
    return results_dict