This article collects typical usage examples of Python's numpy.array_equiv function. If you have been wondering what exactly array_equiv does, how to call it, or what real code that uses it looks like, the hand-picked examples below should help.
The following 15 code examples of array_equiv are shown, ordered by popularity by default.
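Before diving into the examples, here is a minimal sketch (written for this article, not taken from any of the projects below) of what numpy.array_equiv actually checks: the two inputs must be shape-consistent under broadcasting, and every broadcast element must compare equal.

import numpy as np

# array_equiv returns True when shapes are broadcast-compatible and all elements match.
print(np.array_equiv([1, 2], [1, 2]))             # True: identical values
print(np.array_equiv([1, 2], [[1, 2], [1, 2]]))   # True: shape (2,) broadcasts against (2, 2)
print(np.array_equiv([1, 2], [[1, 2], [1, 3]]))   # False: an element differs after broadcasting
print(np.array_equiv([1, 2], [1, 2, 3]))          # False: shapes cannot be broadcast together

This broadcasting-aware equality is the property most of the examples below rely on, whether they compare arrays of identical shape or an array against a single scalar.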
Example 1: test_copy
def test_copy(self):
    g = self.Grid
    h = g.copy()
    npt.assert_(np.array_equiv(g.domain[-1] - h.domain[-1], 0))
    g.domain[-1] += 1
    npt.assert_(not np.array_equiv(g.domain[-1] - h.domain[-1], 0))
    return 0
Example 2: test_RGB
def test_RGB():
    """
    Test multi channel light class
    """
    position = 10, 10
    size = 50, 50
    channels = 3
    light = pylights.MultiLight('single_test', position, size, channels=channels)
    old_value = np.array([0.8, 0.3, 0.6])
    new_value = np.array([0.1, 0.1, 0.1])
    light.set(old_value, False)
    assert np.array_equiv(light.value_current, old_value)
    assert np.array_equiv(light.value_target, old_value)
    assert np.array_equiv(light.value_output, np.array([0, 0, 0]))
    light.set(new_value)
    assert np.array_equiv(light.value_current, old_value)
    assert np.array_equiv(light.value_target, new_value)
    assert np.array_equiv(light.value_output, np.array([0, 0, 0]))
    light.update()
    temp = (old_value + light.damping * (new_value - old_value))
    assert np.array_equiv(light.value_current, temp)
    assert np.array_equiv(light.value_target, new_value)
    assert np.array_equiv(light.value_output, (old_value * 255.0).astype(int))
    light.update()
    assert np.array_equiv(light.value_output, (temp * 255.0).astype(int))
Example 3: __testSequences
def __testSequences(self, device, module):
    module = str(module)
    # Basic Interface: Currently supports read of all sequences only
    #device.write("", "WORD_ADC_ENA", 1)
    # Arrange the data on the card:
    predefinedSequence = numpy.array([0x00010000,
                                      0x00030002,
                                      0x00050004,
                                      0x00070006,
                                      0x00090008,
                                      0x000b000a,
                                      0x000d000c,
                                      0x000f000e,
                                      0x00110010,
                                      0x00130012,
                                      0x00150014,
                                      0x00170016,
                                      0x00ff0018], dtype=numpy.int32)
    device.write_raw(module, 'AREA_DMAABLE', predefinedSequence)
    expectedMatrix = numpy.array([[0, 1, 2, 3],
                                  [4, 5, 6, 7],
                                  [8, 9, 10, 11],
                                  [12, 13, 14, 15],
                                  [16, 17, 18, 19],
                                  [20, 21, 22, 23]], dtype=numpy.float32)
    readInMatrix = device.read_sequences(module, 'DMA')
    self.assertTrue(numpy.array_equiv(readInMatrix, expectedMatrix))
    self.assertTrue(readInMatrix.dtype == numpy.float32)
    readInMatrix = device.read_sequences(registerPath='/' + str(module) + '/DMA')
    self.assertTrue(numpy.array_equiv(readInMatrix, expectedMatrix))
    self.assertTrue(readInMatrix.dtype == numpy.float32)
Example 4: bin2d_cvt_equal_mass
def bin2d_cvt_equal_mass(self, wvt=None, verbose=1):
    """
    Produce a CVT (Centroidal Voronoi Tessellation)
    wvt: default is None (will use preset value, see self.wvt)
    """
    ## Reset the status and statusnode for all nodes
    self.status = np.zeros(self.npix, dtype=Nint)
    self.statusnode = np.arange(self.xnode.size) + 1
    if wvt is not None: self.wvt = wvt
    if self.wvt: self.weight = np.ones_like(self.data)
    else: self.weight = self.data**4
    self.scale = 1.0
    self.niter = 0
    ## WHILE LOOP: stop when the nodes do not move anymore ============
    Oldxnode, Oldynode = copy.copy(self.xnode[-1]), copy.copy(self.ynode[-1])
    while (not np.array_equiv(self.xnode, Oldxnode)) | (not np.array_equiv(self.ynode, Oldynode)):
        Oldxnode, Oldynode = copy.copy(self.xnode), copy.copy(self.ynode)
        ## Assign the closest centroid to each bin
        self.bin2d_assign_bins()
        ## New nodes weighted centroids
        self.bin2d_weighted_centroid()
        ## Eq. (4) of Diehl & Statler (2006)
        if self.wvt: self.scale = sqrt(self.Areanode / self.flux_node)
        self.niter += 1
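The while loop above uses np.array_equiv as a convergence test: iteration stops once an update leaves every node coordinate unchanged. A minimal sketch of the same pattern, with a made-up update rule standing in for the weighted-centroid step:

import numpy as np

# Generic fixed-point loop in the style used above: keep updating the node
# positions until np.array_equiv reports that they no longer move.
nodes = np.array([4.0, 2.0, 1.0])
old_nodes = nodes[-1]                 # scalar sentinel, mirroring the xnode[-1] trick above
while not np.array_equiv(nodes, old_nodes):
    old_nodes = nodes.copy()          # remember the previous positions
    nodes = np.floor(nodes / 2.0)     # stand-in for the real weighted-centroid update
print(nodes)                          # [0. 0. 0.] once the update becomes a no-op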
Example 5: equiv
def equiv(self, other):
    """Test if other is an equivalent weighting.

    Returns
    -------
    equivalent : `bool`
        `True` if other is a `WeightingBase` instance with the same
        `WeightingBase.impl`, which yields the same result as this
        weighting for any input, `False` otherwise. This is checked
        by entry-wise comparison of matrices/vectors/constants.
    """
    # Optimization for equality
    if self == other:
        return True
    elif self.exponent != getattr(other, 'exponent', -1):
        return False
    elif isinstance(other, MatrixWeightingBase):
        if self.matrix.shape != other.matrix.shape:
            return False
        if self.matrix_issparse:
            if other.matrix_issparse:
                # Optimization for different number of nonzero elements
                if self.matrix.nnz != other.matrix.nnz:
                    return False
                else:
                    # Most efficient out-of-the-box comparison
                    return (self.matrix != other.matrix).nnz == 0
            else:  # Worst case: compare against dense matrix
                return np.array_equal(self.matrix.todense(), other.matrix)
        else:  # matrix of `self` is dense
            if other.matrix_issparse:
                return np.array_equal(self.matrix, other.matrix.todense())
            else:
                return np.array_equal(self.matrix, other.matrix)
    elif isinstance(other, VectorWeightingBase):
        if self.matrix_issparse:
            return (np.array_equiv(self.matrix.diagonal(),
                                   other.vector) and
                    np.array_equal(self.matrix.asformat('dia').offsets,
                                   np.array([0])))
        else:
            return np.array_equal(
                self.matrix, other.vector * np.eye(self.matrix.shape[0]))
    elif isinstance(other, ConstWeightingBase):
        if self.matrix_issparse:
            return (np.array_equiv(self.matrix.diagonal(), other.const) and
                    np.array_equal(self.matrix.asformat('dia').offsets,
                                   np.array([0])))
        else:
            return np.array_equal(
                self.matrix, other.const * np.eye(self.matrix.shape[0]))
    else:
        return False
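Note how the method above switches between np.array_equal and np.array_equiv: full arrays of matching shape are compared with array_equal, while a diagonal is compared against a single constant with array_equiv so that the scalar can broadcast. A small sketch of that constant case (the scipy.sparse construction here is an illustration, not the library's own code):

import numpy as np
import scipy.sparse as sp

const = 2.5
mat = sp.diags(const * np.ones(4))    # sparse matrix with 2.5 on the main diagonal

# array_equal would need a full-length constant vector on the right-hand side;
# array_equiv lets the scalar broadcast against the extracted diagonal.
print(np.array_equiv(mat.diagonal(), const))                        # True
print(np.array_equal(mat.asformat('dia').offsets, np.array([0])))   # True: only the main diagonal is stored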
Example 6: test_xls_contents
def test_xls_contents():
    sample = Sample.from_file(from_current_dir('sample2.xlsx'))
    expected_attributes = array([[1], [2], [3], [4], [5]])
    expected_categories = array([1, 2, 3, 4, 5])
    expected_columns = ["Age"]
    assert array_equiv(sample.attributes, expected_attributes)
    assert array_equiv(sample.categories, expected_categories)
    assert sample.columns == expected_columns
Example 7: test_from_file_with_indices
def test_from_file_with_indices():
    sample = Sample.from_file(from_current_dir('sample2.xlsx'), [0, 4])
    expected_attributes = array([[1], [5]])
    expected_categories = array([1, 5])
    expected_columns = ["Age"]
    assert array_equiv(sample.attributes, expected_attributes)
    assert array_equiv(sample.categories, expected_categories)
    assert sample.columns == expected_columns
Example 8: test_equivalent
def test_equivalent(self):
    """
    Returns True if input arrays are shape consistent and all elements equal.
    Shape consistent means they are either the same shape,
    or one input array can be broadcast to create the same shape as the other one.
    """
    a = [1, 2]
    b = np.asarray(a)
    self.assertTrue(np.array_equiv(a, b))
    self.assertTrue(np.array_equiv(a, [a, a]))
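The docstring above captures the one behaviour that separates np.array_equiv from np.array_equal: "shape consistent" means broadcastable, not identical. A short contrast, independent of the test class:

import numpy as np

a = np.array([1, 2])
stacked = np.array([a, a])          # shape (2, 2)

print(np.array_equal(a, stacked))   # False: array_equal insists on identical shapes
print(np.array_equiv(a, stacked))   # True: array_equiv broadcasts (2,) to (2, 2) first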
Example 9: test_transform
def test_transform():
    sample = Sample.from_file(from_current_dir('sample3.xlsx'))
    sample.merge_columns(['One', 'Three'], TestClusterer(), 'New')
    expected_attributes = array([[2, 0], [2, 0]])
    expected_categories = array([0, 0])
    expected_columns = ['Two', 'New']
    assert array_equiv(sample.attributes, expected_attributes)
    assert array_equiv(sample.categories, expected_categories)
    assert sample.columns == expected_columns
Example 10: test_remove_existing_column
def test_remove_existing_column():
    sample = Sample.from_file(from_current_dir('sample3.xlsx'))
    sample.remove_column('Two')
    expected_attributes = array([[1, 3], [1, 3]])
    expected_categories = array([0, 0])
    expected_columns = ['One', 'Three']
    assert array_equiv(sample.attributes, expected_attributes)
    assert array_equiv(sample.categories, expected_categories)
    assert sample.columns == expected_columns
Example 11: test_normalize_existing_column
def test_normalize_existing_column():
    sample = Sample.from_file(from_current_dir('sample2.xlsx'))
    normalizer = sample.get_normalizer((0.0, 1.0))
    sample.normalize(normalizer, ["Age"])
    expected_attributes = array([[0.0], [0.25], [0.5], [0.75], [1.0]])
    expected_categories = array([1, 2, 3, 4, 5])
    expected_columns = ["Age"]
    assert array_equiv(sample.attributes, expected_attributes)
    assert array_equiv(sample.categories, expected_categories)
    assert sample.columns == expected_columns
Example 12: _checkSameExperimentResults
def _checkSameExperimentResults(exp1, exp2):
    """Return True if both experiments gave the same results, False otherwise."""
    if not np.array_equiv(exp1.result["learning_steps"], exp2.result["learning_steps"]):
        # Different number of steps before failure (where applicable)
        return False
    if not np.array_equiv(exp1.result["return"], exp2.result["return"]):
        # Different return on some test episode
        return False
    if not np.array_equiv(exp1.result["steps"], exp2.result["steps"]):
        # Different number of steps taken on some training episode
        return False
    return True
Example 13: _equal
def _equal(a, b):
    # recursion on subclasses of types: tuple, list, dict
    # specifically checks: float, ndarray
    if type(a) is float and type(b) is float:  # float
        return numpy.allclose(a, b)
    elif type(a) is numpy.ndarray and type(b) is numpy.ndarray:  # ndarray
        return numpy.array_equiv(a, b)  # alternative for float arrays: numpy.allclose(a, b[, rtol, atol])
    elif isinstance(a, dict) and isinstance(b, dict):  # dict
        if len(a) != len(b):
            return False
        t = True
        for key, val in a.items():
            if key not in b:
                return False
            t = _equal(val, b[key])
            if not t:
                return False
        return t
    elif (isinstance(a, list) and isinstance(b, list)) or (isinstance(a, tuple) and isinstance(b, tuple)):  # list, tuple
        if len(a) != len(b):
            return False
        t = True
        for vala, valb in zip(a, b):
            t = _equal(vala, valb)
            if not t:
                return False
        return t
    else:  # fallback
        return a == b
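The inline comment about numpy.allclose points at a real trade-off: np.array_equiv requires exact element-wise equality, which is brittle for float arrays produced by arithmetic, whereas np.allclose compares within a tolerance (and also broadcasts). A quick illustration, not part of the original helper:

import numpy as np

a = np.array([0.1, 0.2, 0.3])
b = np.array([0.1, 0.2, 0.1 + 0.2])   # 0.1 + 0.2 is not exactly 0.3 in binary floating point

print(np.array_equiv(a, b))           # False: exact element-wise comparison fails
print(np.allclose(a, b))              # True: equal within the default rtol/atol tolerances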
Example 14: test_agglomerative_clustering_with_distance_threshold
def test_agglomerative_clustering_with_distance_threshold(linkage):
    # Check that we obtain the correct number of clusters with
    # agglomerative clustering with distance_threshold.
    rng = np.random.RandomState(0)
    mask = np.ones([10, 10], dtype=bool)
    n_samples = 100
    X = rng.randn(n_samples, 50)
    connectivity = grid_to_graph(*mask.shape)
    # test when distance threshold is set to 10
    distance_threshold = 10
    for conn in [None, connectivity]:
        clustering = AgglomerativeClustering(
            n_clusters=None,
            distance_threshold=distance_threshold,
            connectivity=conn, linkage=linkage)
        clustering.fit(X)
        clusters_produced = clustering.labels_
        num_clusters_produced = len(np.unique(clustering.labels_))
        # test if the clusters produced match the point in the linkage tree
        # where the distance exceeds the threshold
        tree_builder = _TREE_BUILDERS[linkage]
        children, n_components, n_leaves, parent, distances = \
            tree_builder(X, connectivity=conn, n_clusters=None,
                         return_distance=True)
        num_clusters_at_threshold = np.count_nonzero(
            distances >= distance_threshold) + 1
        # test number of clusters produced
        assert num_clusters_at_threshold == num_clusters_produced
        # test clusters produced
        clusters_at_threshold = _hc_cut(n_clusters=num_clusters_produced,
                                        children=children,
                                        n_leaves=n_leaves)
        assert np.array_equiv(clusters_produced,
                              clusters_at_threshold)
Example 15: exp
def exp(xi, tau=None):
    # given a 6x1 vector, returns an SE3 object
    # may output garbage if the matrix is not invertible
    c = np.zeros((3, 1))
    xiHat = SE3.hat(xi)
    v = np.array([[xiHat[0, 3]],
                  [xiHat[1, 3]],
                  [xiHat[2, 3]]])
    w = np.array([[xiHat[2, 1]],
                  [xiHat[0, 2]],
                  [xiHat[1, 0]]])
    wtrans = w.T
    what = xiHat[0:3, 0:3]
    normw = np.linalg.norm(w)
    w2 = w.dot(wtrans) - math.pow(normw, 2) * np.eye(3)
    if tau is None:
        tau = 1
    print(tau)
    if not np.array_equiv(w, c):
        # nonzero rotation: closed-form rotation block and coupled translation
        ewt = np.eye(3) + (what / normw) * math.sin(tau * normw) + w2 * (1 - math.cos(normw * tau)) / math.pow(normw, 2)
        d1 = np.eye(3) - ewt
        d2 = d1.dot(what) / math.pow(normw, 2)
        d3 = d2.dot(v)
        d32 = (w.dot(wtrans).dot(v) * tau) / math.pow(normw, 2)
        d = d3 + d32
    else:
        # zero rotation: pure translation along v
        ewt = np.eye(3)
        d = v * tau
    expXi = np.concatenate((np.concatenate((ewt, d), axis=1), np.array([[0, 0, 0, 1]])), axis=0)
    omegatau = SE3()
    omegatau.__M = expXi
    return omegatau
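In exp above, np.array_equiv(w, c) compares the 3x1 rotation vector against an explicit 3x1 zero column to select the pure-translation branch. Because array_equiv broadcasts, the same test also works against a plain scalar zero; a small sketch of that check (variable names here are illustrative, not from the SE3 class):

import numpy as np

w = np.zeros((3, 1))                         # zero rotation vector -> pure translation branch
print(np.array_equiv(w, np.zeros((3, 1))))   # True: the explicit zero column used in exp()
print(np.array_equiv(w, 0))                  # True as well: the scalar zero broadcasts

w[2, 0] = 0.5                                # any nonzero component selects the rotation branch
print(np.array_equiv(w, 0))                  # False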