This article collects typical usage examples of numpy.testing in Python. If you have been wondering what exactly numpy.testing does and how to use it, the curated code samples below may help; you can also explore further usage examples from the numpy package it belongs to.
The following presents 15 code examples of numpy.testing, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
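Before the examples, here is a minimal standalone sketch of the numpy.testing assertions that appear most often below (assert_allclose, assert_array_equal, assert_array_almost_equal, assert_almost_equal); the data is made up purely for illustration.

import numpy as np
import numpy.testing as npt

a = np.array([0.1, 0.2, 0.3])
b = a + 1e-9                                     # tiny numerical noise

npt.assert_allclose(a, b, rtol=1e-7)             # passes: within relative tolerance
npt.assert_array_almost_equal(a, b, decimal=6)   # passes: equal to 6 decimal places
npt.assert_array_equal(a, a.copy())              # passes: exact elementwise equality
npt.assert_almost_equal(a[2], 0.3, decimal=7)    # scalar comparison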
Example 1: test_forward_probability2

# Required imports: import numpy [as alias]
# or: from numpy import testing [as alias]
def test_forward_probability2():
    from numpy.testing import assert_array_almost_equal

    model, states, symbols, seq = _wikipedia_example_hmm()
    fp = 2**model._forward_probability(seq)

    # examples in wikipedia are normalized
    fp = (fp.T / fp.sum(axis=1)).T

    wikipedia_results = [
        [0.8182, 0.1818],
        [0.8834, 0.1166],
        [0.1907, 0.8093],
        [0.7308, 0.2692],
        [0.8673, 0.1327],
    ]

    assert_array_almost_equal(wikipedia_results, fp, 4)
Example 2: test_backward_probability

# Required imports: import numpy [as alias]
# or: from numpy import testing [as alias]
def test_backward_probability():
    from numpy.testing import assert_array_almost_equal

    model, states, symbols, seq = _wikipedia_example_hmm()
    bp = 2**model._backward_probability(seq)

    # examples in wikipedia are normalized
    bp = (bp.T / bp.sum(axis=1)).T

    wikipedia_results = [
        # Forward-backward algorithm doesn't need b0_5,
        # so .backward_probability doesn't compute it.
        # [0.6469, 0.3531],
        [0.5923, 0.4077],
        [0.3763, 0.6237],
        [0.6533, 0.3467],
        [0.6273, 0.3727],
        [0.5, 0.5],
    ]

    assert_array_almost_equal(wikipedia_results, bp, 4)
Example 3: assert_array_compare

# Required imports: import numpy [as alias]
# or: from numpy import testing [as alias]
def assert_array_compare(comparison, x, y, err_msg='', verbose=True, header='',
                         fill_value=True):
    """
    Asserts that comparison between two masked arrays is satisfied.
    The comparison is elementwise.
    """
    # Allocate a common mask and refill
    m = mask_or(getmask(x), getmask(y))
    x = masked_array(x, copy=False, mask=m, keep_mask=False, subok=False)
    y = masked_array(y, copy=False, mask=m, keep_mask=False, subok=False)
    if ((x is masked) and not (y is masked)) or \
            ((y is masked) and not (x is masked)):
        msg = build_err_msg([x, y], err_msg=err_msg, verbose=verbose,
                            header=header, names=('x', 'y'))
        raise ValueError(msg)
    # OK, now run the basic tests on filled versions
    return np.testing.assert_array_compare(comparison,
                                           x.filled(fill_value),
                                           y.filled(fill_value),
                                           err_msg=err_msg,
                                           verbose=verbose, header=header)
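This helper builds on numpy.ma's mask handling: the two masks are combined with mask_or, and the masked slots are replaced by a shared fill value before the plain elementwise comparison runs, so masked entries never affect the outcome. A minimal standalone sketch of that mechanism (with made-up data; it is not the helper above):

import numpy as np
import numpy.ma as ma

x = ma.masked_array([1.0, 2.0, 3.0], mask=[0, 1, 0])
y = ma.masked_array([1.0, 9.0, 3.0], mask=[0, 0, 1])

m = ma.mask_or(ma.getmask(x), ma.getmask(y))   # combined mask: [False, True, True]
xf = ma.masked_array(x, mask=m).filled(1.0)    # masked slots replaced by the fill value
yf = ma.masked_array(y, mask=m).filled(1.0)

np.testing.assert_array_equal(xf, yf)          # passes: only unmasked entries are compared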
Example 4: test_warning_calls

# Required imports: import numpy [as alias]
# or: from numpy import testing [as alias]
def test_warning_calls():
    # combined "ignore" and stacklevel error
    base = Path(numpy.__file__).parent
    for path in base.rglob("*.py"):
        if base / "testing" in path.parents:
            continue
        if path == base / "__init__.py":
            continue
        if path == base / "random" / "__init__.py":
            continue
        # use tokenize to auto-detect encoding on systems where no
        # default encoding is defined (e.g. LANG='C')
        with tokenize.open(str(path)) as file:
            tree = ast.parse(file.read())
            FindFuncs(path).visit(tree)
Example 5: test_cholesky_and_cholesky_grad_shape

# Required imports: import numpy [as alias]
# or: from numpy import testing [as alias]
def test_cholesky_and_cholesky_grad_shape():
    if not imported_scipy:
        raise SkipTest("Scipy needed for the Cholesky op.")

    rng = numpy.random.RandomState(utt.fetch_seed())
    x = tensor.matrix()
    for l in (cholesky(x), Cholesky(lower=True)(x), Cholesky(lower=False)(x)):
        f_chol = theano.function([x], l.shape)
        g = tensor.grad(l.sum(), x)
        f_cholgrad = theano.function([x], g.shape)
        topo_chol = f_chol.maker.fgraph.toposort()
        topo_cholgrad = f_cholgrad.maker.fgraph.toposort()
        if config.mode != 'FAST_COMPILE':
            assert sum([node.op.__class__ == Cholesky
                        for node in topo_chol]) == 0
            assert sum([node.op.__class__ == CholeskyGrad
                        for node in topo_cholgrad]) == 0
        for shp in [2, 3, 5]:
            m = numpy.cov(rng.randn(shp, shp + 10)).astype(config.floatX)
            yield numpy.testing.assert_equal, f_chol(m), (shp, shp)
            yield numpy.testing.assert_equal, f_cholgrad(m), (shp, shp)
Example 6: test_eigvalsh

# Required imports: import numpy [as alias]
# or: from numpy import testing [as alias]
def test_eigvalsh():
    if not imported_scipy:
        raise SkipTest("Scipy needed for the geigvalsh op.")
    import scipy.linalg

    A = theano.tensor.dmatrix('a')
    B = theano.tensor.dmatrix('b')
    f = function([A, B], eigvalsh(A, B))

    rng = numpy.random.RandomState(utt.fetch_seed())
    a = rng.randn(5, 5)
    a = a + a.T
    for b in [10 * numpy.eye(5, 5) + rng.randn(5, 5)]:
        w = f(a, b)
        refw = scipy.linalg.eigvalsh(a, b)
        numpy.testing.assert_array_almost_equal(w, refw)

    # We need to test None separately, as otherwise DebugMode will
    # complain, as this isn't a valid ndarray.
    b = None
    B = theano.tensor.NoneConst
    f = function([A], eigvalsh(A, B))
    w = f(a)
    refw = scipy.linalg.eigvalsh(a, b)
    numpy.testing.assert_array_almost_equal(w, refw)
Example 7: test_diag

# Required imports: import numpy [as alias]
# or: from numpy import testing [as alias]
def test_diag(self):
    # test that it builds a matrix with given diagonal when using
    # vector inputs
    x = theano.tensor.vector()
    y = diag(x)
    assert y.owner.op.__class__ == AllocDiag

    # test that it extracts the diagonal when using matrix input
    x = theano.tensor.matrix()
    y = extract_diag(x)
    assert y.owner.op.__class__ == ExtractDiag

    # other types should raise error
    x = theano.tensor.tensor3()
    ok = False
    try:
        y = extract_diag(x)
    except TypeError:
        ok = True
    assert ok

    # not testing the view=True case since it is not used anywhere.
Example 8: test_numpy_dot_product_2a

# Required imports: import numpy [as alias]
# or: from numpy import testing [as alias]
def test_numpy_dot_product_2a(self):
    random.seed(44)
    listLength = 20
    arr1 = [random.uniform(-10, 10) for _ in range(0, listLength)]
    arr2 = [random.uniform(-10, 10) for _ in range(0, listLength)]

    def f():
        a = numpy.array(arr1)
        b = numpy.array(arr2)
        return numpy.dot(a, b)

    r1 = self.evaluateWithExecutor(f)
    r2 = f()
    numpy.testing.assert_allclose(r1, r2)
Example 9: test_numpy_dot_product_2b

# Required imports: import numpy [as alias]
# or: from numpy import testing [as alias]
def test_numpy_dot_product_2b(self):
    random.seed(44)
    listLength = 20
    arr1 = [random.uniform(-10, 10) for _ in range(0, listLength)]
    arr2 = [random.uniform(-10, 10) for _ in range(0, listLength)]

    def f():
        a = numpy.array(arr1)
        b = numpy.array(arr2)
        return a.dot(b)

    r1 = self.evaluateWithExecutor(f)
    r2 = f()
    numpy.testing.assert_allclose(r1, r2)
Example 10: test_hyp2f1_2

# Required imports: import numpy [as alias]
# or: from numpy import testing [as alias]
def test_hyp2f1_2(self):
    def f(a, b, c, z):
        return scipy.special.hyp2f1(a, b, c, z)

    a, b, c, z = 2.8346157367796936, 0.0102, 3.8346157367796936, 0.9988460588541513

    res1 = self.evaluateWithExecutor(f, a, b, c, z)
    res2 = f(a, b, c, z)

    numpy.testing.assert_almost_equal(res1, res2)
    numpy.testing.assert_almost_equal(res1, 1.0182383750413575)
Example 11: binary_logistic_regression_probabilities

# Required imports: import numpy [as alias]
# or: from numpy import testing [as alias]
def binary_logistic_regression_probabilities(self, method):
    X, y = self.exampleData()

    def f():
        fit = BinaryLogisticRegressionFitter(
            C=1.0 / len(X),
            hasIntercept=True,
            method=method
        ).fit(X, y)
        return fit.predict_probability(X)

    expectedPredictedProbabilities = [0.45810128, 0.58776695, 0.6510714]
    computedProbabilities = self.evaluateWithExecutor(f)

    numpy.testing.assert_allclose(
        computedProbabilities,
        expectedPredictedProbabilities,
        rtol=0.1
    )
Example 12: test_sort_tensor_by_length

# Required imports: import numpy [as alias]
# or: from numpy import testing [as alias]
def test_sort_tensor_by_length(self):
    tensor = torch.rand([5, 7, 9])
    tensor[0, 3:, :] = 0
    tensor[1, 4:, :] = 0
    tensor[2, 1:, :] = 0
    tensor[3, 5:, :] = 0

    sequence_lengths = torch.LongTensor([3, 4, 1, 5, 7])
    sorted_tensor, sorted_lengths, reverse_indices, _ = util.sort_batch_by_length(
        tensor, sequence_lengths
    )

    # Test sorted indices are padded correctly.
    numpy.testing.assert_array_equal(sorted_tensor[1, 5:, :].data.numpy(), 0.0)
    numpy.testing.assert_array_equal(sorted_tensor[2, 4:, :].data.numpy(), 0.0)
    numpy.testing.assert_array_equal(sorted_tensor[3, 3:, :].data.numpy(), 0.0)
    numpy.testing.assert_array_equal(sorted_tensor[4, 1:, :].data.numpy(), 0.0)

    assert sorted_lengths.data.equal(torch.LongTensor([7, 5, 4, 3, 1]))

    # Test restoration indices correctly recover the original tensor.
    assert sorted_tensor.index_select(0, reverse_indices).data.equal(tensor.data)
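util.sort_batch_by_length above is AllenNLP's helper. As a rough illustration of the behaviour the test exercises (not the library implementation), sorting a batch by descending length and later restoring the original order can be sketched in plain PyTorch as follows; the function name is hypothetical.

import torch

def sort_by_length_sketch(tensor, lengths):
    # Sort the batch dimension by descending sequence length.
    sorted_lengths, permutation = lengths.sort(0, descending=True)
    sorted_tensor = tensor.index_select(0, permutation)
    # restoration[i] is the row of the sorted batch holding original row i,
    # so indexing the sorted tensor with it recovers the input order.
    restoration = permutation.argsort(0)
    return sorted_tensor, sorted_lengths, restoration

batch = torch.rand(5, 7, 9)
lengths = torch.LongTensor([3, 4, 1, 5, 7])
s, sl, rest = sort_by_length_sketch(batch, lengths)
assert s.index_select(0, rest).equal(batch)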
Example 13: test_weighted_sum_works_on_simple_input

# Required imports: import numpy [as alias]
# or: from numpy import testing [as alias]
def test_weighted_sum_works_on_simple_input(self):
    batch_size = 1
    sentence_length = 5
    embedding_dim = 4
    sentence_array = numpy.random.rand(batch_size, sentence_length, embedding_dim)
    sentence_tensor = torch.from_numpy(sentence_array).float()
    attention_tensor = torch.FloatTensor([[0.3, 0.4, 0.1, 0, 1.2]])
    aggregated_array = util.weighted_sum(sentence_tensor, attention_tensor).data.numpy()

    assert aggregated_array.shape == (batch_size, embedding_dim)

    expected_array = (
        0.3 * sentence_array[0, 0]
        + 0.4 * sentence_array[0, 1]
        + 0.1 * sentence_array[0, 2]
        + 0.0 * sentence_array[0, 3]
        + 1.2 * sentence_array[0, 4]
    )
    numpy.testing.assert_almost_equal(aggregated_array, [expected_array], decimal=5)
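The quantity util.weighted_sum is expected to produce here can be reproduced with plain numpy; the snippet below is an illustrative re-implementation of that math, not AllenNLP's code.

import numpy as np

attention = np.array([[0.3, 0.4, 0.1, 0.0, 1.2]])        # (batch, sentence_length)
sentence = np.random.rand(1, 5, 4)                        # (batch, sentence_length, embedding_dim)

# Each output row is the attention-weighted sum of the sentence's word embeddings.
weighted = np.einsum('bs,bse->be', attention, sentence)   # (batch, embedding_dim)

expected = (attention[0, :, None] * sentence[0]).sum(axis=0)
np.testing.assert_almost_equal(weighted[0], expected, decimal=10)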
Example 14: test_weighted_sum_handles_higher_order_input

# Required imports: import numpy [as alias]
# or: from numpy import testing [as alias]
def test_weighted_sum_handles_higher_order_input(self):
    batch_size = 1
    length_1 = 5
    length_2 = 6
    length_3 = 2
    embedding_dim = 4
    sentence_array = numpy.random.rand(batch_size, length_1, length_2, length_3, embedding_dim)
    attention_array = numpy.random.rand(batch_size, length_1, length_2, length_3)
    sentence_tensor = torch.from_numpy(sentence_array).float()
    attention_tensor = torch.from_numpy(attention_array).float()
    aggregated_array = util.weighted_sum(sentence_tensor, attention_tensor).data.numpy()

    assert aggregated_array.shape == (batch_size, length_1, length_2, embedding_dim)

    expected_array = (
        attention_array[0, 3, 2, 0] * sentence_array[0, 3, 2, 0]
        + attention_array[0, 3, 2, 1] * sentence_array[0, 3, 2, 1]
    )
    numpy.testing.assert_almost_equal(aggregated_array[0, 3, 2], expected_array, decimal=5)
Example 15: test_weighted_sum_handles_3d_attention_with_3d_matrix

# Required imports: import numpy [as alias]
# or: from numpy import testing [as alias]
def test_weighted_sum_handles_3d_attention_with_3d_matrix(self):
    batch_size = 1
    length_1 = 5
    length_2 = 2
    embedding_dim = 4
    sentence_array = numpy.random.rand(batch_size, length_2, embedding_dim)
    attention_array = numpy.random.rand(batch_size, length_1, length_2)
    sentence_tensor = torch.from_numpy(sentence_array).float()
    attention_tensor = torch.from_numpy(attention_array).float()
    aggregated_array = util.weighted_sum(sentence_tensor, attention_tensor).data.numpy()

    assert aggregated_array.shape == (batch_size, length_1, embedding_dim)

    for i in range(length_1):
        expected_array = (
            attention_array[0, i, 0] * sentence_array[0, 0]
            + attention_array[0, i, 1] * sentence_array[0, 1]
        )
        numpy.testing.assert_almost_equal(aggregated_array[0, i], expected_array, decimal=5)