This article collects typical usage examples of the Python method theano.tensor.all. If you are wondering what tensor.all does, how to call it, or what it looks like in real code, the hand-picked examples below should help. You can also explore further usage examples from the containing module, theano.tensor.
The following shows 15 code examples of the tensor.all method, sorted by popularity by default.
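Before the collected examples, here is a minimal usage sketch (not taken from the projects below; the variable names are illustrative): tensor.all performs a logical-AND reduction, either over all elements or along a given axis.

import numpy
import theano
import theano.tensor as tensor

x = tensor.imatrix('x')
over_everything = tensor.all(x)       # 1 only if every entry is non-zero
over_rows = tensor.all(x, axis=1)     # one result per row
f = theano.function([x], [over_everything, over_rows])
print(f(numpy.array([[1, 1], [1, 0]], dtype='int32')))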
Example 1: local_gpu_togpu
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import all [as alias]
def local_gpu_togpu(node):
    if node.op == gpu_from_host:
        host_input = node.inputs[0]
        if host_input.owner and \
                hasattr(host_input.owner.op, 'make_gpu_node'):
            try:
                gpu_inputs = map(gpu_from_host, host_input.owner.inputs)
            except TypeError:
                return False
            return [host_input.owner.op.make_gpu_node(*gpu_inputs)]
    elif hasattr(node.op, 'make_gpu_node') and \
            all([x.owner and x.owner.op == host_from_gpu
                 for x in node.inputs]):
        gpu_inputs = [x.owner.inputs[0] for x in node.inputs]
        return [host_from_gpu(node.op.make_gpu_node(*gpu_inputs))]
    return False
Example 2: test_c
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import all [as alias]
def test_c(self):
    if not theano.config.cxx:
        raise SkipTest("G++ not available, so we need to skip this test.")
    for dtype in ["floatX", "complex64", "complex128", "int8", "uint8"]:
        self.with_linker(gof.CLinker(), scalar.add, dtype=dtype)
        self.with_linker(gof.CLinker(), scalar.mul, dtype=dtype)
    for dtype in ["floatX", "int8", "uint8"]:
        self.with_linker(gof.CLinker(), scalar.minimum, dtype=dtype)
        self.with_linker(gof.CLinker(), scalar.maximum, dtype=dtype)
        self.with_linker(gof.CLinker(), scalar.and_, dtype=dtype,
                         tensor_op=tensor.all)
        self.with_linker(gof.CLinker(), scalar.or_, dtype=dtype,
                         tensor_op=tensor.any)
    for dtype in ["int8", "uint8"]:
        self.with_linker(gof.CLinker(), scalar.or_, dtype=dtype)
        self.with_linker(gof.CLinker(), scalar.and_, dtype=dtype)
        self.with_linker(gof.CLinker(), scalar.xor, dtype=dtype)
Example 3: compute_weights
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import all [as alias]
def compute_weights(self, energies, attended_mask):
    if self.energy_normalizer == 'softmax':
        logger.debug("Using softmax attention weights normalization")
        energies = energies - energies.max(axis=0)
        unnormalized_weights = tensor.exp(energies)
    elif self.energy_normalizer == 'logistic':
        logger.debug("Using smoothfocus (logistic sigm) "
                     "attention weights normalization")
        unnormalized_weights = tensor.nnet.sigmoid(energies)
    elif self.energy_normalizer == 'relu':
        logger.debug("Using ReLU attention weights normalization")
        unnormalized_weights = tensor.maximum(energies / 1000., 0.0)
    else:
        raise Exception("Unknown energy_normalizer: {}"
                        .format(self.energy_normalizer))
    if attended_mask:
        unnormalized_weights *= attended_mask
    # If the mask consists of all zeros, use 1 as the normalization coefficient
    normalization = (unnormalized_weights.sum(axis=0) +
                     tensor.all(1 - attended_mask, axis=0))
    return unnormalized_weights / normalization
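The tensor.all(1 - attended_mask, axis=0) term above guards the division: when a column of the mask is entirely zero, the masked weights in that column sum to zero, and the all-reduction contributes a 1 so the denominator never vanishes. A small illustration of just that trick (the names weights and mask are illustrative, not from the original project):

import numpy
import theano
import theano.tensor as tensor

weights = tensor.matrix('weights')
mask = tensor.matrix('mask')
masked = weights * mask
# add 1 to the denominator only where the mask column is all zeros
normalization = masked.sum(axis=0) + tensor.all(1 - mask, axis=0)
f = theano.function([weights, mask], masked / normalization)
w = numpy.ones((3, 2), dtype=theano.config.floatX)
m = numpy.array([[1, 0], [1, 0], [1, 0]], dtype=theano.config.floatX)
print(f(w, m))  # second column comes out as zeros instead of NaN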
Example 4: zeros
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import all [as alias]
def zeros(shape, dtype=None, name=None):
    '''Instantiates an all-zeros variable.
    '''
    if dtype is None:
        dtype = floatx()
    return variable(np.zeros(shape), dtype, name)
Example 5: ones
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import all [as alias]
def ones(shape, dtype=None, name=None):
    '''Instantiates an all-ones variable.
    '''
    if dtype is None:
        dtype = floatx()
    return variable(np.ones(shape), dtype, name)
Example 6: all
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import all [as alias]
def all(x, axis=None, keepdims=False):
    '''Bitwise reduction (logical AND).
    '''
    return T.all(x, axis=axis, keepdims=keepdims)
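A possible way to call this backend wrapper from user code, assuming a Keras installation configured with the Theano backend (the array values are illustrative):

from keras import backend as K
import numpy as np

x = K.variable(np.array([[1, 1], [1, 0]]))
print(K.eval(K.all(x, axis=1)))  # logical AND over each row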
Example 7: isvalid
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import all [as alias]
def isvalid(x):
    return T.all(T.logical_not(T.logical_or(T.isnan(x), T.isinf(x))))
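A short usage sketch of the same idea, written only with reductions that theano.tensor is known to provide (isnan, isinf, eq, all); it returns 1 exactly when the input contains neither NaN nor Inf:

import numpy
import theano
import theano.tensor as T

x = T.vector('x')
valid = T.all(T.eq(T.isnan(x) + T.isinf(x), 0))
check = theano.function([x], valid)
print(check(numpy.array([0.0, 1.0], dtype=theano.config.floatX)))        # 1
print(check(numpy.array([0.0, numpy.inf], dtype=theano.config.floatX)))  # 0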
Example 8: with_linker
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import all [as alias]
def with_linker(self, linker):
    for xsh, shuffle, zsh in [((2, 3), (1, 'x', 0), (3, 1, 2)),
                              ((1, 2, 3), (1, 2), (2, 3)),
                              ((1, 2, 1, 3), (1, 3), (2, 3)),
                              ((2, 3, 4), (2, 1, 0), (4, 3, 2)),
                              ((2, 3, 4), ('x', 2, 1, 0, 'x'),
                               (1, 4, 3, 2, 1)),
                              ((1, 4, 3, 2, 1), (3, 2, 1), (2, 3, 4)),
                              ((1, 1, 4), (1, 2), (1, 4)),
                              ((1, 1, 1), (), ()),
                              ((1,), ('x', 'x'), (1, 1))]:
        ib = [(entry == 1) for entry in xsh]
        x = self.type(self.dtype, ib)('x')
        e = self.op(ib, shuffle)(x)
        f = copy(linker).accept(FunctionGraph([x], [e])).make_function()
        assert f(numpy.ones(xsh, dtype=self.dtype)).shape == zsh
        # test that DimShuffle.infer_shape works correctly
        x = self.type(self.dtype, ib)('x')
        e = self.op(ib, shuffle)(x)
        f = copy(linker).accept(FunctionGraph([x],
                                              [e.shape])).make_function()
        assert all(f(numpy.ones(xsh, dtype=self.dtype))) == all(zsh)

    # Test when we drop an axis that is not broadcastable
    ib = [False, True, False]
    x = self.type(self.dtype, ib)('x')
    self.assertRaises(ValueError, self.op, ib, shuffle)

    # Test when we drop an axis that doesn't have shape 1
    ib = [True, True, False]
    x = self.type(self.dtype, ib)('x')
    e = self.op(ib, (1, 2))(x)
    f = copy(linker).accept(FunctionGraph([x], [e.shape])).make_function()
    self.assertRaises(TypeError, f, numpy.ones((2, 1, 4)))

    # Test that we can't take a dimension multiple times
    xsh, shuffle, zsh = ((1, 1, 4), (0, 1, 2, 0), (1, 4))
    ib = [False, True, False]
    x = self.type(self.dtype, ib)('x')
    self.assertRaises(ValueError, DimShuffle, ib, shuffle)
Example 9: with_linker_inplace
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import all [as alias]
def with_linker_inplace(self, linker, op, type, rand_val):
    for xsh, ysh in [((5, 5), (5, 5)),
                     ((5, 5), (1, 5)),
                     ((5, 5), (5, 1)),
                     ((1, 1), (1, 1)),
                     ((2, 3, 4, 5), (2, 3, 4, 5)),
                     ((2, 3, 4, 5), (1, 3, 1, 5)),
                     ((2, 3, 4, 5), (1, 1, 1, 1)),
                     ((), ())]:
        x = type('float64', [(entry == 1) for entry in xsh])('x')
        y = type('float64', [(entry == 1) for entry in ysh])('y')
        e = op(scalar.Add(scalar.transfer_type(0)), {0: 0})(x, y)
        f = copy(linker).accept(FunctionGraph([x, y], [e])).make_function()
        xv = rand_val(xsh)
        yv = rand_val(ysh)
        zv = xv + yv
        f(xv, yv)
        self.assertTrue((xv == zv).all())
        # test Elemwise.infer_shape
        # (the Shape op doesn't implement c_code!)
        if isinstance(linker, gof.PerformLinker):
            x = type('float64', [(entry == 1) for entry in xsh])('x')
            y = type('float64', [(entry == 1) for entry in ysh])('y')
            e = op(scalar.Add(scalar.transfer_type(0)), {0: 0})(x, y)
            f = copy(linker).accept(FunctionGraph(
                [x, y], [e.shape])).make_function()
            xv = rand_val(xsh)
            yv = rand_val(ysh)
            zv = xv + yv
            f(xv, yv)
            assert xv.shape == zv.shape
Example 10: test_fill
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import all [as alias]
def test_fill(self):
    if not theano.config.cxx:
        raise SkipTest("G++ not available, so we need to skip this test.")
    x = self.ctype('float64', [0, 0])('x')
    y = self.ctype('float64', [1, 1])('y')
    for linker, op in zip(self.linkers, [self.op, self.cop]):
        e = op(scalar.Second(scalar.transfer_type(0)), {0: 0})(x, y)
        f = linker().accept(FunctionGraph([x, y], [e])).make_function()
        xv = self.rand_cval((5, 5))
        yv = self.rand_cval((1, 1))
        f(xv, yv)
        assert (xv == yv).all()
Example 11: test_weird_strides
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import all [as alias]
def test_weird_strides(self):
    if not theano.config.cxx:
        raise SkipTest("G++ not available, so we need to skip this test.")
    x = self.ctype('float64', [0, 0, 0, 0, 0])('x')
    y = self.ctype('float64', [0, 0, 0, 0, 0])('y')
    for linker, op in zip(self.linkers, [self.op, self.cop]):
        e = op(scalar.add)(x, y)
        f = linker().accept(FunctionGraph([x, y], [e])).make_function()
        xv = self.rand_cval((2, 2, 2, 2, 2))
        yv = self.rand_cval((2, 2, 2, 2, 2)).transpose(4, 0, 3, 1, 2)
        zv = xv + yv
        assert (f(xv, yv) == zv).all()
Example 12: test_same_inputs
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import all [as alias]
def test_same_inputs(self):
    if not theano.config.cxx:
        raise SkipTest("G++ not available, so we need to skip this test.")
    x = self.ctype('float64', [0, 0])('x')
    for linker, op in zip(self.linkers, [self.op, self.cop]):
        e = op(scalar.add)(x, x)
        f = linker().accept(FunctionGraph([x], [e])).make_function()
        xv = self.rand_cval((2, 2))
        zv = xv + xv
        assert (f(xv) == zv).all()
Example 13: test_perform_nan
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import all [as alias]
def test_perform_nan(self):
    for dtype in ["floatX", "complex64", "complex128"]:
        self.with_linker(gof.PerformLinker(), scalar.add, dtype=dtype,
                         test_nan=True)
        self.with_linker(gof.PerformLinker(), scalar.mul, dtype=dtype,
                         test_nan=True)
        self.with_linker(gof.PerformLinker(), scalar.maximum, dtype=dtype,
                         test_nan=True)
        self.with_linker(gof.PerformLinker(), scalar.minimum, dtype=dtype,
                         test_nan=True)
        self.with_linker(gof.PerformLinker(), scalar.or_, dtype=dtype,
                         test_nan=True, tensor_op=tensor.any)
        self.with_linker(gof.PerformLinker(), scalar.and_, dtype=dtype,
                         test_nan=True, tensor_op=tensor.all)
Example 14: run_isfunc
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import all [as alias]
def run_isfunc(self, isfunc):
    for input in (self.scalar, self.vector):
        theano_isfunc = theano.function([input],
                                        getattr(tensor, isfunc)(input),
                                        mode=self.mode)
        numpy_isfunc = getattr(numpy, isfunc)
        for x in self.test_vals:
            if ((x.ndim == 0 and input is not self.scalar) or
                    (x.ndim == 1 and input is not self.vector)):
                # We only test with the appropriate input type.
                continue
            t_out = theano_isfunc(x)
            n_out = numpy_isfunc(x)
            assert (t_out == n_out).all(), (t_out, n_out)
Example 15: test_all_grad
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import all [as alias]
def test_all_grad(self):
    x = tensor.bmatrix('x')
    x_all = x.all()
    gx = theano.grad(x_all, x)
    f = theano.function([x], gx)
    x_random = self.rng.binomial(n=1, p=0.5, size=(5, 7)).astype('int8')
    for x_val in (x_random,
                  numpy.zeros_like(x_random),
                  numpy.ones_like(x_random)):
        gx_val = f(x_val)
        assert gx_val.shape == x_val.shape
        assert numpy.all(gx_val == 0)
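The test above relies on the fact that the gradient of a logical reduction such as all() is defined and is zero everywhere. A standalone sketch of the same check (not part of the test suite; names are illustrative):

import numpy
import theano
import theano.tensor as tensor

x = tensor.bmatrix('x')
gx = theano.grad(x.all(), x)
f = theano.function([x], gx)
x_val = numpy.ones((3, 4), dtype='int8')
assert numpy.all(f(x_val) == 0)  # the gradient is zero for every input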