This article collects typical code examples of the theano.gof.python25.all function in Python. If you have been wondering what the Python all function is for, how to call it, or what real usage looks like, the curated examples below should help.
The following shows 15 code examples of the all function, sorted by popularity by default.
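Before the examples, a minimal sketch of the typical pattern (assuming, as the module name suggests, that theano.gof.python25.all is a compatibility re-export that behaves like the Python builtin all): it returns True only when every element of an iterable is truthy, and the examples below almost always feed it a list comprehension of boolean checks.

from theano.gof.python25 import all

# Typical predicate-over-iterable usage, mirroring the examples below.
shapes = [(2, 3), (4, 5), (6,)]
assert all([isinstance(s, tuple) for s in shapes])
assert not all([len(s) == 2 for s in shapes])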
Example 1: local_gpualloc
def local_gpualloc(node):
replace = False
if node.op == tensor.alloc:
if node.inputs[0].owner and node.inputs[0].owner.op == host_from_gpu:
replace = True
elif all([c != 'output' and c.op == gpu_from_host
for c, idx in node.outputs[0].clients]):
replace = True
elif all([c != 'output' and c.op == tensor.join and
all([i.owner and i.owner.op in [host_from_gpu, tensor.alloc]
for i in c.inputs[1:]])
for c, idx in node.outputs[0].clients]):
replace = True
if replace:
val = node.inputs[0]
shp = node.inputs[1:]
old_out = node.outputs[0]
val2 = tensor.shape_padleft(val, len(shp) - val.ndim)
new_out = host_from_gpu(gpu_alloc(val, *shp))
if new_out.type != old_out.type:
assert new_out.type.ndim == old_out.type.ndim
assert new_out.type.dtype == old_out.type.dtype
for b_old, b_new in zip(old_out.type.broadcastable,
new_out.type.broadcastable):
assert b_new or (not b_old)
            new_out = tensor.patternbroadcast(new_out, old_out.broadcastable)
return [new_out]
Example 2: test_multiple_out_grad
def test_multiple_out_grad(self):
# Tests that we can compute the gradients through lazy if
x1 = tensor.vector('x1')
x2 = tensor.vector('x2')
y1 = tensor.vector('y1')
y2 = tensor.vector('y2')
c = tensor.iscalar('c')
z = ifelse(c, (x1, x2), (y1, y2))
grads = tensor.grad(z[0].sum() + z[1].sum(),
[x1, x2, y1, y2])
f = theano.function([c, x1, x2, y1, y2], grads)
rng = numpy.random.RandomState(utt.fetch_seed())
lens = [rng.randint(200) for i in range(4)]
values = [numpy.asarray(rng.uniform(size=(l,)), theano.config.floatX)
for l in lens]
outs_1 = f(1, *values)
assert all([x.shape[0] == y for x, y in zip(outs_1, lens)])
assert numpy.all(outs_1[0] == 1.)
assert numpy.all(outs_1[1] == 1.)
assert numpy.all(outs_1[2] == 0.)
assert numpy.all(outs_1[3] == 0.)
outs_0 = f(0, *values)
    assert all([x.shape[0] == y for x, y in zip(outs_0, lens)])
assert numpy.all(outs_0[0] == 0.)
assert numpy.all(outs_0[1] == 0.)
assert numpy.all(outs_0[2] == 1.)
assert numpy.all(outs_0[3] == 1.)
Example 3: local_gpuaalloc2
def local_gpuaalloc2(node):
"""
Join(axis, Alloc, Alloc, ...) -> Join(axis, GpuAlloc, Alloc, ...)
Moves an alloc that is an input to join to the gpu.
"""
if isinstance(node.op, tensor.Alloc) and all(
c != "output"
and c.op == tensor.join
and all(i.owner and i.owner.op in [host_from_gpu, tensor.alloc] for i in c.inputs[1:])
for c, idx in node.outputs[0].clients
):
return [host_from_gpu(gpu_alloc(*node.inputs))]
Example 4: test_give_variables_names_small
def test_give_variables_names_small():
x = theano.tensor.matrix('x')
y = theano.tensor.dot(x, x)
fgraph = theano.FunctionGraph((x,), (y,))
give_variables_names(fgraph.variables)
assert all(var.name for var in fgraph.variables)
assert unique([var.name for var in fgraph.variables])
Example 5: is_updates
def is_updates(elem):
if isinstance(elem, dict):
return True
# Dictionaries can be given as lists of tuples
if isinstance(elem, (list, tuple)) and all([isinstance(x, (list, tuple)) and len(x) == 2 for x in elem]):
return True
return False
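A short, hypothetical usage sketch of the helper above (the shared variable w and its update expression are made up for illustration, and the helper is assumed to be in scope): both a dict and a list of (variable, expression) pairs count as updates, while a bare variable does not.

import theano

w = theano.shared(0.0, name='w')   # hypothetical shared variable
assert is_updates({w: w + 1})      # dict form
assert is_updates([(w, w + 1)])    # list-of-pairs form
assert not is_updates(w)           # a bare variable is not an update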
Example 6: is_outputs
def is_outputs(elem):
if (isinstance(elem, (list, tuple)) and
all([isinstance(x, theano.Variable) for x in elem])):
return True
if isinstance(elem, theano.Variable):
return True
return False
Example 7: guess_n_streams
def guess_n_streams(size, warn=True):
"""
Return a guess at a good number of streams.
:param warn: If True, warn when a guess cannot be made (in which case
we return 30 * 256).
"""
# TODO: a smart way of choosing the number of streams, see #612.
# Note that this code was moved out of `MRG_RandomStreams` so that it can
# be easily accessed from tests, where we want to disable the warning.
if (isinstance(size, (tuple, list)) and
all([isinstance(i, int) for i in size])):
# We can make a guess.
r = 1
for s in size:
r *= s
if r > 6:
r = r/6 # chosen as fastest for rbm_benchmark
return r
else:
if warn:
warnings.warn((
"MRG_RandomStreams Can't determine #streams from "
"size (%s), guessing 30*256") % str(size),
stacklevel=3)
return 30 * 256
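A brief, hypothetical check of the helper above: when every entry of size is a plain int, the guess is the product of the dimensions, divided by 6 once that product exceeds 6; a symbolic size falls back to the 30 * 256 default.

import theano

assert guess_n_streams((3, 4)) == 2    # 12 elements -> 12 / 6 streams
assert guess_n_streams((2, 3)) == 6    # product of 6 is not > 6, kept as-is
assert guess_n_streams(theano.tensor.lscalar(), warn=False) == 30 * 256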
Example 8: normal
def normal(self, size=None, avg=0.0, std=1.0, ndim=None,
dtype=config.floatX):
"""
Return symbolic tensor of normally-distributed numbers.
    :param size: Can be a list of integers or a Theano variable
        (e.g. the shape of another Theano variable).
"""
if isinstance(size, tuple):
msg = "size must be a tuple of int or a Theano variable"
assert all([isinstance(i, int) or isinstance(i, Variable)
for i in size]), msg
else:
msg = "size must be a tuple of int or a Theano variable"
assert isinstance(size, Variable) and size.ndim == 1, msg
generator = theano.shared(False) # makes a generic
s_size = theano.tensor.as_tensor_variable(size)
u = CURAND_Normal.new_auto_update(generator, ndim, dtype, s_size,
self.next_seed())
self.state_updates.append(u.update)
rval = u * std + avg
if u.type.broadcastable != rval.type.broadcastable:
raise NotImplementedError(
            'Increase the size to match the broadcasting pattern of '
            '`avg` and `std` arguments'
)
return rval
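A hypothetical usage sketch for the method above; it assumes a CUDA-enabled Theano setup and that the method belongs to CURAND_RandomStreams from theano.sandbox.cuda.rng_curand (as the calls to CURAND_Normal and self.next_seed suggest):

import theano
from theano.sandbox.cuda.rng_curand import CURAND_RandomStreams

rng = CURAND_RandomStreams(seed=1234)
# size may be a tuple of ints/Variables or a 1-d Theano variable.
samples = rng.normal(size=(20, 30), avg=0.0, std=2.0)
f = theano.function([], samples)
assert f().shape == (20, 30)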
Example 9: test_zeros_basic
def test_zeros_basic():
for shp in [(3,4,5), (300,), (), (0,7)]:
_a = cuda_ndarray.CudaNdarray.zeros(shp)
_n = numpy.zeros(shp, dtype="float32")
assert numpy.allclose(numpy.asarray(_a), _n)
assert _a.shape == _n.shape
assert all(_a._strides == numpy.asarray(_n.strides)/4)
    # TODO: The following don't have the same strides!
# This should be fixed with the new GpuNdArray.
for shp in [(3,0), (4,1,5)]:
_a = cuda_ndarray.CudaNdarray.zeros(shp)
_n = numpy.zeros(shp, dtype="float32")
assert numpy.allclose(numpy.asarray(_a), _n)
assert _a.shape == _n.shape
try:
_n = numpy.zeros()
except TypeError:
pass
else:
raise Exception("An error was expected!")
try:
_a = cuda_ndarray.CudaNdarray.zeros()
except TypeError:
pass
else:
raise Exception("An error was expected!")
Example 10: test_mpi_tag_ordering
def test_mpi_tag_ordering():
x = recv((2, 2), "float32", 1, 12)
y = recv((2, 2), "float32", 1, 11)
z = recv((2, 2), "float32", 1, 13)
f = theano.function([], [x, y, z], mode=mpi_mode)
nodes = f.maker.linker.make_all()[-1]
assert all(node.op.tag == tag for node, tag in zip(nodes, (11, 12, 13, 11, 12, 13)))
Example 11: test_give_variables_names
def test_give_variables_names():
x = theano.tensor.matrix('x')
y = x + 1
z = theano.tensor.dot(x, y)
variables = (x, y, z)
give_variables_names(variables)
assert all(var.name for var in variables)
assert unique([var.name for var in variables])
Example 12: __setup_node__
def __setup_node__(self, node):
# sets up node so it belongs to this fgraph
if hasattr(node, 'fgraph') and node.fgraph is not self:
raise Exception("%s is already owned by another fgraph" % node)
if (hasattr(node.op, 'view_map') and
not all([isinstance(view, (list, tuple))
for view in node.op.view_map.values()])):
raise Exception("Op '%s' have a bad view map '%s',"
" the values must be tuples or lists." % (
str(node.op), str(node.op.view_map)))
if (hasattr(node.op, 'destroy_map') and
not all([isinstance(destroy, (list, tuple))
for destroy in node.op.destroy_map.values()])):
raise Exception("Op '%s' have a bad destroy map '%s',"
" the values must be tuples or lists." % (
str(node.op), str(node.op.destroy_map)))
node.fgraph = self
node.deps = {}
Example 13: test_mpi_schedule
def test_mpi_schedule():
x = theano.tensor.matrix("x")
y = send(x, 1, 11)
z = x + x
waitnode = y.owner
sendnode = y.owner.inputs[0].owner
addnode = z.owner
f = theano.function([x], [y, z], mode=mpi_mode)
nodes = f.maker.linker.make_all()[-1]
optypes = [MPISend, theano.tensor.Elemwise, MPISendWait]
assert all(isinstance(node.op, optype) for node, optype in zip(nodes, optypes))
Example 14: test_specify_shape_inplace
def test_specify_shape_inplace(self):
    # Test that specify_shape doesn't break inserting an inplace op
dtype = self.dtype
if dtype is None:
dtype = theano.config.floatX
rng = numpy.random.RandomState(utt.fetch_seed())
a = numpy.asarray(rng.uniform(1, 2, [40, 40]), dtype=dtype)
a = self.cast_value(a)
a_shared = self.shared_constructor(a)
b = numpy.asarray(rng.uniform(1, 2, [40, 40]), dtype=dtype)
b = self.cast_value(b)
b_shared = self.shared_constructor(b)
s = numpy.zeros((40, 40), dtype=dtype)
s = self.cast_value(s)
s_shared = self.shared_constructor(s)
f = theano.function([], updates={s_shared: theano.dot(a_shared, b_shared) + s_shared})
topo = f.maker.env.toposort()
f()
# [Gemm{inplace}(<TensorType(float64, matrix)>, 0.01, <TensorType(float64, matrix)>, <TensorType(float64, matrix)>, 2e-06)]
if theano.config.mode != "FAST_COMPILE":
assert sum([node.op.__class__.__name__ in ["Gemm", "GpuGemm", "StructuredDot"] for node in topo]) == 1
assert all(
node.op == tensor.blas.gemm_inplace for node in topo if isinstance(node.op, tensor.blas.Gemm)
)
assert all(node.op.inplace for node in topo if node.op.__class__.__name__ == "GpuGemm")
        # There is no inplace gemm for sparse
# assert all(node.op.inplace for node in topo if node.op.__class__.__name__ == "StructuredDot")
s_shared_specify = tensor.specify_shape(s_shared, s_shared.get_value(borrow=True).shape)
# now test with the specify shape op in the output
f = theano.function(
[], s_shared.shape, updates={s_shared: theano.dot(a_shared, b_shared) + s_shared_specify}
)
topo = f.maker.env.toposort()
shp = f()
assert numpy.all(shp == (40, 40))
if theano.config.mode != "FAST_COMPILE":
assert sum([node.op.__class__.__name__ in ["Gemm", "GpuGemm", "StructuredDot"] for node in topo]) == 1
assert all(
node.op == tensor.blas.gemm_inplace for node in topo if isinstance(node.op, tensor.blas.Gemm)
)
assert all(node.op.inplace for node in topo if node.op.__class__.__name__ == "GpuGemm")
# now test with the specify shape op in the inputs and outputs
a_shared = tensor.specify_shape(a_shared, a_shared.get_value(borrow=True).shape)
b_shared = tensor.specify_shape(b_shared, b_shared.get_value(borrow=True).shape)
f = theano.function(
[], s_shared.shape, updates={s_shared: theano.dot(a_shared, b_shared) + s_shared_specify}
)
topo = f.maker.env.toposort()
shp = f()
assert numpy.all(shp == (40, 40))
if theano.config.mode != "FAST_COMPILE":
assert sum([node.op.__class__.__name__ in ["Gemm", "GpuGemm", "StructuredDot"] for node in topo]) == 1
assert all(
node.op == tensor.blas.gemm_inplace for node in topo if isinstance(node.op, tensor.blas.Gemm)
)
assert all(node.op.inplace for node in topo if node.op.__class__.__name__ == "GpuGemm")
Example 15: with_linker
def with_linker(self, linker):
for xsh, shuffle, zsh in [((2, 3), (1, 'x', 0), (3, 1, 2)),
((1, 2, 3), (1, 2), (2, 3)),
((1, 2, 1, 3), (1, 3), (2, 3)),
((2, 3, 4), (2, 1, 0), (4, 3, 2)),
((2, 3, 4), ('x', 2, 1, 0, 'x'),
(1, 4, 3, 2, 1)),
((1, 4, 3, 2, 1), (3, 2, 1), (2, 3, 4)),
((1, 1, 4), (1, 2), (1, 4)),
((1, 1, 1), (), ()),
((1,), ('x', 'x'), (1, 1))]:
ib = [(entry == 1) for entry in xsh]
x = TensorType('float64', ib)('x')
e = DimShuffle(ib, shuffle)(x)
f = copy(linker).accept(FunctionGraph([x], [e])).make_function()
assert f(numpy.ones(xsh)).shape == zsh
        # Test that DimShuffle.infer_shape works correctly
x = TensorType('float64', ib)('x')
e = DimShuffle(ib, shuffle)(x)
        f = copy(linker).accept(FunctionGraph([x], [e.shape])).make_function()
        assert all(f(numpy.ones(xsh)) == numpy.asarray(zsh))
    # Test when we drop an axis that is not broadcastable
ib = [False, True, False]
x = TensorType('float64', ib)('x')
self.assertRaises(ValueError, DimShuffle, ib, shuffle)
    # Test when we drop an axis that doesn't have shape 1
ib = [True, True, False]
x = TensorType('float64', ib)('x')
e = DimShuffle(ib, (1, 2))(x)
f = copy(linker).accept(FunctionGraph([x], [e.shape])).make_function()
self.assertRaises(TypeError, f, numpy.ones((2, 1, 4)))
    # Test that we can't take a dimension multiple times
xsh, shuffle, zsh = ((1, 1, 4), (0, 1, 2, 0), (1, 4))
ib = [False, True, False]
x = TensorType('float64', ib)('x')
self.assertRaises(ValueError, DimShuffle, ib, shuffle)