This page collects typical usage examples of the Python method theano.tensor.constant. If you are unsure what tensor.constant does or how to use it, the curated code samples below should help; you can also explore further usage examples from the containing module, theano.tensor.
The following presents 15 code examples of tensor.constant, ordered by popularity by default. You can upvote the examples you like or find useful; your feedback helps surface better Python code examples.
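Before the project examples, here is a minimal standalone sketch (my own, not taken from any of the projects below) of what theano.tensor.constant does: it wraps a Python or NumPy value into a TensorConstant that is baked into the graph rather than fed in as an input.

import numpy
import theano
import theano.tensor as T

# Wrap a NumPy array into a graph constant; dtype (and ndim) can be forced.
c = T.constant(numpy.arange(6).reshape(2, 3), dtype='float32')
x = T.matrix('x', dtype='float32')

f = theano.function([x], x + c)  # c is part of the graph, not a function input
print(f(numpy.ones((2, 3), dtype='float32')))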
Example 1: print_graph_linker
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import constant [as alias]
def print_graph_linker(print_prog=True):
    if 1:
        imap = {None: '-'}

        def blah(i, node, thunk):
            imap[node] = str(i)
            if print_prog:  # and node.op.__class__ is T.DimShuffle:
                if False and node.op == T.DimShuffle((), ['x', 'x'],
                                                     inplace=True):
                    print(node.op == T.DimShuffle((), ['x', 'x'],
                                                  inplace=True), end=' ')
                    print(node.inputs[0], type(node.inputs[0]), end=' ')
                    print(node.inputs[0].equals(T.constant(2)), end=' ')
                outputs = node.outputs
                inputs = theano.gof.graph.inputs(outputs)
                print('node ', i, node, end=' ')
                print(':'.join([imap[inp.owner] for inp in node.inputs]))
                # print theano.sandbox.pprint.pp.process_graph(inputs, outputs)

        return theano.sandbox.wraplinker.WrapLinkerMany(
            [theano.gof.OpWiseCLinker()],
            [theano.sandbox.wraplinker.run_all,
             blah,
             # theano.sandbox.wraplinker.numpy_notall_isfinite
             ])
    else:
        return theano.gof.OpWiseCLinker()
Example 2: test_csm_unsorted
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import constant [as alias]
def test_csm_unsorted(self):
    """
    Test support for gradients of unsorted inputs.
    """
    sp_types = {'csc': sp.csc_matrix,
                'csr': sp.csr_matrix}

    for format in ['csr', 'csc', ]:
        for dtype in ['float32', 'float64']:
            x = tensor.tensor(dtype=dtype, broadcastable=(False,))
            y = tensor.ivector()
            z = tensor.ivector()
            s = tensor.ivector()
            # Sparse advanced indexing produces unsorted sparse matrices
            a = sparse_random_inputs(format, (4, 3), out_dtype=dtype,
                                     unsorted_indices=True)[1][0]
            # Make sure it's unsorted
            assert not a.has_sorted_indices

            def my_op(x):
                y = tensor.constant(a.indices)
                z = tensor.constant(a.indptr)
                s = tensor.constant(a.shape)
                return tensor.sum(
                    dense_from_sparse(CSM(format)(x, y, z, s) * a))

            verify_grad_sparse(my_op, [a.data])
Example 3: get_size
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import constant [as alias]
def get_size(self, shape_info):
    # The size is that of the RandomState data, which has a constant size.
    state = numpy.random.RandomState().get_state()
    size = 0
    for elem in state:
        if isinstance(elem, str):
            size += len(elem)
        elif isinstance(elem, numpy.ndarray):
            size += elem.size * elem.itemsize
        elif isinstance(elem, int):
            size += numpy.dtype("int").itemsize
        elif isinstance(elem, float):
            size += numpy.dtype("float").itemsize
        else:
            raise NotImplementedError()
    return size
Example 4: infer_shape
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import constant [as alias]
def infer_shape(self, node, i_shapes):
    r, shp = node.inputs[0:2]

    # if shp is a constant array of len 0, then it means 'automatic shape'
    unknown_shape = len(getattr(shp, 'data', [0, 1, 2])) == 0

    # if ndim_added == 0 and shape != () then shape
    if self.ndim_added == 0 and not unknown_shape:
        sample_shp = shp
    else:
        # if shape == () then it will depend on args
        # if ndim_added != 0 and shape != () then it will depend on args
        # Use the default infer_shape implementation.
        raise tensor.ShapeError()

    return [None, [sample_shp[i] for i in xrange(node.outputs[1].ndim)]]
Example 5: test_constant
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import constant [as alias]
def test_constant(self):
    orig_compute_test_value = theano.config.compute_test_value
    try:
        theano.config.compute_test_value = 'raise'

        x = T.constant(numpy.random.rand(2, 3), dtype=config.floatX)
        y = theano.shared(numpy.random.rand(3, 6).astype(config.floatX),
                          'y')

        # should work
        z = T.dot(x, y)
        assert hasattr(z.tag, 'test_value')
        f = theano.function([], z)
        assert _allclose(f(), z.tag.test_value)

        # this test should fail
        x = T.constant(numpy.random.rand(2, 4), dtype=config.floatX)
        self.assertRaises(ValueError, T.dot, x, y)
    finally:
        theano.config.compute_test_value = orig_compute_test_value
Example 6: test_gpualloc
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import constant [as alias]
def test_gpualloc():
    '''
    This test tries to catch the scenario where, due to infer_shape,
    the input of the alloc changes from a tensor scalar to a constant
    1. In this case the originally constructed broadcastable pattern will
    have a False for that dimension, but the new broadcastable pattern
    that will be inserted by gpualloc will have a True, since it knows the
    dimension is 1 and therefore broadcastable.
    '''
    x = theano.shared(numpy.ones(3, dtype='float32'), 'x')
    m = (x).dimshuffle(['x', 0])
    v = tensor.alloc(1., *m.shape)
    f = theano.function([], v + x,
                        mode=mode_with_gpu.excluding("local_elemwise_alloc"))
    l = f.maker.fgraph.toposort()
    assert numpy.any([isinstance(x.op, cuda.GpuAlloc) for x in l])
Example 7: make_node
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import constant [as alias]
def make_node(self, x, index):
    assert isinstance(x.type, TypedListType)
    if not isinstance(index, Variable):
        if isinstance(index, slice):
            index = Constant(SliceType(), index)
            return Apply(self, [x, index], [x.type()])
        else:
            index = T.constant(index, ndim=0, dtype='int64')
            return Apply(self, [x, index], [x.ttype()])
    if isinstance(index.type, SliceType):
        return Apply(self, [x, index], [x.type()])
    elif isinstance(index, T.TensorVariable) and index.ndim == 0:
        assert index.dtype == 'int64'
        return Apply(self, [x, index], [x.ttype()])
    else:
        raise TypeError('Expected scalar or slice as index.')
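For reference, the scalar-index branch above simply wraps a plain Python integer into a 0-d int64 graph constant; a minimal sketch (mine, not from the project above):

import theano.tensor as T

idx = T.constant(2, ndim=0, dtype='int64')
print(idx.type)  # TensorType(int64, scalar)
print(idx.data)  # 2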
Example 8: get_output_for
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import constant [as alias]
def get_output_for(self, input, deterministic=False, **kwargs):
    """
    Parameters
    ----------
    input : tensor
        output from the previous layer
    deterministic : bool
        If true, dropout and scaling are disabled; see notes
    """
    if deterministic or self.p == 0:
        return input
    else:
        # Using a theano constant to prevent upcasting
        one = T.constant(1)
        retain_prob = one - self.p
        if self.rescale:
            input /= retain_prob

        mask = _srng.binomial(input.shape[:2], p=retain_prob,
                              dtype=theano.config.floatX)
        axes = [0, 1] + (['x'] * (input.ndim - 2))
        mask = mask.dimshuffle(*axes)

        return input * mask
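For context, here is a standalone sketch of the same mask construction; the random-stream class, the 3-D shape, and the 0.2 drop probability are illustrative assumptions, not taken from the project above:

import numpy as np
import theano
import theano.tensor as T
from theano.sandbox.rng_mrg import MRG_RandomStreams

srng = MRG_RandomStreams(seed=123)
x = T.tensor3('x')                     # e.g. (batch, time, features)
p = 0.2                                # drop probability (illustrative value)
retain_prob = T.constant(1) - p
# Sample one Bernoulli value per (batch, time) position only ...
mask = srng.binomial((x.shape[0], x.shape[1]), p=retain_prob,
                     dtype=theano.config.floatX)
# ... then add a broadcastable axis so it applies to every feature.
mask = mask.dimshuffle(0, 1, 'x')
f = theano.function([x], x * mask / retain_prob)
print(f(np.ones((2, 3, 4), dtype=theano.config.floatX)).shape)  # (2, 3, 4)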
Example 9: stop_gradient
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import constant [as alias]
def stop_gradient(variables):
    """Returns `variables` but with zero gradient w.r.t. every other variable.

    # Arguments
        variables: tensor or list of tensors to consider constant with respect
            to any other variable.

    # Returns
        A single tensor or a list of tensors (depending on the passed argument)
        that has constant gradient with respect to any other variable.
    """
    if isinstance(variables, (list, tuple)):
        return map(theano.gradient.disconnected_grad, variables)
    else:
        return theano.gradient.disconnected_grad(variables)


# CONTROL FLOW
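As a quick sanity check (my own snippet, not part of the Keras backend), theano.gradient.disconnected_grad really does remove the gradient contribution of the wrapped expression:

import theano
import theano.tensor as T

x = T.scalar('x')
y = theano.gradient.disconnected_grad(x ** 2) + x  # the x**2 term contributes no gradient
g = theano.grad(y, x)
f = theano.function([x], g)
print(f(3.0))  # 1.0 rather than 2*3 + 1 = 7.0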
Example 10: test_mixin_sklearn_params
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import constant [as alias]
def test_mixin_sklearn_params():
    # get_params
    p = Normal(mu=0.0, sigma=1.0)
    params = p.get_params()
    assert len(params) == 2
    assert "mu" in params
    assert "sigma" in params

    # for parameters, set_params should change the value contained
    old_mu = p.get_params()["mu"]
    p.set_params(mu=42.0)
    new_mu = p.get_params()["mu"]
    assert old_mu is new_mu
    assert new_mu.get_value() == 42.0

    # check errors
    p = Normal(mu=T.constant(0.0), sigma=1.0)
    assert_raises(ValueError, p.set_params, mu=1.0)
Example 11: test_fit
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import constant [as alias]
def test_fit():
    p1 = Normal(mu=T.constant(0.0), sigma=T.constant(2.0))
    p2 = Normal(mu=T.constant(3.0), sigma=T.constant(2.0))
    p3 = Exponential(inverse_scale=T.constant(0.5))
    g = theano.shared(0.5)
    m = Mixture(components=[p1, p2, p3], weights=[g, g * g])

    X = np.concatenate([st.norm(loc=0.0, scale=2.0).rvs(300, random_state=0),
                        st.norm(loc=3.0, scale=2.0).rvs(100, random_state=1),
                        st.expon(scale=1. / 0.5).rvs(500, random_state=2)])
    X = X.reshape(-1, 1)

    s0 = m.score(X)
    m.fit(X)
    assert np.abs(g.eval() - 1. / 3.) < 0.05
    assert m.score(X) >= s0
Example 12: huber_loss
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import constant [as alias]
def huber_loss(mx, Sx, target, Q, width=1.0, *args, **kwargs):
    '''
    Huber loss
    '''
    if Sx is None:
        # deterministic case
        if mx.ndim == 1:
            mx = mx[None, :]
        delta = mx - target
        Q = tt.constant(Q) if isinstance(Q, np.ndarray) else Q
        deltaQ = delta.dot(Q)
        abs_deltaQ = abs(deltaQ)
        cost = tt.switch(
            abs_deltaQ <= width,
            0.5 * deltaQ**2,
            width * (abs_deltaQ - width / 2)).sum(-1)
        return cost
    else:
        # stochastic case (moment matching)
        raise NotImplementedError
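A small usage sketch of the deterministic branch, assuming the huber_loss above is available in scope; the identity Q and the test point are made up for illustration:

import numpy as np
import theano
import theano.tensor as tt

mx = tt.matrix('mx')
Q = np.eye(2, dtype=theano.config.floatX)        # quadratic weighting, identity here
target = np.zeros(2, dtype=theano.config.floatX)

cost = huber_loss(mx, None, target, Q, width=1.0)
f = theano.function([mx], cost)
# |0.5| <= 1 -> 0.5 * 0.5**2 = 0.125; |2.0| > 1 -> 1 * (2.0 - 0.5) = 1.5
print(f(np.array([[0.5, 2.0]], dtype=theano.config.floatX)))  # [1.625]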
Example 13: sample_noise
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import constant [as alias]
def sample_noise(self, input):
    # get noise_shape
    noise_shape = self.input_shape
    if any(s is None for s in noise_shape):
        noise_shape = input.shape

    # respect shared axes
    if self.shared_axes:
        shared_axes = tuple(a if a >= 0 else a + input.ndim
                            for a in self.shared_axes)
        noise_shape = tuple(1 if a in shared_axes else s
                            for a, s in enumerate(noise_shape))

    one = tt.constant(1)
    retain_prob = one - self.p
    noise = self._srng.binomial(noise_shape, p=retain_prob,
                                dtype=floatX)

    if self.shared_axes:
        bcast = tuple(bool(s == 1) for s in noise_shape)
        noise = tt.patternbroadcast(noise, bcast)
    return noise
Example 14: upsample_bilinear
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import constant [as alias]
def upsample_bilinear(x, scale):
    '''
    Bilinearly upsamples x:
    (nimgs, nfeat, h, w) -> (nimgs, nfeat, h*scale, w*scale)
    '''
    # Note: `xx` (presumably an alias for numpy.newaxis) and `conv` (a helper
    # supporting transposed, strided convolution) are defined elsewhere in the
    # original module and are not shown here.
    kx = np.linspace(0, 1, scale + 1)[1:-1]
    kx = np.concatenate((kx, [1], kx[::-1]))
    ker = kx[xx, :] * kx[:, xx]                    # outer product -> 2-D bilinear kernel
    ker = T.constant(ker[xx, xx, :, :].astype(np.float32))
    xbatch = x.reshape((x.shape[0] * x.shape[1], 1, x.shape[2], x.shape[3]))
    xup = conv(xbatch, ker, 'valid', transpose=True, stride=scale)
    return xup.reshape((x.shape[0], x.shape[1], xup.shape[2], xup.shape[3]))
Example 15: setUp
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import constant [as alias]
def setUp(self):
    super(AddSSDataTester, self).setUp()
    self.op_class = AddSSData

    for format in sparse.sparse_formats:
        variable = getattr(theano.sparse, format + '_matrix')

        rand = numpy.array(
            numpy.random.random_integers(3, size=(3, 4)) - 1,
            dtype=theano.config.floatX)

        constant = as_sparse_format(rand, format)

        self.x[format] = [variable() for t in range(2)]
        self.a[format] = [constant for t in range(2)]