This article collects typical usage examples of the Python method theano.tensor.scalar. If you are wondering what exactly tensor.scalar does, how to call it, or how it is used in practice, the hand-picked code examples below may help. You can also browse further usage examples from theano.tensor, the module in which this method lives.
The following shows 15 code examples of the tensor.scalar method, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
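Before diving into the collected examples, here is a minimal, self-contained sketch (not taken from any of the projects below; the variable names are illustrative) of what theano.tensor.scalar does: it declares a 0-dimensional symbolic variable, which can then be combined into an expression and compiled with theano.function.

import theano
import theano.tensor as T

x = T.scalar('x')            # 0-d symbolic variable; dtype defaults to theano.config.floatX
y = x ** 2 + 1               # build a symbolic expression on top of it
f = theano.function([x], y)  # compile the graph into a callable
print(f(3.0))                # prints 10.0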
Example 1: _make_rnn
# Required import: from theano import tensor [as alias]
# Or alternatively: from theano.tensor import scalar [as alias]
def _make_rnn(self, seq_length=4):
    self.embedding_dim = embedding_dim = 3
    self.vocab_size = vocab_size = 10
    self.seq_length = seq_length

    def compose_network(h_prev, inp, embedding_dim, model_dim, vs, name="compose"):
        # Just add the two embeddings!
        W = T.concatenate([T.eye(model_dim), T.eye(model_dim)], axis=0)
        i = T.concatenate([h_prev, inp], axis=1)
        return i.dot(W)

    X = T.imatrix("X")
    training_mode = T.scalar("training_mode")
    vs = VariableStore()
    embeddings = np.arange(vocab_size).reshape(
        (vocab_size, 1)).repeat(embedding_dim, axis=1)
    self.model = RNN(
        embedding_dim, embedding_dim, vocab_size, seq_length, compose_network,
        IdentityLayer, training_mode, None, vs,
        X=X, make_test_fn=True, initial_embeddings=embeddings)
Example 2: __init__
# Required import: from theano import tensor [as alias]
# Or alternatively: from theano.tensor import scalar [as alias]
def __init__(self):
    X_in = T.matrix('X_in')
    u = T.matrix('u')
    s = T.vector('s')
    eps = T.scalar('eps')

    X_ = X_in - T.mean(X_in, 0)
    sigma = T.dot(X_.T, X_) / X_.shape[0]
    self.sigma = theano.function([X_in], sigma, allow_input_downcast=True)

    Z = T.dot(T.dot(u, T.nlinalg.diag(1. / T.sqrt(s + eps))), u.T)
    X_zca = T.dot(X_, Z.T)
    self.compute_zca = theano.function([X_in, u, s, eps], X_zca, allow_input_downcast=True)

    self._u = None
    self._s = None
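A hedged usage sketch, not part of the original example: sigma computes the covariance of the centred data, which can be eigen-decomposed with NumPy and fed back into compute_zca together with a small epsilon to whiten the data. The instance name zca, the random data, and the 1e-5 epsilon below are illustrative assumptions.

import numpy as np

# zca is assumed to be an instance of the class defined in Example 2
X = np.random.rand(100, 20).astype('float32')  # hypothetical data matrix (rows = samples)
cov = zca.sigma(X)                             # covariance of the centred data
u, s, _ = np.linalg.svd(cov)                   # eigenvectors u and eigenvalues s of cov
X_white = zca.compute_zca(X, u, s, 1e-5)       # ZCA-whitened version of X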
Example 3: var
# Required import: from theano import tensor [as alias]
# Or alternatively: from theano.tensor import scalar [as alias]
def var(name, label=None, observed=False, const=False, vector=False, lower=None, upper=None):
    if vector and not observed:
        raise ValueError('Currently, only observed variables can be vectors')
    if observed and const:
        raise ValueError('Observed variables are automatically const')

    if vector:
        var = T.vector(name)
    else:
        var = T.scalar(name)

    var._name = name
    var._label = label
    var._observed = observed
    var._const = observed or const
    var._lower = lower or -np.inf
    var._upper = upper or np.inf

    return var
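A minimal usage sketch (not shown in the original source; the names below are illustrative): var returns an ordinary Theano symbolic variable with a few bookkeeping attributes attached, so it can be used wherever a plain T.scalar or T.vector would be.

mu = var('mu', label='mean', lower=0.0)   # unobserved scalar parameter, bounded below
x = var('x', observed=True, vector=True)  # observed vector of data points
cost = ((x - mu) ** 2).sum()              # use them like any other Theano symbols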
Example 4: make_node
# Required import: from theano import tensor [as alias]
# Or alternatively: from theano.tensor import scalar [as alias]
def make_node(self, x, index):
    x = as_sparse_variable(x)
    assert x.format in ["csr", "csc"]
    assert len(index) == 2

    input_op = [x]

    for ind in index:
        if isinstance(ind, slice):
            raise Exception("GetItemScalar called with a slice as index!")

        # in case of indexing using int instead of theano variable
        elif isinstance(ind, integer_types):
            ind = theano.tensor.constant(ind)
            input_op += [ind]

        # in case of indexing using theano variable
        elif ind.ndim == 0:
            input_op += [ind]
        else:
            raise NotImplementedError()

    return gof.Apply(self, input_op, [tensor.scalar(dtype=x.dtype)])
Example 5: structured_monoid
# Required import: from theano import tensor [as alias]
# Or alternatively: from theano.tensor import scalar [as alias]
def structured_monoid(tensor_op):
    # Generic operation to perform many kinds of monoid element-wise
    # operations on the non-zeros of a sparse matrix.
    #
    # The first parameter must always be a sparse matrix. The other parameters
    # must be scalars which will be passed as argument to the tensor_op.
    def decorator(f):
        def wrapper(*args):
            x = as_sparse_variable(args[0])
            assert x.format in ["csr", "csc"]

            xs = [scalar.as_scalar(arg) for arg in args[1:]]

            data, ind, ptr, shape = csm_properties(x)

            data = tensor_op(data, *xs)

            return CSM(x.format)(data, ind, ptr, shape)

        wrapper.__name__ = str(tensor_op.scalar_op)
        return wrapper
    return decorator
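A hedged sketch of how this decorator is typically applied (it mirrors the pattern used for the structured element-wise ops in Theano's sparse module; treat the exact names as illustrative): decorating a stub function with an element-wise tensor op yields a function that applies that op only to the stored non-zeros of a sparse matrix, leaving its sparsity structure untouched.

@structured_monoid(theano.tensor.exp)
def structured_exp(x):
    """exp of the non-zero elements of sparse matrix x"""

# y = structured_exp(x_csr)  # x_csr: a CSR sparse variable; y keeps the same
#                            # sparsity pattern, with exp applied to its data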
Example 6: perform
# Required import: from theano import tensor [as alias]
# Or alternatively: from theano.tensor import scalar [as alias]
def perform(self, node, inputs, outputs):
    (a_indices, a_indptr, b, g_ab) = inputs
    (out,) = outputs
    g_a_data = numpy.zeros(a_indices.shape, dtype=g_ab.dtype)
    for j in xrange(len(a_indptr) - 1):
        ind0 = a_indptr[j]
        ind1 = a_indptr[j + 1]
        for i_idx in xrange(ind0, ind1):
            i = a_indices[i_idx]
            # Depending on the type of g_ab and b (sparse or dense),
            # the following dot product can result in a scalar or
            # a (1, 1) sparse matrix.
            dot_val = numpy.dot(g_ab[i], b[j].T)
            if isinstance(dot_val, scipy.sparse.spmatrix):
                dot_val = dot_val[0, 0]
            g_a_data[i_idx] = dot_val
    out[0] = g_a_data
Example 7: test_broadcast_grad
# Required import: from theano import tensor [as alias]
# Or alternatively: from theano.tensor import scalar [as alias]
def test_broadcast_grad():
    rng = numpy.random.RandomState(utt.fetch_seed())
    x1 = T.tensor4('x')
    x1_data = rng.randn(1, 1, 300, 300)
    sigma = T.scalar('sigma')
    sigma_data = 20
    window_radius = 3

    filter_1d = T.arange(-window_radius, window_radius + 1)
    filter_1d = filter_1d.astype(theano.config.floatX)
    filter_1d = T.exp(-0.5 * filter_1d ** 2 / sigma ** 2)
    filter_1d = filter_1d / filter_1d.sum()

    filter_W = filter_1d.dimshuffle(['x', 'x', 0, 'x'])

    y = theano.tensor.nnet.conv2d(x1, filter_W, border_mode='full',
                                  filter_shape=[1, 1, None, None])
    theano.grad(y.sum(), sigma)
Example 8: test_perform
# Required import: from theano import tensor [as alias]
# Or alternatively: from theano.tensor import scalar [as alias]
def test_perform(self):
    x = tensor.matrix()
    y = tensor.scalar()
    f = function([x, y], fill_diagonal(x, y))
    for shp in [(8, 8), (5, 8), (8, 5)]:
        a = numpy.random.rand(*shp).astype(config.floatX)
        val = numpy.cast[config.floatX](numpy.random.rand())
        out = f(a, val)
        # We can't use numpy.fill_diagonal as it is bugged.
        assert numpy.allclose(numpy.diag(out), val)
        assert (out == val).sum() == min(a.shape)

    # test for 3d tensor
    a = numpy.random.rand(3, 3, 3).astype(config.floatX)
    x = tensor.tensor3()
    y = tensor.scalar()
    f = function([x, y], fill_diagonal(x, y))
    val = numpy.cast[config.floatX](numpy.random.rand() + 10)
    out = f(a, val)
    # We can't use numpy.fill_diagonal as it is bugged.
    assert out[0, 0, 0] == val
    assert out[1, 1, 1] == val
    assert out[2, 2, 2] == val
    assert (out == val).sum() == min(a.shape)
Example 9: test_gemm_nested
# Required import: from theano import tensor [as alias]
# Or alternatively: from theano.tensor import scalar [as alias]
def test_gemm_nested():
    X, Y, Z, a, b = T.matrix('X'), T.matrix('Y'), T.matrix('Z'), T.scalar('a'), T.scalar('b')
    R, S, U, c, d = T.matrix('R'), T.matrix('S'), T.matrix('U'), T.scalar('c'), T.scalar('d')

    just_gemm([X, Y, Z, R, S, U, a, b, c, d],
              [a * Z - b * (c * T.dot(X, Y) + d * Z)],
              ishapes=[(2, 3), (3, 4), (2, 4), (2, 3), (3, 4), (2, 4), (), (), (), ()],
              max_graphlen=1)
    # print "---------------------"
    just_gemm([X, Y, Z, R, S, U, a, b, c, d],
              [a * Z - b * (c * T.dot(X, Y) + d * Z + c * Z)],
              ishapes=[(2, 3), (3, 4), (2, 4), (2, 3), (3, 4), (2, 4), (), (), (), ()],
              max_graphlen=1)
    # print "---------------------"
    just_gemm([X, Y, Z, R, S, U, a, b, c, d],
              [a * Z - b * (c * T.dot(X, Y) + d * Z + c * U)],
              ishapes=[(2, 3), (3, 4), (2, 4), (2, 3), (3, 4), (2, 4), (), (), (), ()],
              max_graphlen=3)
Example 10: test_inplace0
# Required import: from theano import tensor [as alias]
# Or alternatively: from theano.tensor import scalar [as alias]
def test_inplace0():
    # should fail to insert gemm_inplace because gemm_inplace would
    # create cycles
    X, Y, Z, a, b = T.matrix('X'), T.matrix('Y'), T.matrix('Z'), T.scalar('a'), T.scalar('b')
    R, S, c = T.matrix('R'), T.matrix('S'), T.scalar('c')

    f = inplace_func([Z, b, R, S],
                     [Z * (Z + b * T.dot(R, S).T)], mode='FAST_RUN')
    if gemm_inplace in [n.op for n in f.maker.fgraph.apply_nodes]:
        print(pp(f.maker.fgraph.outputs[0]))
        raise Failure('gemm_inplace in graph')
    assert gemm_no_inplace in [n.op for n in f.maker.fgraph.apply_nodes]

    # gemm_inplace should be inserted here, to work in-place on Z*c
    f = inplace_func([X, Y, Z, a, b, R, S, c],
                     [Z * (c * Z + a * T.dot(X, Y) + b * T.dot(R, S).T)],
                     mode='FAST_RUN')
    if gemm_inplace not in [n.op for n in f.maker.fgraph.apply_nodes]:
        theano.printing.debugprint(f)
        raise Failure('no gemm_inplace in graph')
Example 11: test_c
# Required import: from theano import tensor [as alias]
# Or alternatively: from theano.tensor import scalar [as alias]
def test_c(self):
    if not theano.config.cxx:
        raise SkipTest("G++ not available, so we need to skip this test.")

    for dtype in ["floatX", "complex64", "complex128", "int8", "uint8"]:
        self.with_linker(gof.CLinker(), scalar.add, dtype=dtype)
        self.with_linker(gof.CLinker(), scalar.mul, dtype=dtype)
    for dtype in ["floatX", "int8", "uint8"]:
        self.with_linker(gof.CLinker(), scalar.minimum, dtype=dtype)
        self.with_linker(gof.CLinker(), scalar.maximum, dtype=dtype)
        self.with_linker(gof.CLinker(), scalar.and_, dtype=dtype,
                         tensor_op=tensor.all)
        self.with_linker(gof.CLinker(), scalar.or_, dtype=dtype,
                         tensor_op=tensor.any)
    for dtype in ["int8", "uint8"]:
        self.with_linker(gof.CLinker(), scalar.or_, dtype=dtype)
        self.with_linker(gof.CLinker(), scalar.and_, dtype=dtype)
        self.with_linker(gof.CLinker(), scalar.xor, dtype=dtype)
Example 12: test_infer_shape
# Required import: from theano import tensor [as alias]
# Or alternatively: from theano.tensor import scalar [as alias]
def test_infer_shape(self, dtype=None, pre_scalar_op=None):
    if dtype is None:
        dtype = theano.config.floatX
    for xsh, tosum in self.cases:
        x = self.type(dtype, [(entry == 1) for entry in xsh])('x')
        if pre_scalar_op is not None:
            x = pre_scalar_op(x)
        if tosum is None:
            tosum = list(range(len(xsh)))
        xv = numpy.asarray(numpy.random.rand(*xsh), dtype=dtype)
        d = {}
        if pre_scalar_op is not None:
            xv = x.eval({x.owner.inputs[0]: xv})
            d = {"pre_scalar_op": pre_scalar_op}
        self._compile_and_check([x],
                                [self.op(scalar.add, axis=tosum, **d)(x)],
                                [xv], self.op,
                                ["local_cut_useless_reduce"],
                                warn=0 not in xsh)
Example 13: test_mean_default_dtype
# Required import: from theano import tensor [as alias]
# Or alternatively: from theano.tensor import scalar [as alias]
def test_mean_default_dtype(self):
    """
    Test the default dtype of a mean().
    """
    # We try multiple axis combinations even though axis should not matter.
    axes = [None, 0, 1, [], [0], [1], [0, 1]]
    for idx, dtype in enumerate(imap(str, theano.scalar.all_types)):
        axis = axes[idx % len(axes)]
        x = tensor.matrix(dtype=dtype)
        m = x.mean(axis=axis)
        if dtype in tensor.discrete_dtypes and axis != []:
            assert m.dtype == 'float64'
        else:
            assert m.dtype == dtype, (m, m.dtype, dtype)
        f = theano.function([x], m)
        data = numpy.random.rand(3, 4) * 10
        data = data.astype(dtype)
        f(data)
Example 14: test_prod_without_zeros_default_dtype
# Required import: from theano import tensor [as alias]
# Or alternatively: from theano.tensor import scalar [as alias]
def test_prod_without_zeros_default_dtype(self):
    """
    Test the default dtype of a ProdWithoutZeros().
    """
    # We try multiple axis combinations even though axis should not matter.
    axes = [None, 0, 1, [], [0], [1], [0, 1]]
    for idx, dtype in enumerate(imap(str, theano.scalar.all_types)):
        axis = axes[idx % len(axes)]
        x = ProdWithoutZeros(axis=axis)(tensor.matrix(dtype=dtype))
        assert x.dtype == dict(
            int8='int64',
            int16='int64',
            int32='int64',
            uint8='uint64',
            uint16='uint64',
            uint32='uint64',
        ).get(dtype, dtype)
Example 15: test_prod_without_zeros_custom_dtype
# Required import: from theano import tensor [as alias]
# Or alternatively: from theano.tensor import scalar [as alias]
def test_prod_without_zeros_custom_dtype(self):
    """
    Test ability to provide your own output dtype for a ProdWithoutZeros().
    """
    # We try multiple axis combinations even though axis should not matter.
    axes = [None, 0, 1, [], [0], [1], [0, 1]]
    idx = 0
    for input_dtype in imap(str, theano.scalar.all_types):
        x = tensor.matrix(dtype=input_dtype)
        for output_dtype in imap(str, theano.scalar.all_types):
            axis = axes[idx % len(axes)]
            prod_woz_var = ProdWithoutZeros(
                axis=axis, dtype=output_dtype)(x)
            assert prod_woz_var.dtype == output_dtype
            idx += 1
            if ('complex' in output_dtype or
                    'complex' in input_dtype):
                continue
            f = theano.function([x], prod_woz_var)
            data = numpy.random.rand(2, 3) * 3
            data = data.astype(input_dtype)
            f(data)