This article collects typical usage examples of the tensor function from Python's theano.tensor module. If you have been wondering what exactly the tensor function does, how to call it, or what real-world usage looks like, the curated code examples below should help.
The following presents 15 code examples of the tensor function, sorted by popularity by default.
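Before the examples, here is a minimal sketch of the core call (the variable names are illustrative and not taken from any example below). tensor.tensor(dtype, broadcastable) builds a symbolic TensorType variable: the length of broadcastable fixes the number of dimensions, and each True marks a dimension that is pinned to length 1 and broadcasts.

import theano
import theano.tensor as tensor

# A 2-D matrix: neither dimension broadcasts.
M = tensor.tensor(dtype='float64', broadcastable=(False, False))
# A row: the first dimension has length 1 and broadcasts over rows.
r = tensor.tensor(dtype='float64', broadcastable=(True, False))
# M + r broadcasts r across the rows of M.
f = theano.function([M, r], M + r)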
Example 1: make_node
def make_node(self, x):
###
# At least for small matrices (5x5), the .sum() method of a csc matrix returns a dense matrix
# as the result whether axis is 0 or 1... weird!
###
if self.axis is None:
z = tensor.tensor(broadcastable=(), dtype=x.dtype)
elif self.axis == 0:
if x.format == 'csc':
z = tensor.tensor(broadcastable=(False,), dtype=x.dtype)
elif x.format == 'csr':
            # return SparseVector()  # WRITEME!
raise NotImplementedError()
else:
raise NotImplementedError()
elif self.axis == 1:
if x.format == 'csc':
            # return SparseVector()  # WRITEME!
raise NotImplementedError()
elif x.format == 'csr':
z = tensor.tensor(broadcastable=(False,), dtype=x.dtype)
else:
raise NotImplementedError()
else:
        assert False  # axis should have been verified by self.__init__
return gof.Apply(self, [x], [z])
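The comment in this example describes SciPy behavior that is easy to confirm on its own: the .sum() method of a CSC matrix returns a dense numpy.matrix whichever axis you pick. A standalone demonstration, independent of the op above:

import numpy as np
import scipy.sparse as sp

m = sp.csc_matrix(np.ones((5, 5)))
print(type(m.sum(axis=0)))  # <class 'numpy.matrix'> -- dense, as the comment says
print(type(m.sum(axis=1)))  # dense here as well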
Example 2: make_node
def make_node(self, pvals):
pvals = T.as_tensor_variable(pvals)
if self.odtype == 'auto':
odtype = pvals.dtype
vals = T.tensor(dtype=odtype, broadcastable=pvals.type.broadcastable)
indx = T.tensor(dtype='int32', broadcastable=pvals.type.broadcastable)
return Apply(self, [pvals,], [vals, indx])
Example 3: __init__
def __init__(self, activation, dim=2, issequence=False, inpshape=None):
"""
:type activation: callable
:param activation: Activation function (any element-wise symbolic function)
:type dim: int
:param dim: Dimensionality of the input data
:type issequence: bool
:param issequence: Whether the input is a sequence
:type inpshape: list
:param inpshape: Input shape
"""
super(activationlayer, self).__init__()
# Parse data dimensionality
    assert not (dim is None and inpshape is None), "Data dimensionality cannot be inferred; provide dim or inpshape."
# Meta
self.activation = activation
self.dim = dim if dim is not None else {4: 2, 5: 3}[len(inpshape)]
self.allowsequences = True
    self.issequence = (self.dim == 2 and len(inpshape) == 5) if issequence is None else issequence
    self.inpdim = len(inpshape) if inpshape is not None else (5 if self.issequence else {2: 4, 3: 5}[dim])
# Shape inference
self.inpshape = [None, ] * self.inpdim if inpshape is None else list(inpshape)
# Containers for input and output
self.x = T.tensor('floatX', [False, ] * self.inpdim, name='x:' + str(id(self)))
self.y = T.tensor('floatX', [False, ] * self.inpdim, name='y:' + str(id(self)))
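A usage sketch for this layer (hypothetical: it assumes activationlayer and its base class are importable from their defining module, which this excerpt does not show):

import theano.tensor as T

# Hypothetical instantiation: an element-wise ReLU over 4-D (batch, channel, row, column) input.
layer = activationlayer(activation=T.nnet.relu, dim=2, inpshape=[None, 3, 256, 256])
out = layer.activation(layer.x)  # apply the stored activation to the symbolic input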
Example 4: test_elemwise_grad_broadcast
def test_elemwise_grad_broadcast():
# This crashed in the past.
x = tensor.tensor(dtype="float32", broadcastable=(True, False, False, False))
y = tensor.tensor(dtype="float32", broadcastable=(True, True, False, False))
theano.grad(theano.tensor.tanh(x).sum(), x)
theano.grad(theano.tensor.tanh(x + y).sum(), y)
theano.grad(theano.tensor.tanh(x + y).sum(), [x, y])
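Beyond the symbolic crash check, one of these gradients can be compiled and evaluated; the shapes below are chosen to match the broadcastable patterns (a sketch, not part of the original test):

import numpy as np
import theano
from theano import tensor

x = tensor.tensor(dtype="float32", broadcastable=(True, False, False, False))
y = tensor.tensor(dtype="float32", broadcastable=(True, True, False, False))
gx, gy = theano.grad(theano.tensor.tanh(x + y).sum(), [x, y])
f = theano.function([x, y], [gx, gy])
xv = np.zeros((1, 2, 3, 4), dtype="float32")
yv = np.zeros((1, 1, 3, 4), dtype="float32")  # broadcasts against xv
gxv, gyv = f(xv, yv)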
Example 5: setUp
def setUp(self, dtype='float64'):
self.dtype = dtype
self.mode = theano.compile.get_default_mode().including('fast_run')
self.A = tensor.tensor(dtype=dtype, broadcastable=(False, False))
self.a = tensor.tensor(dtype=dtype, broadcastable=())
self.x = tensor.tensor(dtype=dtype, broadcastable=(False,))
self.y = tensor.tensor(dtype=dtype, broadcastable=(False,))
    self.Aval = numpy.ones((2, 3), dtype=dtype)
    self.xval = numpy.asarray([1, 2], dtype=dtype)
    self.yval = numpy.asarray([1.5, 2.7, 3.9], dtype=dtype)
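The test methods that consume this fixture are not shown, but the variables are shaped for a rank-1 update, the pattern the GER optimization rewrites; a self-contained sketch of that usage (assumed, not copied from the test suite):

import numpy
import theano
from theano import tensor

A = tensor.tensor(dtype='float64', broadcastable=(False, False))
a = tensor.tensor(dtype='float64', broadcastable=())
x = tensor.tensor(dtype='float64', broadcastable=(False,))
y = tensor.tensor(dtype='float64', broadcastable=(False,))
# A + a * outer(x, y) is the rank-1 (GER) update pattern.
f = theano.function([A, a, x, y], A + a * tensor.outer(x, y))
out = f(numpy.ones((2, 3)), 0.5,
        numpy.asarray([1, 2], dtype='float64'),
        numpy.asarray([1.5, 2.7, 3.9], dtype='float64'))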
Example 6: setUp
def setUp(self, dtype='float64'):
    # This test can run even when theano.config.blas.ldflags is empty.
self.dtype = dtype
self.mode = theano.compile.get_default_mode().including('fast_run')
self.A = tensor.tensor(dtype=dtype, broadcastable=(False, False))
self.a = tensor.tensor(dtype=dtype, broadcastable=())
self.x = tensor.tensor(dtype=dtype, broadcastable=(False,))
self.y = tensor.tensor(dtype=dtype, broadcastable=(False,))
self.Aval = np.ones((2, 3), dtype=dtype)
self.xval = np.asarray([1, 2], dtype=dtype)
self.yval = np.asarray([1.5, 2.7, 3.9], dtype=dtype)
Example 7: setUp
def setUp(self):
self.mode = mode_with_gpu
dtype = self.dtype = 'float32' # optimization isn't dtype-dependent
self.A = tensor.tensor(dtype=dtype, broadcastable=(False, False))
self.a = tensor.tensor(dtype=dtype, broadcastable=())
self.x = tensor.tensor(dtype=dtype, broadcastable=(False,))
self.y = tensor.tensor(dtype=dtype, broadcastable=(False,))
    # Data on the GPU makes the op always run inplace.
self.ger = gpu_ger_inplace
self.ger_destructive = gpu_ger_inplace
self.gemm = tcn.blas.gpu_gemm_inplace
Example 8: setUp
def setUp(self):
self.mode = theano.compile.get_default_mode().including('fast_run')
dtype = self.dtype = 'float64' # optimization isn't dtype-dependent
self.A = tensor.tensor(dtype=dtype, broadcastable=(False, False))
self.a = tensor.tensor(dtype=dtype, broadcastable=())
self.x = tensor.tensor(dtype=dtype, broadcastable=(False,))
self.y = tensor.tensor(dtype=dtype, broadcastable=(False,))
    self.Aval = numpy.ones((2, 3), dtype=dtype)
    self.xval = numpy.asarray([1, 2], dtype=dtype)
    self.yval = numpy.asarray([1.5, 2.7, 3.9], dtype=dtype)
    if not theano.tensor.blas_scipy.optimizations_enabled:
        self.skipTest('scipy blas optimizations are not enabled')
Example 9: test_numpy_2d
def test_numpy_2d(self):
for shp0 in [(2, 3)]:
x = tensor.tensor(dtype="floatX", broadcastable=(False,) * len(shp0))
a = numpy.asarray(self.rng.rand(*shp0)).astype(config.floatX)
for shp1 in [(6, 7)]:
            if len(shp0) + len(shp1) == 2:
                # skip the vector-vector case: kron needs combined ndim > 2
                continue
y = tensor.tensor(dtype="floatX", broadcastable=(False,) * len(shp1))
f = function([x, y], kron(x, y))
b = self.rng.rand(*shp1).astype(config.floatX)
out = f(a, b)
assert numpy.allclose(out, numpy.kron(a, b))
Example 10: setUp
def setUp(self, dtype="float64"):
    if theano.config.blas.ldflags == "":
        raise SkipTest("This test is useful only when Theano is directly linked to blas.")
self.dtype = dtype
self.mode = theano.compile.get_default_mode().including("fast_run")
self.A = tensor.tensor(dtype=dtype, broadcastable=(False, False))
self.a = tensor.tensor(dtype=dtype, broadcastable=())
self.x = tensor.tensor(dtype=dtype, broadcastable=(False,))
self.y = tensor.tensor(dtype=dtype, broadcastable=(False,))
self.Aval = numpy.ones((2, 3), dtype=dtype)
self.xval = numpy.asarray([1, 2], dtype=dtype)
self.yval = numpy.asarray([1.5, 2.7, 3.9], dtype=dtype)
Example 11: setUp
def setUp(self):
self.mode = theano.compile.get_default_mode()
self.mode = self.mode.including("fast_run")
self.mode = self.mode.excluding("c_blas") # c_blas trumps scipy Ops
dtype = self.dtype = "float64" # optimization isn't dtype-dependent
self.A = tensor.tensor(dtype=dtype, broadcastable=(False, False))
self.a = tensor.tensor(dtype=dtype, broadcastable=())
self.x = tensor.tensor(dtype=dtype, broadcastable=(False,))
self.y = tensor.tensor(dtype=dtype, broadcastable=(False,))
self.Aval = numpy.ones((2, 3), dtype=dtype)
self.xval = numpy.asarray([1, 2], dtype=dtype)
self.yval = numpy.asarray([1.5, 2.7, 3.9], dtype=dtype)
    if not theano.tensor.blas_scipy.have_fblas:
        self.skipTest('scipy fblas is not available')
Example 12: make_node
def make_node(self, x, gz):
assert isinstance(x, Variable)
assert isinstance(gz, Variable)
gx = tensor(dtype=scal.upcast(gz.dtype, x.dtype),
broadcastable=x.broadcastable)
op = self
return Apply(op, [x, gz], [gx])
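The output dtype here comes from scal.upcast, which follows NumPy-style type promotion; a tiny standalone check (restricted to float dtypes, where the result does not depend on Theano's cast policy):

from theano import scalar as scal

print(scal.upcast('float32', 'float64'))  # float64
print(scal.upcast('float32', 'float32'))  # float32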
Example 13: test_incsub_f16
def test_incsub_f16():
shp = (3, 3)
shared = gpuarray_shared_constructor
xval = np.arange(np.prod(shp), dtype='float16').reshape(shp) + 1
yval = np.empty((2,) + shp[1:], dtype='float16')
yval[:] = 2
x = shared(xval, name='x')
y = tensor.tensor(dtype='float16',
broadcastable=(False,) * len(shp),
name='y')
expr = tensor.advanced_inc_subtensor1(x, y, [0, 2])
f = theano.function([y], expr, mode=mode_with_gpu)
assert sum([isinstance(node.op, GpuAdvancedIncSubtensor1)
for node in f.maker.fgraph.toposort()]) == 1
rval = f(yval)
rep = xval.copy()
np.add.at(rep, [[0, 2]], yval)
assert np.allclose(rval, rep)
expr = tensor.inc_subtensor(x[1:], y)
f = theano.function([y], expr, mode=mode_with_gpu)
assert sum([isinstance(node.op, GpuIncSubtensor)
for node in f.maker.fgraph.toposort()]) == 1
rval = f(yval)
rep = xval.copy()
rep[1:] += yval
assert np.allclose(rval, rep)
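Note that the reference result is built with np.add.at rather than fancy-indexed +=; the two differ as soon as indices repeat, which is why tests of inc_subtensor-style ops use the unbuffered form. A standalone demonstration:

import numpy as np

a = np.zeros(3)
np.add.at(a, [0, 0, 2], 1.0)   # unbuffered: repeated indices accumulate
print(a)                       # [2. 0. 1.]

b = np.zeros(3)
b[[0, 0, 2]] += 1.0            # buffered: repeated indices collapse to one update
print(b)                       # [1. 0. 1.]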
Example 14: make_node
def make_node(self, A, b):
A = as_tensor_variable(A)
b = as_tensor_variable(b)
    otype = tensor.tensor(broadcastable=b.broadcastable,
                          dtype=(A * b).dtype)
    return Apply(self, [A, b], [otype])
Example 15: make_node
def make_node(self, A, b):
assert imported_scipy, "Scipy not available. Scipy is needed for the Solve op"
A = as_tensor_variable(A)
b = as_tensor_variable(b)
assert A.ndim == 2
assert b.ndim in [1, 2]
otype = tensor.tensor(broadcastable=b.broadcastable, dtype=(A * b).dtype)
return Apply(self, [A, b], [otype])
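This make_node matches the pattern of Theano's Solve op from theano.tensor.slinalg; assuming that context, a sketch of solving a linear system through the user-facing wrapper:

import numpy as np
import theano
import theano.tensor as T
from theano.tensor.slinalg import solve

A = T.dmatrix('A')
b = T.dvector('b')
x = solve(A, b)                 # symbolic solution of A x = b
f = theano.function([A, b], x)
print(f(np.array([[3., 1.], [1., 2.]]), np.array([9., 8.])))  # approximately [2., 3.]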