This article collects typical usage examples of the theano.tensor.ftensor3 method in Python. If you are wondering what tensor.ftensor3 does, how to use it, or where to find usage examples, the curated code samples below should help. You can also explore the theano.tensor module that the method belongs to.
The following section presents 15 code examples of the tensor.ftensor3 method, sorted by popularity by default.
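Before the examples, here is a minimal self-contained sketch of the pattern they all share: declare a float32 3-D symbolic variable with ftensor3, build an expression on it, and compile it with theano.function. It is an illustration written for this article, not taken from the examples below; the names x, y and data are arbitrary.

import numpy as np
import theano
import theano.tensor as T

# ftensor3 creates a symbolic 3-D tensor variable with dtype float32.
x = T.ftensor3('x')

# A toy expression: sum over the last axis, giving one matrix per input.
y = x.sum(axis=2)

# Compile and evaluate on a concrete float32 array of shape (2, 3, 4).
f = theano.function([x], y)
data = np.arange(24, dtype=np.float32).reshape(2, 3, 4)
print(f(data).shape)  # -> (2, 3)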
Example 1: test_blocksparse_inplace_gemv_opt
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import ftensor3 [as alias]
def test_blocksparse_inplace_gemv_opt():
    b = tensor.fmatrix()
    W = tensor.ftensor4()
    h = tensor.ftensor3()
    iIdx = tensor.lmatrix()
    oIdx = tensor.lmatrix()
    o = sparse_block_dot(W, h, iIdx, b, oIdx)
    f = theano.function([W, h, iIdx, b, oIdx], o)
    assert hasattr(f.maker.fgraph.outputs[0].tag, 'trace')
    if theano.config.mode == "FAST_COMPILE":
        assert not f.maker.fgraph.toposort()[-1].op.inplace
    else:
        assert f.maker.fgraph.toposort()[-1].op.inplace
Example 2: test_blocksparse_inplace_outer_opt
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import ftensor3 [as alias]
def test_blocksparse_inplace_outer_opt():
    b = tensor.fmatrix()
    W = tensor.ftensor4()
    h = tensor.ftensor3()
    iIdx = tensor.lmatrix()
    oIdx = tensor.lmatrix()
    o = sparse_block_dot(W, h, iIdx, b, oIdx)
    theano.printing.debugprint(tensor.grad(o.sum(), wrt=W))
    f = theano.function([W, h, iIdx, b, oIdx],
                        [o, tensor.grad(o.sum(), wrt=W)])
    assert hasattr(f.maker.fgraph.outputs[0].tag, 'trace')
    if theano.config.mode == "FAST_COMPILE":
        assert not f.maker.fgraph.toposort()[-1].op.inplace
    else:
        assert f.maker.fgraph.toposort()[-1].op.inplace
Example 3: test_sparseblockdot
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import ftensor3 [as alias]
def test_sparseblockdot(self):
"""
Compares the numpy version of sparseblockgemv to sparse_block_dot.
"""
b = tensor.fmatrix()
W = tensor.ftensor4()
h = tensor.ftensor3()
iIdx = tensor.imatrix()
oIdx = tensor.imatrix()
o = sparse_block_dot(W, h, iIdx, b, oIdx)
f = theano.function([W, h, iIdx, b, oIdx], o, mode=self.mode)
W_val, h_val, iIdx_val, b_val, oIdx_val = \
BlockSparse_Gemv_and_Outer.gemv_data()
th_out = f(W_val, h_val, iIdx_val, b_val, oIdx_val)
ref_out = BlockSparse_Gemv_and_Outer.gemv_numpy(
b_val.take(oIdx_val, axis=0), W_val, h_val, iIdx_val, oIdx_val)
utt.assert_allclose(ref_out, th_out)
Example 4: test_sparseblockgemv
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import ftensor3 [as alias]
def test_sparseblockgemv(self):
"""
Compares the numpy and theano versions of sparseblockgemv.
"""
b = tensor.fmatrix()
W = tensor.ftensor4()
h = tensor.ftensor3()
iIdx = tensor.imatrix()
oIdx = tensor.imatrix()
o = self.gemv_op(b.take(oIdx, axis=0), W, h, iIdx, oIdx)
f = theano.function([W, h, iIdx, b, oIdx], o, mode=self.mode)
W_val, h_val, iIdx_val, b_val, oIdx_val = \
BlockSparse_Gemv_and_Outer.gemv_data()
th_out = f(W_val, h_val, iIdx_val, b_val, oIdx_val)
ref_out = BlockSparse_Gemv_and_Outer.gemv_numpy(
b_val.take(oIdx_val, axis=0), W_val, h_val, iIdx_val, oIdx_val)
utt.assert_allclose(ref_out, th_out)
Example 5: test_sparseblockouter
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import ftensor3 [as alias]
def test_sparseblockouter(self):
    o = tensor.ftensor4()
    x = tensor.ftensor3()
    y = tensor.ftensor3()
    xIdx = tensor.imatrix()
    yIdx = tensor.imatrix()
    out = self.outer_op(o, x, y, xIdx, yIdx)
    f = theano.function([o, x, y, xIdx, yIdx], out,
                        on_unused_input="warn", mode=self.mode)
    o_val, x_val, y_val, xIdx_val, yIdx_val = \
        BlockSparse_Gemv_and_Outer.outer_data()
    th_out = f(o_val, x_val, y_val, xIdx_val, yIdx_val)
    ref_out = BlockSparse_Gemv_and_Outer.outer_numpy(
        o_val, x_val, y_val, xIdx_val, yIdx_val)
    utt.assert_allclose(ref_out, th_out)
Example 6: test_Strides3D
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import ftensor3 [as alias]
def test_Strides3D(self):
    x = T.ftensor3('x')
    for axis in [0, 1, 2, None, -1, -2, -3]:
        a = np.random.random((42, 30, 25)).astype("float32")
        cumsum_function = theano.function([x], cumsum(x, axis=axis),
                                          mode=self.mode)
        slicings = [slice(None, None, None),  # Normal strides
                    slice(None, None, 2),     # Stepped strides
                    slice(None, None, -1),    # Negative strides
                    ]
        # Cartesian product of all slicings to test.
        for slicing in itertools.product(slicings, repeat=x.ndim):
            f = theano.function([x], cumsum(x[slicing], axis=axis),
                                mode=self.mode)
            assert [n for n in f.maker.fgraph.toposort()
                    if isinstance(n.op, GpuCumsum)]
            utt.assert_allclose(np.cumsum(a[slicing], axis=axis), f(a))
            utt.assert_allclose(np.cumsum(a[slicing], axis=axis),
                                cumsum_function(a[slicing]))
Example 7: test_batched_dot_errors
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import ftensor3 [as alias]
def test_batched_dot_errors(self):
    def fail(a_shp, b_shp):
        a = numpy.random.randn(*a_shp).astype(numpy.float32)
        b = numpy.random.randn(*b_shp).astype(numpy.float32)
        x = tensor.ftensor3()
        y = tensor.ftensor3()
        f = theano.function([x, y], batched_dot(x, y), mode=mode_with_gpu)
        z = f(a, b)

    # Different batch size
    self.assertRaises(RuntimeError, fail, (5, 4, 3), (6, 3, 2))
    # Shape mismatch
    self.assertRaises(RuntimeError, fail, (5, 4, 3), (5, 2, 2))
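For contrast with the two failure cases above, the following CPU-only sketch (written for this article, not part of the test suite) runs batched_dot on two shape-compatible ftensor3 inputs; each batch entry is multiplied as an ordinary matrix product.

import numpy as np
import theano
import theano.tensor as T

x = T.ftensor3()
y = T.ftensor3()
f = theano.function([x, y], T.batched_dot(x, y))

a = np.random.randn(5, 4, 3).astype(np.float32)
b = np.random.randn(5, 3, 2).astype(np.float32)
print(f(a, b).shape)  # -> (5, 4, 2): one (4, 3) x (3, 2) product per batch entry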
Example 8: setup
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import ftensor3 [as alias]
def setup(self, bottom, top):
    # check input pair
    if len(bottom) != 2:
        raise Exception("Need two inputs to compute the dice: "
                        "the result of the softmax and the ground truth.")
    if len(bottom[0].data.shape) == 4:
        self.prediction = T.fmatrix()
        self.ground_truth = T.fmatrix()
    elif len(bottom[0].data.shape) == 5:
        self.prediction = T.ftensor3()
        self.ground_truth = T.ftensor3()
    else:
        raise Exception('DiceIndexLayer only supports 2D or 3D data at the moment.')
    # Dice coefficient: 2 * |A ∩ B| / (|A| + |B|); the small constant avoids division by zero.
    intersection = T.sum(self.prediction * self.ground_truth)
    denominator = T.sum(self.prediction) + T.sum(self.ground_truth)
    dice = 2 * intersection / (denominator + 0.00001)
    self.f = theano.function([self.prediction, self.ground_truth], dice)
    top[0].reshape(1)
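For reference, the Dice expression built in setup can also be exercised on its own, outside of the Caffe layer. The sketch below rebuilds the same Theano graph for the 3-D case and feeds it two small illustrative binary masks; the shapes and values are made up for demonstration.

import numpy as np
import theano
import theano.tensor as T

prediction = T.ftensor3()
ground_truth = T.ftensor3()
intersection = T.sum(prediction * ground_truth)
denominator = T.sum(prediction) + T.sum(ground_truth)
dice = 2 * intersection / (denominator + 0.00001)
f = theano.function([prediction, ground_truth], dice)

p = np.zeros((1, 4, 4), dtype=np.float32)
g = np.zeros((1, 4, 4), dtype=np.float32)
p[0, :2, :2] = 1.0    # predicted mask: 4 voxels
g[0, 1:3, 1:3] = 1.0  # ground-truth mask: 4 voxels, 1 voxel of overlap
print(f(p, g))        # -> roughly 2 * 1 / 8 = 0.25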
Example 9: test_blocksparse_inplace_outer_opt
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import ftensor3 [as alias]
def test_blocksparse_inplace_outer_opt():
    b = tensor.fmatrix()
    W = tensor.ftensor4()
    h = tensor.ftensor3()
    iIdx = tensor.lmatrix()
    oIdx = tensor.lmatrix()
    o = sparse_block_dot(W, h, iIdx, b, oIdx)
    theano.printing.debugprint(tensor.grad(o.sum(), wrt=W))
    f = theano.function([W, h, iIdx, b, oIdx],
                        [o, tensor.grad(o.sum(), wrt=W)])
    if theano.config.mode == "FAST_COMPILE":
        assert not f.maker.fgraph.toposort()[-1].op.inplace
    else:
        assert f.maker.fgraph.toposort()[-1].op.inplace
Example 10: __init__
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import ftensor3 [as alias]
def __init__(self, computeGradient=True):
    super(CpuCtc, self).__init__()
    self.computeGradient = computeGradient
    self.costs = T.fvector(name="ctc_cost")
    if self.computeGradient:
        self.gradients = T.ftensor3(name="ctc_grad")
Example 11: test_sparseblockgemvF
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import ftensor3 [as alias]
def test_sparseblockgemvF(self):
"""
Test the fortan order for W (which can happen in the grad for some
graphs).
"""
b = tensor.fmatrix()
W = tensor.ftensor4()
h = tensor.ftensor3()
iIdx = tensor.imatrix()
oIdx = tensor.imatrix()
o = self.gemv_op(b.take(oIdx, axis=0),
tensor.DimShuffle((False, False, False, False),
(0, 1, 3, 2))
(tensor.as_tensor_variable(W)),
h, iIdx, oIdx)
f = theano.function([W, h, iIdx, b, oIdx], o, mode=self.mode)
W_val, h_val, iIdx_val, b_val, oIdx_val = \
BlockSparse_Gemv_and_Outer.gemv_data()
th_out = f(numpy.swapaxes(W_val, 2, 3), h_val, iIdx_val, b_val,
oIdx_val)
ref_out = BlockSparse_Gemv_and_Outer.gemv_numpy(
b_val.take(oIdx_val, axis=0), W_val, h_val, iIdx_val, oIdx_val)
utt.assert_allclose(ref_out, th_out)
Example 12: test_dot_infershape
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import ftensor3 [as alias]
def test_dot_infershape(self):
    b = tensor.fmatrix()
    W = tensor.ftensor4()
    h = tensor.ftensor3()
    iIdx = tensor.imatrix()
    oIdx = tensor.imatrix()
    self._compile_and_check([W, h, iIdx, b, oIdx],
                            [sparse_block_dot(W, h, iIdx, b, oIdx)],
                            self.gemv_data(),
                            self.gemv_class)
Example 13: test_gemv_infershape
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import ftensor3 [as alias]
def test_gemv_infershape(self):
    b = tensor.fmatrix()
    W = tensor.ftensor4()
    h = tensor.ftensor3()
    iIdx = tensor.imatrix()
    oIdx = tensor.imatrix()
    self._compile_and_check(
        [W, h, iIdx, b, oIdx],
        [self.gemv_op(b.take(oIdx, axis=0), W, h, iIdx, oIdx)],
        self.gemv_data(),
        self.gemv_class)
Example 14: test_outer_infershape
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import ftensor3 [as alias]
def test_outer_infershape(self):
    o = tensor.ftensor4()
    x = tensor.ftensor3()
    y = tensor.ftensor3()
    xIdx = tensor.imatrix()
    yIdx = tensor.imatrix()
    self._compile_and_check([o, x, y, xIdx, yIdx],
                            [self.outer_op(o, x, y, xIdx, yIdx)],
                            self.outer_data(),
                            self.outer_class)
Example 15: test_infer_shape
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import ftensor3 [as alias]
def test_infer_shape(self):
    # only matrix/matrix is supported
    admat = tensor.ftensor3()
    bdmat = tensor.ftensor3()
    admat_val = my_rand(7, 4, 5)
    bdmat_val = my_rand(7, 5, 3)
    self._compile_and_check([admat, bdmat],
                            [GpuBatchedDot()(admat, bdmat)],
                            [admat_val, bdmat_val],
                            GpuBatchedDot)