本文整理汇总了Python中theano.tensor.advanced_inc_subtensor1函数的典型用法代码示例。如果您正苦于以下问题:Python advanced_inc_subtensor1函数的具体用法?Python advanced_inc_subtensor1怎么用?Python advanced_inc_subtensor1使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了advanced_inc_subtensor1函数的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_incsub_f16
def test_incsub_f16():
    # float16 coverage for both the advanced (fancy-index) and the basic
    # (slice) inc_subtensor paths on the GPU backend.
    shape = (3, 3)
    shared = gpuarray_shared_constructor

    x_init = np.arange(np.prod(shape), dtype='float16').reshape(shape) + 1
    y_data = np.empty((2,) + shape[1:], dtype='float16')
    y_data[:] = 2

    x = shared(x_init, name='x')
    y = tensor.tensor(dtype='float16',
                      broadcastable=(False,) * len(shape),
                      name='y')

    # x[[0, 2]] += y must be lifted to the GPU advanced-inc-subtensor op.
    graph = tensor.advanced_inc_subtensor1(x, y, [0, 2])
    fn = theano.function([y], graph, mode=mode_with_gpu)
    lifted = [node for node in fn.maker.fgraph.toposort()
              if isinstance(node.op, GpuAdvancedIncSubtensor1)]
    assert len(lifted) == 1
    expected = x_init.copy()
    np.add.at(expected, [[0, 2]], y_data)
    assert np.allclose(fn(y_data), expected)

    # x[1:] += y must be lifted to the GPU basic inc-subtensor op.
    graph = tensor.inc_subtensor(x[1:], y)
    fn = theano.function([y], graph, mode=mode_with_gpu)
    lifted = [node for node in fn.maker.fgraph.toposort()
              if isinstance(node.op, GpuIncSubtensor)]
    assert len(lifted) == 1
    expected = x_init.copy()
    expected[1:] += y_data
    assert np.allclose(fn(y_data), expected)
示例2: test_advinc_subtensor1
def test_advinc_subtensor1():
    """ Test the second case in the opt local_gpu_advanced_incsubtensor1 """
    shared = cuda.shared_constructor
    # shared = tensor.shared
    xval = numpy.asarray([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype="float32")
    yval = numpy.asarray([[10, 10, 10], [10, 10, 10]], dtype="float32")
    x = shared(xval, name="x")
    # fmatrix (singular) builds one variable; the plural fmatrices() is meant
    # for constructing several at once and was misused here for a single one.
    y = T.fmatrix("y")
    expr = T.advanced_inc_subtensor1(x, y, [0, 2])
    f = theano.function([y], expr, mode=mode_with_gpu)
    # f.maker.env is the deprecated old name for f.maker.fgraph; use fgraph
    # for consistency with the other tests in this file.
    assert sum([isinstance(node.op, cuda.GpuAdvancedIncSubtensor1)
                for node in f.maker.fgraph.toposort()]) == 1
    # Rows 0 and 2 receive yval; row 1 must be untouched.
    assert numpy.allclose(f(yval),
                          [[11.0, 12.0, 13.0], [4.0, 5.0, 6.0], [17.0, 18.0, 19.0]])
示例3: test_advinc_subtensor1
def test_advinc_subtensor1():
    """ Test the second case in the opt local_gpu_advanced_incsubtensor1 """
    # Run the same check on a 2-d and a 3-d shared value.
    for shape in [(3, 3), (3, 3, 3)]:
        shared = gpuarray_shared_constructor
        x_init = numpy.arange(numpy.prod(shape),
                              dtype='float32').reshape(shape) + 1
        y_data = numpy.empty((2,) + shape[1:], dtype='float32')
        y_data[:] = 10
        x = shared(x_init, name='x')
        y = tensor.tensor(dtype='float32',
                          broadcastable=(False,) * len(shape),
                          name='y')
        graph = tensor.advanced_inc_subtensor1(x, y, [0, 2])
        fn = theano.function([y], graph, mode=mode_with_gpu)
        lifted = [node for node in fn.maker.fgraph.toposort()
                  if isinstance(node.op, GpuAdvancedIncSubtensor1)]
        assert len(lifted) == 1
        expected = x_init.copy()
        expected[[0, 2]] += y_data
        assert numpy.allclose(fn(y_data), expected)
示例4: test_deterministic_flag
def test_deterministic_flag():
    # NOTE(review): despite the name, no deterministic flag is toggled here;
    # the test only checks the mixed-dtype GPU lift — confirm intent upstream.
    shape = (3, 4)
    for x_dtype, y_dtype in [('float32', 'int8')]:
        shared = gpuarray_shared_constructor
        x_init = np.arange(np.prod(shape), dtype=x_dtype).reshape(shape) + 1
        y_data = np.empty((2,) + shape[1:], dtype=y_dtype)
        y_data[:] = 10
        x = shared(x_init, name='x')
        y = tensor.tensor(dtype=y_data.dtype,
                          broadcastable=(False,) * len(y_data.shape),
                          name='y')
        graph = tensor.advanced_inc_subtensor1(x, y, [0, 2])
        fn = theano.function([y], graph, mode=mode_with_gpu)
        lifted = [node for node in fn.maker.fgraph.toposort()
                  if isinstance(node.op, GpuAdvancedIncSubtensor1)]
        assert len(lifted) == 1
        expected = x_init.copy()
        np.add.at(expected, [[0, 2]], y_data)
        assert np.allclose(fn(y_data), expected)
示例5: test_advinc_subtensor1_vector_scalar
def test_advinc_subtensor1_vector_scalar():
    # x is a vector and y a 0-d scalar; the increment should still be
    # lifted to the dev20 GPU advanced-inc-subtensor op.
    shape = (3,)
    for x_dtype, y_dtype in [('float32', 'int8'), ('float32', 'float64')]:
        shared = gpuarray_shared_constructor
        x_init = np.arange(np.prod(shape), dtype=x_dtype).reshape(shape) + 1
        y_data = np.asarray(10, dtype=y_dtype)
        x = shared(x_init, name='x')
        # y_data is 0-d, so broadcastable is the empty tuple.
        y = tensor.tensor(dtype=y_data.dtype,
                          broadcastable=(False,) * len(y_data.shape),
                          name='y')
        graph = tensor.advanced_inc_subtensor1(x, y, [0, 2])
        fn = theano.function([y], graph, mode=mode_with_gpu)
        lifted = [node for node in fn.maker.fgraph.toposort()
                  if isinstance(node.op, GpuAdvancedIncSubtensor1_dev20)]
        assert len(lifted) == 1
        expected = x_init.copy()
        expected[[0, 2]] += y_data
        assert np.allclose(fn(y_data), expected)
示例6: test_advinc_subtensor1_dtype
def test_advinc_subtensor1_dtype():
    # Mixed-dtype increments (int8 and float64 y into a float32 x) must
    # still be lifted to the dev20 GPU advanced-inc-subtensor op.
    shape = (3, 4)
    for x_dtype, y_dtype in [('float32', 'int8'), ('float32', 'float64')]:
        shared = gpuarray_shared_constructor
        x_init = numpy.arange(numpy.prod(shape),
                              dtype=x_dtype).reshape(shape) + 1
        y_data = numpy.empty((2,) + shape[1:], dtype=y_dtype)
        y_data[:] = 10
        x = shared(x_init, name='x')
        y = tensor.tensor(dtype=y_data.dtype,
                          broadcastable=(False,) * len(y_data.shape),
                          name='y')
        graph = tensor.advanced_inc_subtensor1(x, y, [0, 2])
        fn = theano.function([y], graph, mode=mode_with_gpu)
        lifted = [node for node in fn.maker.fgraph.toposort()
                  if isinstance(node.op, GpuAdvancedIncSubtensor1_dev20)]
        assert len(lifted) == 1
        expected = x_init.copy()
        expected[[0, 2]] += y_data
        assert numpy.allclose(fn(y_data), expected)