This article collects typical code examples of the theano.tensor.fvector method in Python. If you have been wondering what exactly tensor.fvector does and how to use it, the curated method examples below may help. You can also explore further usage examples from the containing module, theano.tensor.
The following shows 15 code examples of the tensor.fvector method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
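Before diving into the examples, here is a minimal sketch of the basic pattern (assuming a working Theano install): tensor.fvector declares a symbolic one-dimensional float32 tensor, which theano.function then compiles into a callable.

import numpy
import theano
import theano.tensor as tensor

x = tensor.fvector('x')          # symbolic float32 vector named 'x'
f = theano.function([x], x * 2)  # compile a graph that doubles its input
print(f(numpy.asarray([1, 2, 3], dtype='float32')))  # -> [ 2.  4.  6.]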
Example 1: test_param_allow_downcast_vector_floatX
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import fvector [as alias]
def test_param_allow_downcast_vector_floatX(self):
    a = tensor.fvector('a')
    b = tensor.fvector('b')
    c = tensor.fvector('c')

    f = pfunc([In(a, allow_downcast=True),
               In(b, allow_downcast=False),
               In(c, allow_downcast=None)],
              (a + b + c))

    # If the values can be accurately represented, everything is OK
    z = [0]
    assert numpy.all(f(z, z, z) == 0)

    # If allow_downcast is True, idem
    assert numpy.allclose(f([0.1], z, z), 0.1)

    # If allow_downcast is False, nope
    self.assertRaises(TypeError, f, z, [0.1], z)

    # If allow_downcast is None, like False
    self.assertRaises(TypeError, f, z, z, [0.1])
Example 2: test_Strides1D
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import fvector [as alias]
def test_Strides1D(self):
    x = T.fvector('x')

    for axis in [0, None, -1]:
        a = np.random.random((42,)).astype("float32")
        cumsum_function = theano.function([x], cumsum(x, axis=axis),
                                          mode=self.mode)

        slicings = [slice(None, None, None),  # Normal strides
                    slice(None, None, 2),     # Stepped strides
                    slice(None, None, -1),    # Negative strides
                    ]

        # Cartesian product of all slicings to test.
        for slicing in itertools.product(slicings, repeat=x.ndim):
            f = theano.function([x], cumsum(x[slicing], axis=axis),
                                mode=self.mode)
            assert [n for n in f.maker.fgraph.toposort()
                    if isinstance(n.op, GpuCumsum)]
            utt.assert_allclose(np.cumsum(a[slicing], axis=axis), f(a))
            utt.assert_allclose(np.cumsum(a[slicing], axis=axis),
                                cumsum_function(a[slicing]))
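The GPU-specific assertions aside, the core call is a plain cumsum on an fvector; a minimal CPU-only sketch (assuming the same theano.tensor.extra_ops.cumsum import these tests use):

import numpy
import theano
import theano.tensor as T
from theano.tensor.extra_ops import cumsum

x = T.fvector('x')
f = theano.function([x], cumsum(x))
a = numpy.asarray([1, 2, 3], dtype='float32')
print(f(a))  # -> [ 1.  3.  6.], matching numpy.cumsum(a)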
Example 3: test_GpuCumsum1D
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import fvector [as alias]
def test_GpuCumsum1D(self):
    block_max_size = self.max_threads_dim0 * 2

    x = T.fvector('x')
    f = theano.function([x], cumsum(x), mode=self.mode)
    assert [n for n in f.maker.fgraph.toposort()
            if isinstance(n.op, GpuCumsum)]

    # Extensive testing for the first 1025 sizes
    a = np.random.random(1025).astype("float32")
    for i in xrange(a.shape[0]):
        utt.assert_allclose(np.cumsum(a[:i]), f(a[:i]))

    # Use multiple GPU threadblocks
    a = np.random.random((block_max_size + 2,)).astype("float32")
    utt.assert_allclose(np.cumsum(a), f(a))

    # Use recursive cumsum
    a = np.ones((block_max_size * (block_max_size + 1) + 2,),
                dtype="float32")
    utt.assert_allclose(np.cumsum(a), f(a))
Example 4: test_elemwise3
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import fvector [as alias]
def test_elemwise3():
    """Several kinds of elemwise expressions with dimension
    permutations and broadcasting"""

    shape = (3, 4, 5, 6)
    a = tcn.shared_constructor(theano._asarray(numpy.random.rand(*shape),
                                               dtype='float32'), 'a')
    b = tensor.fvector()
    new_val = (a + b).dimshuffle([2, 0, 3, 1])
    new_val *= tensor.exp(1 + b ** a).dimshuffle([2, 0, 3, 1])
    f = pfunc([b], [], updates=[(a, new_val)], mode=mode_with_gpu)

    has_elemwise = False
    for i, node in enumerate(f.maker.fgraph.toposort()):
        has_elemwise = has_elemwise or isinstance(node.op, tensor.Elemwise)
    assert not has_elemwise
    # let debugmode catch errors
    f(theano._asarray(numpy.random.rand(6), dtype='float32'))
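The dimshuffle([2, 0, 3, 1]) calls above permute the tensor's axes; a small sketch of just that operation (CPU-only, no GPU mode assumed):

import numpy
import theano
import theano.tensor as tensor

x = tensor.ftensor4('x')
y = x.dimshuffle([2, 0, 3, 1])   # axes (d0, d1, d2, d3) -> (d2, d0, d3, d1)
f = theano.function([x], y.shape)
print(f(numpy.zeros((3, 4, 5, 6), dtype='float32')))  # -> [5 3 6 4]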
Example 5: test_select_distinct
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import fvector [as alias]
def test_select_distinct(self):
    """
    Tests that MultinomialWOReplacementFromUniform always selects distinct elements
    """
    p = tensor.fmatrix()
    u = tensor.fvector()
    n = tensor.iscalar()
    m = multinomial.MultinomialWOReplacementFromUniform('auto')(p, u, n)

    f = function([p, u, n], m, allow_input_downcast=True)

    n_elements = 1000
    all_indices = range(n_elements)
    numpy.random.seed(12345)
    for i in [5, 10, 50, 100, 500, n_elements]:
        uni = numpy.random.rand(i).astype(config.floatX)
        pvals = numpy.random.randint(1, 100, (1, n_elements)).astype(config.floatX)
        pvals /= pvals.sum(1)
        res = f(pvals, uni, i)
        res = numpy.squeeze(res)
        assert len(res) == i
        assert numpy.all(numpy.in1d(numpy.unique(res), all_indices)), res
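For intuition, the property being tested, distinct indices drawn according to a probability vector, is exactly what numpy.random.choice with replace=False provides; a numpy-only sketch of the contract (not the op's implementation):

import numpy

pvals = numpy.asarray([0.1, 0.2, 0.3, 0.4])
res = numpy.random.choice(len(pvals), size=3, replace=False, p=pvals)
assert len(numpy.unique(res)) == len(res)  # all selected indices are distinct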
Example 6: test_fail_select_alot
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import fvector [as alias]
def test_fail_select_alot(self):
    """
    Tests that MultinomialWOReplacementFromUniform fails when asked to sample more
    elements than the actual number of elements
    """
    p = tensor.fmatrix()
    u = tensor.fvector()
    n = tensor.iscalar()
    m = multinomial.MultinomialWOReplacementFromUniform('auto')(p, u, n)

    f = function([p, u, n], m, allow_input_downcast=True)

    n_elements = 100
    n_selected = 200
    numpy.random.seed(12345)
    uni = numpy.random.rand(n_selected).astype(config.floatX)
    pvals = numpy.random.randint(1, 100, (1, n_elements)).astype(config.floatX)
    pvals /= pvals.sum(1)
    self.assertRaises(ValueError, f, pvals, uni, n_selected)
Example 7: __init__
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import fvector [as alias]
def __init__(self, seq_len, n_feature):
    import theano.tensor as T
    self.Input = lasagne.layers.InputLayer(shape=(None, seq_len, n_feature))
    self.buildNetwork()
    self.output = lasagne.layers.get_output(self.network)
    self.params = lasagne.layers.get_all_params(self.network, trainable=True)
    self.output_fn = theano.function([self.Input.input_var], self.output)

    fx = T.fvector().astype("float64")  # per-step reward weights, cast up to float64
    choices = T.ivector()               # index of the action taken at each step
    px = self.output[T.arange(self.output.shape[0]), choices]  # row-wise gather
    log_px = T.log(px)
    cost = -fx.dot(log_px)              # policy-gradient-style weighted negative log-likelihood
    updates = lasagne.updates.adagrad(cost, self.params, 0.0008)
    # Note: this second InputLayer is assigned to a local and never used.
    Input = lasagne.layers.InputLayer(shape=(None, seq_len, n_feature))
    self.train_fn = theano.function([self.Input.input_var, choices, fx],
                                    [cost, px, log_px], updates=updates)
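The line px = self.output[T.arange(...), choices] is a row-wise gather: one probability per row, picked by that row's chosen action. The numpy equivalent, for reference:

import numpy

output = numpy.asarray([[0.1, 0.9],
                        [0.7, 0.3]], dtype='float32')
choices = numpy.asarray([1, 0])
print(output[numpy.arange(output.shape[0]), choices])  # -> [ 0.9  0.7]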
Example 8: __init__
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import fvector [as alias]
def __init__(self, computeGradient=True):
    super(CpuCtc, self).__init__()
    self.computeGradient = computeGradient
    self.costs = T.fvector(name="ctc_cost")
    if self.computeGradient:
        self.gradients = T.ftensor3(name="ctc_grad")
Example 9: __init__
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import fvector [as alias]
def __init__(self, computeGradient=True):
    super(GpuCtc, self).__init__()
    self.computeGradient = computeGradient
    self.costs = T.fvector(name="ctc_cost")
    if self.computeGradient:
        self.gradients = CudaNdarrayVariable(name="ctc_grad",
                                             type=CudaNdarrayType(broadcastable=[False, False, False]))
Example 10: setUp
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import fvector [as alias]
def setUp(self):
    self.s = tensor.iscalar()      # int32 scalar
    self.v = tensor.fvector()      # float32 vector
    self.m = tensor.dmatrix()      # float64 matrix
    self.t = tensor.ctensor3()     # complex64 3-D tensor
    self.adv1q = tensor.lvector()  # advanced 1d query (int64 index vector)
Example 11: test_softmax_with_bias
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import fvector [as alias]
def test_softmax_with_bias():
    x = tensor.fmatrix()
    b = tensor.fvector()

    f = theano.function([x, b], tensor.nnet.nnet.SoftmaxWithBias()(x, b),
                        mode=mode_with_gpu)
    f2 = theano.function([x, b], tensor.nnet.nnet.SoftmaxWithBias()(x, b),
                         mode=mode_without_gpu)
    assert isinstance(f.maker.fgraph.toposort()[2].op,
                      cuda.nnet.GpuSoftmaxWithBias)

    xv = numpy.random.rand(7, 8).astype('float32')
    bv = numpy.random.rand(8).astype('float32')
    assert numpy.allclose(f(xv, bv), f2(xv, bv))
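Numerically, SoftmaxWithBias(x, b) is a row-wise softmax of x + b; a numpy reference implementation of that formula (a sketch for checking values, not Theano's fused kernel):

import numpy

def softmax_with_bias(x, b):
    z = x + b                                        # bias row is broadcast across rows
    e = numpy.exp(z - z.max(axis=1, keepdims=True))  # subtract row max for stability
    return e / e.sum(axis=1, keepdims=True)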
Example 12: test_vector
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import fvector [as alias]
def test_vector(self):
    x = cuda.fvector()
    y = numpy.zeros(7, dtype='float32')
    assert y.size == theano.function([x], x.size)(y)
Example 13: test_select_proportional_to_weight
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import fvector [as alias]
def test_select_proportional_to_weight(self):
    """
    Tests that MultinomialWOReplacementFromUniform selects elements, on average,
    proportionally to their probabilities
    """
    p = tensor.fmatrix()
    u = tensor.fvector()
    n = tensor.iscalar()
    m = multinomial.MultinomialWOReplacementFromUniform('auto')(p, u, n)

    f = function([p, u, n], m, allow_input_downcast=True)

    n_elements = 100
    n_selected = 10
    mean_rtol = 0.0005
    numpy.random.seed(12345)
    pvals = numpy.random.randint(1, 100, (1, n_elements)).astype(config.floatX)
    pvals /= pvals.sum(1)
    avg_pvals = numpy.zeros((n_elements,), dtype=config.floatX)

    for rep in range(10000):
        uni = numpy.random.rand(n_selected).astype(config.floatX)
        res = f(pvals, uni, n_selected)
        res = numpy.squeeze(res)
        avg_pvals[res] += 1
    avg_pvals /= avg_pvals.sum()
    avg_diff = numpy.mean(abs(avg_pvals - pvals))
    assert avg_diff < mean_rtol, avg_diff
Example 14: test_n_samples_1
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import fvector [as alias]
def test_n_samples_1():
    p = tensor.fmatrix()
    u = tensor.fvector()
    n = tensor.iscalar()
    m = multinomial.MultinomialFromUniform('auto')(p, u, n)

    f = function([p, u, n], m, allow_input_downcast=True)

    numpy.random.seed(12345)
    for i in [1, 5, 10, 100, 1000, 10000]:
        uni = numpy.random.rand(2 * i).astype(config.floatX)
        res = f([[1.0, 0.0], [0.0, 1.0]], uni, i)
        utt.assert_allclose(res, [[i * 1.0, 0.0], [0.0, i * 1.0]])
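One common way to turn a uniform draw into a categorical sample, consistent with the threshold values (.21/.25/.31) exercised in Example 15 below, is an inverse-CDF lookup; a numpy sketch (an assumption about the mechanism, not the op's actual code):

import numpy

def category_from_uniform(pvals, u):
    # Return the first CDF bin that u falls into.
    return numpy.searchsorted(numpy.cumsum(pvals), u)

print(category_from_uniform([.2, .8], .25))  # -> 1 (past the .2 boundary)
print(category_from_uniform([.2, .8], .15))  # -> 0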
Example 15: test_multinomial_0
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import fvector [as alias]
def test_multinomial_0():
    # This tests the MultinomialFromUniform Op directly, not going through the
    # multinomial() call in GPU random generation.
    p = tensor.fmatrix()
    u = tensor.fvector()

    m = multinomial.MultinomialFromUniform('auto')(p, u)

    def body(mode, gpu):
        # the m * 2 allows the multinomial to reuse output
        f = function([p, u], m * 2, allow_input_downcast=True, mode=mode)

        if gpu:
            assert any([type(node.op) is multinomial.GpuMultinomialFromUniform
                        for node in f.maker.fgraph.toposort()])

        # test that both first and second samples can be drawn
        utt.assert_allclose(f([[1, 0], [0, 1]], [.1, .1]),
                            [[2, 0], [0, 2]])

        # test that both second labels can be drawn
        r = f([[.2, .8], [.3, .7]], [.31, .31])
        utt.assert_allclose(r, [[0, 2], [0, 2]])

        # test that both first labels can be drawn
        r = f([[.2, .8], [.3, .7]], [.21, .21])
        utt.assert_allclose(r, [[0, 2], [2, 0]])

        # change the size to make sure output gets reallocated ok
        # and also make sure that the GPU version doesn't screw up the
        # transposed-ness
        r = f([[.2, .8]], [.25])
        utt.assert_allclose(r, [[0, 2]])

    run_with_c(body)
    if cuda.cuda_available:
        run_with_c(body, True)

    # TODO: check a bigger example (make sure blocking on GPU is handled correctly)