This article collects typical usage examples of the Python method theano.tensor.bvector. If you are unsure what tensor.bvector does or how to use it, the curated examples below may help. You can also explore other usage examples from the theano.tensor
module.
Three code examples of the tensor.bvector method are shown below, ordered by popularity.
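As a quick orientation before the collected examples, here is a minimal, self-contained sketch (not taken from the listing below) of what tensor.bvector creates: a symbolic one-dimensional tensor of dtype int8.

import numpy as np
import theano
import theano.tensor as T

# bvector declares a symbolic 1-D tensor with dtype int8
b = T.bvector('b')
print(b.dtype)  # 'int8'

# Compile a trivial function over it; inputs must fit in int8
double = theano.function([b], b * 2)
print(double(np.array([1, 2, 3], dtype='int8')))  # [2 4 6]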
Example 1: test_param_allow_downcast_int
# Required module: from theano import tensor [as alias]
# Or: from theano.tensor import bvector [as alias]
# (This test also assumes, as in the Theano test suite: import numpy;
#  from theano import In; from theano.compile.pfunc import pfunc)
def test_param_allow_downcast_int(self):
    a = tensor.wvector('a')  # int16
    b = tensor.bvector('b')  # int8
    c = tensor.bscalar('c')  # int8
    f = pfunc([In(a, allow_downcast=True),
               In(b, allow_downcast=False),
               In(c, allow_downcast=None)],
              (a + b + c))

    # All values are in range. Since they're not ndarrays (but lists),
    # they will be converted, and their values checked.
    assert numpy.all(f([3], [6], 1) == 10)

    # Values are in range, but a dtype too large has explicitly been given.
    # For performance reasons, no check of the data is explicitly performed
    # (it might be OK to change this in the future).
    self.assertRaises(TypeError, f,
                      [3], numpy.array([6], dtype='int16'), 1)

    # Value too big for a, silently ignored: 2 ** 20 wraps to 0 as int16
    assert numpy.all(f([2 ** 20], numpy.ones(1, dtype='int8'), 1) == 2)

    # Value too big for b raises TypeError
    self.assertRaises(TypeError, f, [3], [312], 1)

    # Value too big for c raises TypeError
    self.assertRaises(TypeError, f, [3], [6], 806)
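Note the design contrast with Example 2: here the downcast policy is chosen per input through In(..., allow_downcast=...), and In(c, allow_downcast=None) defers to the compiled function's own allow_input_downcast setting, whereas Example 2 applies a single policy to every input at once.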
Example 2: test_allow_input_downcast_int
# Required module: from theano import tensor [as alias]
# Or: from theano.tensor import bvector [as alias]
# (This test also assumes: import numpy; from theano.compile.pfunc import pfunc)
def test_allow_input_downcast_int(self):
    a = tensor.wvector('a')  # int16
    b = tensor.bvector('b')  # int8
    c = tensor.bscalar('c')  # int8

    f = pfunc([a, b, c], (a + b + c), allow_input_downcast=True)
    # Values too big for a, b, or c are silently wrapped around
    assert f([2 ** 20], [1], 0) == 1
    assert f([3], [312], 0) == 59
    assert f([3], [1], 806) == 42

    g = pfunc([a, b, c], (a + b + c), allow_input_downcast=False)
    # All values are in range. Since they're not ndarrays (but lists
    # or scalars), they will be converted, and their values checked.
    assert numpy.all(g([3], [6], 0) == 9)
    # Values are in range, but a dtype too large has explicitly been given.
    # For performance reasons, no check of the data is explicitly performed
    # (it might be OK to change this in the future).
    self.assertRaises(TypeError, g,
                      [3], numpy.array([6], dtype='int16'), 0)
    # Value too big for b raises TypeError
    self.assertRaises(TypeError, g, [3], [312], 0)

    h = pfunc([a, b, c], (a + b + c))  # default: allow_input_downcast=None
    # Everything here should behave like with False
    assert numpy.all(h([3], [6], 0) == 9)
    self.assertRaises(TypeError, h,
                      [3], numpy.array([6], dtype='int16'), 0)
    self.assertRaises(TypeError, h, [3], [312], 0)
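The expected values in the asserts above follow from modular wraparound when an out-of-range integer is downcast. A quick check with plain numpy (not part of the original test) reproduces the arithmetic:

import numpy as np

print(np.array([2 ** 20]).astype('int16'))  # [0]   so 0 + 1 + 0 == 1
print(np.array([312]).astype('int8'))       # [56]  so 3 + 56 + 0 == 59
print(np.array([806]).astype('int8'))       # [38]  so 3 + 1 + 38 == 42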
Example 3: __init__
# Required module: from theano import tensor [as alias]
# Or: from theano.tensor import bvector [as alias]
# (This class also assumes: import numpy as np; import theano;
#  import theano.tensor as T; from theano import Param, which is Theano's
#  older name for In; plus the project's own FeatureExtractor and SoftmaxLayer)
def __init__(self, param_dict):
    # Unpack the hyperparameters
    self.param_dict = param_dict
    self.training_batch_size = param_dict['training_batch_size']
    nkerns = param_dict['nkerns']
    recept_width = param_dict['recept_width']
    pool_width = param_dict['pool_width']
    stride = param_dict['stride']
    dropout_prob = param_dict['dropout_prob']
    weight_decay = param_dict['l2_reg']
    activation = param_dict['activation']
    weights_variance = param_dict['weights_variance']
    n_channels = param_dict['n_channels']
    n_timesteps = param_dict['n_timesteps']
    n_fbins = param_dict['n_fbins']
    global_pooling = param_dict['global_pooling']

    rng = np.random.RandomState(23455)

    # Symbolic inputs: 4-D input batch and an int8 vector of class labels
    self.training_mode = T.iscalar('training_mode')
    self.x = T.tensor4('x')
    self.y = T.bvector('y')
    self.batch_size = theano.shared(self.training_batch_size)
    self.input = self.x.reshape((self.batch_size, 1, n_channels * n_fbins, n_timesteps))

    self.feature_extractor = FeatureExtractor(rng, self.input, nkerns, recept_width,
                                              pool_width, stride, self.training_mode,
                                              dropout_prob[0], activation, weights_variance,
                                              n_channels, n_timesteps, n_fbins,
                                              global_pooling)
    self.classifier = SoftmaxLayer(rng=rng, input=self.feature_extractor.output,
                                   n_in=nkerns[-1], training_mode=self.training_mode,
                                   dropout_prob=dropout_prob[-1])
    self.weights = self.feature_extractor.weights + self.classifier.weights

    # ---------------------- BACKPROP
    self.cost = self.classifier.cross_entropy_cost(self.y)
    L2_sqr = sum((weight ** 2).sum() for weight in self.weights[::2])
    self.grads = T.grad(self.cost + weight_decay * L2_sqr, self.weights)
    self.updates = self.adadelta_updates(self.grads, self.weights)
    # self.updates = self.nesterov_momentum(self.grads, self.weights)

    # --------------------- FUNCTIONS
    self.train_model = theano.function([self.x, self.y, Param(self.training_mode, default=1)],
                                       outputs=self.cost,
                                       updates=self.updates)
    self.validate_model = theano.function([self.x, self.y, Param(self.training_mode, default=0)],
                                          self.cost)
    self.test_model = theano.function([self.x, Param(self.training_mode, default=0)],
                                      self.classifier.p_y_given_x[:, 1])
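In this model, bvector holds the int8 class labels that feed the cross-entropy cost and the compiled training functions. A minimal standalone sketch of that labels-as-bvector pattern (hypothetical names, not taken from the class above):

import numpy as np
import theano
import theano.tensor as T

# Labels arrive as an int8 vector; predictions as class-1 probabilities
y = T.bvector('y')                     # 0/1 class labels, dtype int8
p_y_given_x = T.vector('p_y_given_x')  # predicted P(y = 1)

# Misclassification rate: threshold at 0.5 and compare with the labels
error_rate = T.mean(T.neq(p_y_given_x > 0.5, y))
f = theano.function([p_y_given_x, y], error_rate)

probs = np.array([0.9, 0.2, 0.4], dtype=theano.config.floatX)
labels = np.array([1, 0, 1], dtype='int8')
print(f(probs, labels))  # ~0.333: the third prediction is wrong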