This page collects typical usage examples of the Python method theano.tensor.set_subtensor. If you are unsure what tensor.set_subtensor does or how to use it, the curated code examples below should help. You can also explore the other methods of the theano.tensor module.

Below are 15 code examples of tensor.set_subtensor, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
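Before the examples, here is a minimal, self-contained sketch of the method's behavior (variable names are illustrative, not taken from any example below): set_subtensor builds a new symbolic tensor in which a slice has been replaced; the original variable is never mutated.

import numpy as np
import theano
import theano.tensor as T

x = T.vector('x')
# set_subtensor returns a NEW symbolic tensor; x itself is not modified.
y = T.set_subtensor(x[1:3], np.asarray([10, 20], dtype=theano.config.floatX))
f = theano.function([x], y)
print(f(np.zeros(5, dtype=theano.config.floatX)))  # [ 0. 10. 20.  0.  0.]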
Example 1: ctc_update_log_p
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import set_subtensor [as alias]
def ctc_update_log_p(skip_idxs, zeros, active, log_p_curr, log_p_prev):
    active_skip_idxs = skip_idxs[(skip_idxs < active).nonzero()]
    active_next = T.cast(T.minimum(
        T.maximum(
            active + 1,
            T.max(T.concatenate([active_skip_idxs, [-1]])) + 2 + 1
        ), log_p_curr.shape[0]), 'int32')

    common_factor = T.max(log_p_prev[:active])
    p_prev = T.exp(log_p_prev[:active] - common_factor)
    _p_prev = zeros[:active_next]
    # copy over
    _p_prev = T.set_subtensor(_p_prev[:active], p_prev)
    # previous transitions
    _p_prev = T.inc_subtensor(_p_prev[1:], _p_prev[:-1])
    # skip transitions
    _p_prev = T.inc_subtensor(_p_prev[active_skip_idxs + 2], p_prev[active_skip_idxs])
    updated_log_p_prev = T.log(_p_prev) + common_factor

    log_p_next = T.set_subtensor(
        zeros[:active_next],
        log_p_curr[:active_next] + updated_log_p_prev
    )
    return active_next, log_p_next
Example 2: adam
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import set_subtensor [as alias]
def adam(self, param, grad, updates, sample_idx=None, epsilon=1e-6):
    v1 = np.float32(self.decay)
    v2 = np.float32(1.0 - self.decay)
    acc = theano.shared(param.get_value(borrow=False) * 0., borrow=True)
    meang = theano.shared(param.get_value(borrow=False) * 0., borrow=True)
    countt = theano.shared(param.get_value(borrow=False) * 0., borrow=True)
    if sample_idx is None:
        acc_new = v1 * acc + v2 * grad ** 2
        meang_new = v1 * meang + v2 * grad
        countt_new = countt + 1
        updates[acc] = acc_new
        updates[meang] = meang_new
        updates[countt] = countt_new
    else:
        acc_s = acc[sample_idx]
        meang_s = meang[sample_idx]
        countt_s = countt[sample_idx]
        acc_new = v1 * acc_s + v2 * grad ** 2
        meang_new = v1 * meang_s + v2 * grad
        countt_new = countt_s + 1.0
        updates[acc] = T.set_subtensor(acc_s, acc_new)
        updates[meang] = T.set_subtensor(meang_s, meang_new)
        updates[countt] = T.set_subtensor(countt_s, countt_new)
    return (meang_new / (1 - v1 ** countt_new)) / (T.sqrt(acc_new / (1 - v1 ** countt_new)) + epsilon)
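The sample_idx branch above relies on a common Theano idiom: index a shared variable, compute new values for just those rows, and pass the result to set_subtensor to obtain an update for the whole variable. A standalone sketch of that idiom (the names and decay constants are illustrative, not from the original class):

import numpy as np
import theano
import theano.tensor as T

acc = theano.shared(np.zeros((4, 2), dtype=theano.config.floatX))
sample_idx = T.ivector('sample_idx')
grad = T.matrix('grad')
acc_s = acc[sample_idx]                  # only the sampled rows
acc_new = 0.9 * acc_s + 0.1 * grad ** 2  # refresh only those rows
step = theano.function([sample_idx, grad], [],
                       updates={acc: T.set_subtensor(acc_s, acc_new)})
step(np.array([0, 2], dtype='int32'), np.ones((2, 2), dtype=theano.config.floatX))
print(acc.get_value())  # rows 0 and 2 changed, rows 1 and 3 still zero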
Example 3: adadelta
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import set_subtensor [as alias]
def adadelta(self, param, grad, updates, sample_idx=None, epsilon=1e-6):
    v1 = np.float32(self.decay)
    v2 = np.float32(1.0 - self.decay)
    acc = theano.shared(param.get_value(borrow=False) * 0., borrow=True)
    upd = theano.shared(param.get_value(borrow=False) * 0., borrow=True)
    if sample_idx is None:
        acc_new = acc + grad ** 2
        updates[acc] = acc_new
        grad = T.sqrt(upd + epsilon) * grad
        upd_new = v1 * upd + v2 * grad ** 2
        updates[upd] = upd_new
    else:
        acc_s = acc[sample_idx]
        acc_new = acc_s + grad ** 2
        updates[acc] = T.set_subtensor(acc_s, acc_new)
        upd_s = upd[sample_idx]
        upd_new = v1 * upd_s + v2 * grad ** 2
        updates[upd] = T.set_subtensor(upd_s, upd_new)
        grad = T.sqrt(upd_s + epsilon) * grad
    gradient_scaling = T.cast(T.sqrt(acc_new + epsilon), theano.config.floatX)
    return grad / gradient_scaling
Example 4: set_output
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import set_subtensor [as alias]
def set_output(self):
    output_shape = self._output_shape
    padding = self._padding
    unpool_size = self._unpool_size
    unpooled_output = tensor.alloc(0.0,  # value to fill the tensor
                                   output_shape[0],
                                   output_shape[1] + 2 * padding[0],
                                   output_shape[2],
                                   output_shape[3] + 2 * padding[1],
                                   output_shape[4] + 2 * padding[2])
    unpooled_output = tensor.set_subtensor(
        unpooled_output[:,
                        padding[0]:output_shape[1] + padding[0]:unpool_size[0],
                        :,
                        padding[1]:output_shape[3] + padding[1]:unpool_size[1],
                        padding[2]:output_shape[4] + padding[2]:unpool_size[2]],
        self._prev_layer.output)
    self._output = unpooled_output
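The layer above writes the previous layer's output into a zero-filled buffer at strided positions. A 1-D sketch of the same strided-assignment trick (illustrative, assuming only numpy and theano):

import numpy as np
import theano
import theano.tensor as T

v = T.vector('v')
buf = T.zeros((2 * v.shape[0],))
out = T.set_subtensor(buf[::2], v)  # place v at every other position
f = theano.function([v], out)
print(f(np.array([1, 2, 3], dtype=theano.config.floatX)))  # [1. 0. 2. 0. 3. 0.]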
Example 5: inpainting_sample_and_noise
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import set_subtensor [as alias]
def inpainting_sample_and_noise(self, X, default_input_include_prob=1., default_input_scale=1.):
    # Very hacky! Specifically for inpainting the right half of CIFAR-10
    # given the left half. Assumes X is b01c.
    assert X.ndim == 4
    input_space = self.mlp.get_input_space()
    n = input_space.get_total_dimension()
    image_size = input_space.shape[0]
    half_image = int(image_size / 2)
    data_shape = (X.shape[0], image_size, half_image, input_space.num_channels)

    noise = self.theano_rng.normal(size=data_shape, dtype='float32')
    Xg = T.set_subtensor(X[:, :, half_image:, :], noise)
    sampled_part = self.mlp.dropout_fprop(
        Xg, default_input_include_prob=default_input_include_prob,
        default_input_scale=default_input_scale)
    sampled_part = sampled_part.reshape(data_shape)
    rval = T.set_subtensor(X[:, :, half_image:, :], sampled_part)
    return rval, noise
Example 6: test_wrong_broadcast
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import set_subtensor [as alias]
def test_wrong_broadcast(self):
    a = tt.col()
    increment = tt.vector()

    # These symbolic graphs are legitimate as long as increment has
    # exactly one element, so they should fail at runtime, not at
    # compile time.
    rng = numpy.random.RandomState(utt.fetch_seed())

    def rng_randX(*shape):
        return rng.rand(*shape).astype(theano.config.floatX)

    for op in (tt.set_subtensor, tt.inc_subtensor):
        for base in (a[:], a[0]):
            out = op(base, increment)
            f = theano.function([a, increment], out)
            # This one should work
            f(rng_randX(3, 1), rng_randX(1))
            # These ones should not
            self.assertRaises(ValueError,
                              f, rng_randX(3, 1), rng_randX(2))
            self.assertRaises(ValueError,
                              f, rng_randX(3, 1), rng_randX(3))
            self.assertRaises(ValueError,
                              f, rng_randX(3, 1), rng_randX(0))
Example 7: expand_empty
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import set_subtensor [as alias]
def expand_empty(tensor_var, size):
    """
    Transforms the shape of a tensor from (d1, d2, ...) to (d1 + size, d2, ...)
    by adding uninitialized memory at the end of the tensor.
    """
    if size == 0:
        return tensor_var
    shapes = [tensor_var.shape[x] for x in xrange(tensor_var.ndim)]
    new_shape = [size + shapes[0]] + shapes[1:]
    empty = tensor.AllocEmpty(tensor_var.dtype)(*new_shape)

    ret = tensor.set_subtensor(empty[:shapes[0]], tensor_var)
    ret.tag.nan_guard_mode_check = False
    return ret
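A quick shape check of the helper (a hedged sketch: it assumes expand_empty as defined above, with tensor bound to theano.tensor and a Python 2 style xrange or six.moves.xrange in scope; the trailing rows are uninitialized memory, which is why the nan_guard_mode_check flag is cleared above):

import numpy as np
import theano
import theano.tensor as T

x = T.matrix('x')
f = theano.function([x], expand_empty(x, 3).shape)
print(f(np.zeros((5, 2), dtype=theano.config.floatX)))  # [8 2]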
Example 8: test_advset_subtensor1
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import set_subtensor [as alias]
def test_advset_subtensor1():
    """ Test GPU version of set_subtensor on vectors (uses GpuAdvancedIncSubtensor1) """
    shp = (10,)
    shared = cuda.shared_constructor
    xval = numpy.arange(shp[0], dtype='float32').reshape(shp) + 1
    idxs = numpy.array([0, 2, 5, 7, 3], dtype='int32')
    yval = numpy.ones(len(idxs), dtype='float32') * 10
    x = shared(xval, name='x')
    y = T.tensor(dtype='float32', broadcastable=(False,) * len(shp), name='y')
    expr = T.advanced_set_subtensor1(x, y, idxs)
    f = theano.function([y], expr, mode=mode_with_gpu)
    assert sum([isinstance(node.op, cuda.GpuAdvancedIncSubtensor1)
                for node in f.maker.fgraph.toposort()]) == 1
    rval = f(yval)
    rep = xval.copy()
    rep[idxs] = yval
    utt.assert_allclose(rval, rep)
Example 9: test_advset_subtensor1_2d
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import set_subtensor [as alias]
def test_advset_subtensor1_2d():
    """ Test GPU version of set_subtensor on matrices (uses GpuAdvancedIncSubtensor1_dev20 if compute capability >= 2.0) """
    shp = (10, 5)
    shared = cuda.shared_constructor
    xval = numpy.arange(numpy.prod(shp), dtype='float32').reshape(shp) + 1
    idxs = numpy.array([0, 2, 5, 7, 3], dtype='int32')
    yval = numpy.ones((len(idxs), shp[1]), dtype='float32') * 10
    x = shared(xval, name='x')
    y = T.tensor(dtype='float32', broadcastable=(False,) * len(shp), name='y')
    expr = T.advanced_set_subtensor1(x, y, idxs)
    f = theano.function([y], expr, mode=mode_with_gpu)
    assert sum([isinstance(node.op, cuda.GpuAdvancedIncSubtensor1)
                for node in f.maker.fgraph.toposort()]) == 1
    rval = f(yval)
    rep = xval.copy()
    rep[idxs] = yval
    utt.assert_allclose(rval, rep)
Example 10: adagrad_update
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import set_subtensor [as alias]
def adagrad_update(self, cost, learning_rate, eps=1e-8):
    params = [p if p != self.slices else self.EMB for p in self.params]
    accumulators = [theano.shared(numpy.zeros(p.get_value(borrow=True).shape,
                                              dtype=theano.config.floatX))
                    for p in params]
    gparams = [T.grad(cost, param) for param in self.params]
    self.gparams = gparams
    updates = []
    for param, gparam, acc in zip(self.params, gparams, accumulators):
        if param == self.slices:
            acc_slices = acc[self.x.flatten()]
            new_acc_slices = acc_slices + gparam ** 2
            updates.append((acc, T.set_subtensor(acc_slices, new_acc_slices)))
            updates.append((self.EMB, T.inc_subtensor(param,
                            -learning_rate * gparam / T.sqrt(new_acc_slices + eps))))
        else:
            new_acc = acc + gparam ** 2
            updates.append((acc, new_acc))
            updates.append((param, param - learning_rate * gparam /
                            T.sqrt(new_acc + eps)))
    return updates
Example 11: ConvByPattern
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import set_subtensor [as alias]
def ConvByPattern(x, patterns, mask=None):
    W = np.transpose(patterns, (3, 0, 1, 2))
    out2 = T.nnet.conv2d(x.dimshuffle(0, 3, 1, 2), W, filter_shape=W.shape, border_mode='half')

    if mask is not None:
        ## mask has shape (batchSize, #rows_to_be_masked, nCols)

        ## a subtensor of out2 along the horizontal direction
        out2_sub_horiz = out2[:, :, :mask.shape[1], :]
        mask_horiz = mask.dimshuffle(0, 'x', 1, 2)
        out3 = T.set_subtensor(out2_sub_horiz, T.mul(out2_sub_horiz, mask_horiz))

        ## a subtensor of out3 along the vertical direction
        out3_sub_vertical = out3[:, :, :, :mask.shape[1]]
        mask_vertical = mask.dimshuffle(0, 'x', 2, 1)
        y = T.set_subtensor(out3_sub_vertical, T.mul(out3_sub_vertical, mask_vertical))
    else:
        y = out2

    y = y.dimshuffle(0, 2, 3, 1)

    return y / np.prod(patterns.shape[1:3])
Example 12: combine_fragments_to_dense_bxcyz
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import set_subtensor [as alias]
def combine_fragments_to_dense_bxcyz(self, tensor, sh):
    """ Expected shape: (batch, x, channels, y, z) """
    ttensor = tensor  # same shape as the result, no significant time cost
    output_stride = self.output_stride
    if isinstance(output_stride, (list, tuple)):
        example_stride = np.prod(output_stride)
    else:
        example_stride = output_stride ** 3
        output_stride = np.asarray((output_stride,) * 3)
    zero = np.array((0), dtype=theano.config.floatX)
    # The first argument of T.alloc is the fill value (0 here), not part of the shape.
    embedding = T.alloc(zero, 1, sh[1] * output_stride[0], sh[2],
                        sh[3] * output_stride[1], sh[4] * output_stride[2])
    ix = offset_map(output_stride)
    print(" output_stride", output_stride)
    print(" example_stride", example_stride)
    for i, (n, m, k) in enumerate(ix):
        embedding = T.set_subtensor(
            embedding[:, n::output_stride[0], :, m::output_stride[1], k::output_stride[2]],
            ttensor[i::example_stride])
    return embedding
Example 13: depool
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import set_subtensor [as alias]
def depool(X, factor=2):
    """
    Luke perforated upsample: http://www.brml.org/uploads/tx_sibibtex/281.pdf
    """
    output_shape = [
        X.shape[1],
        X.shape[2] * factor,
        X.shape[3] * factor
    ]
    stride = X.shape[2]
    offset = X.shape[3]
    in_dim = stride * offset
    out_dim = in_dim * factor * factor

    upsamp_matrix = T.zeros((in_dim, out_dim))
    rows = T.arange(in_dim)
    # Integer division; the Python 2 original used `/`, which would
    # produce float indices under Python 3 semantics.
    cols = rows * factor + (rows // stride * factor * offset)
    upsamp_matrix = T.set_subtensor(upsamp_matrix[rows, cols], 1.)

    flat = T.reshape(X, (X.shape[0], output_shape[0], X.shape[2] * X.shape[3]))

    up_flat = T.dot(flat, upsamp_matrix)
    upsamp = T.reshape(up_flat, (X.shape[0], output_shape[0],
                                 output_shape[1], output_shape[2]))

    return upsamp
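A hedged check of depool (assuming the function as given above, with the integer-division fix): perforated upsampling keeps each input value at the top-left corner of its factor-by-factor block and fills the rest with zeros.

import numpy as np
import theano
import theano.tensor as T

X = T.tensor4('X')
f = theano.function([X], depool(X, factor=2))
out = f(np.ones((1, 1, 2, 2), dtype=theano.config.floatX))
print(out.shape)   # (1, 1, 4, 4)
print(out[0, 0])   # ones at even (row, col) positions, zeros elsewhere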
Example 14: temporal_padding
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import set_subtensor [as alias]
def temporal_padding(x, padding=(1, 1)):
    """Pad the middle dimension of a 3D tensor
    with "padding" zeros left and right.

    Apologies for the inane API, but Theano makes this
    really hard.
    """
    assert len(padding) == 2
    input_shape = x.shape
    output_shape = (input_shape[0],
                    input_shape[1] + padding[0] + padding[1],
                    input_shape[2])
    output = T.zeros(output_shape)
    result = T.set_subtensor(output[:, padding[0]:x.shape[1] + padding[0], :], x)
    if hasattr(x, '_keras_shape'):
        result._keras_shape = (x._keras_shape[0],
                               x._keras_shape[1] + py_sum(padding),
                               x._keras_shape[2])
    return result
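An illustrative shape check (assuming the function above; a (batch, time, features) input gains one zero timestep on each side):

import numpy as np
import theano
import theano.tensor as T

x = T.tensor3('x')
f = theano.function([x], temporal_padding(x, (1, 1)))
out = f(np.ones((2, 4, 3), dtype=theano.config.floatX))
print(out.shape)              # (2, 6, 3)
print(out[0, 0], out[0, -1])  # both are all-zero padding timesteps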
Example 15: rmsprop
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import set_subtensor [as alias]
def rmsprop(self, param, grad, updates, sample_idx=None, epsilon=1e-6):
    v1 = np.float32(self.adapt_params[0])
    v2 = np.float32(1.0 - self.adapt_params[0])
    acc = theano.shared(param.get_value(borrow=False) * 0., borrow=True)
    if sample_idx is None:
        acc_new = v1 * acc + v2 * grad ** 2
        updates[acc] = acc_new
    else:
        acc_s = acc[sample_idx]
        # acc_new = v1 * acc_s + v2 * grad ** 2          # faster, but inaccurate when an index occurs multiple times
        # updates[acc] = T.set_subtensor(acc_s, acc_new)  # faster, but inaccurate when an index occurs multiple times
        updates[acc] = T.inc_subtensor(T.set_subtensor(acc_s, acc_s * v1)[sample_idx],
                                       v2 * grad ** 2)  # slower, but accurate when an index occurs multiple times
        acc_new = updates[acc][sample_idx]  # slower, but accurate when an index occurs multiple times
    gradient_scaling = T.cast(T.sqrt(acc_new + epsilon), theano.config.floatX)
    return grad / gradient_scaling
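The commented-out lines trade accuracy for speed: with set_subtensor, duplicate entries in sample_idx overwrite each other, while inc_subtensor accumulates them. A small standalone check of that difference (illustrative, not from the original repo):

import numpy as np
import theano
import theano.tensor as T

acc = theano.shared(np.zeros(4, dtype=theano.config.floatX))
idx = np.array([2, 2], dtype='int32')  # the same index twice
ones = np.ones(2, dtype=theano.config.floatX)

set_v = theano.function([], T.set_subtensor(acc[idx], ones))()
inc_v = theano.function([], T.inc_subtensor(acc[idx], ones))()
print(set_v)  # index 2 ends up at 1: the second write overwrites the first
print(inc_v)  # index 2 ends up at 2: both contributions accumulate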