This article collects typical usage examples of the theano.tensor.addbroadcast method in Python: what exactly tensor.addbroadcast does and how to use it. The curated code samples below may help; you can also explore other methods of the theano.tensor module.
The following presents 15 code examples of tensor.addbroadcast, sorted by popularity by default.
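Before the examples, a minimal sketch (not from any of the projects below) of what T.addbroadcast does: it returns a view of a tensor with the given axes marked as broadcastable, which Theano requires before a length-1 axis can broadcast in elementwise operations.

import numpy as np
import theano
import theano.tensor as T

x = T.matrix('x')             # shape (n, m)
b = T.matrix('b')             # will be fed shape (n, 1), but Theano cannot infer that
y = x + T.addbroadcast(b, 1)  # mark axis 1 broadcastable so (n, 1) broadcasts over (n, m)
f = theano.function([x, b], y)
out = f(np.ones((2, 3), dtype=theano.config.floatX),
        np.ones((2, 1), dtype=theano.config.floatX))
print(out.shape)  # (2, 3); without addbroadcast the addition fails at runtime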
Example 1: get_output_for
# Required import: from theano import tensor [as alias]
# Alternatively: from theano.tensor import addbroadcast [as alias]
def get_output_for(self, inputs, **kwargs):
    inputs = autocrop(inputs, self.cropping)
    # modify the broadcasting pattern.
    if self.broadcastable is not None:
        for n, broadcasting_dim in enumerate(self.broadcastable):
            for dim, broadcasting in enumerate(broadcasting_dim):
                if broadcasting:
                    inputs[n] = T.addbroadcast(inputs[n], dim)
    output = None
    for input in inputs:
        if output is not None:
            output = self.merge_function(output, input)
        else:
            output = input
    return output

# Definition of the network
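This method appears to come from a Lasagne-style elementwise merge layer. A hypothetical standalone version of its broadcasting logic (names and shapes are mine, not from the original project):

import theano.tensor as T

def merge_with_broadcast(inputs, broadcastable, merge_function=T.mul):
    # Mark the requested axes broadcastable, then fold the inputs together.
    inputs = list(inputs)
    for n, dims in enumerate(broadcastable):
        for dim, flag in enumerate(dims):
            if flag:
                inputs[n] = T.addbroadcast(inputs[n], dim)
    output = inputs[0]
    for x in inputs[1:]:
        output = merge_function(output, x)
    return output

# e.g. a (batch, 1) gate scaling (batch, channels) features:
# out = merge_with_broadcast([features, gate], [(False, False), (False, True)])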
Example 2: __init__
# Required import: from theano import tensor [as alias]
# Alternatively: from theano.tensor import addbroadcast [as alias]
def __init__(self, incomings, parameters, layer_num,
             W=lasagne.init.Normal(0.01),
             num_features=None,
             **kwargs):
    super(DCNNLayer, self).__init__(incomings, **kwargs)
    self.parameters = parameters
    if num_features is None:
        self.num_features = self.parameters.num_features
    else:
        self.num_features = num_features
    self.W = T.addbroadcast(
        self.add_param(W, (1, parameters.num_hops + 1, self.num_features),
                       name='DCNN_W_%d' % layer_num), 0)
    self.nonlinearity = params.nonlinearity_map[self.parameters.dcnn_nonlinearity]
Example 3: create_updates
# Required import: from theano import tensor [as alias]
# Alternatively: from theano.tensor import addbroadcast [as alias]
def create_updates(self, grads, params, alpha, opt_alg, opt_params):
    # call the super class to generate SGD/ADAM updates
    grad_updates = Model.create_updates(self, grads, params, alpha, opt_alg, opt_params)
    # create updates for the centering signal
    # load neural net outputs (probabilities have been precomputed)
    _, _, _, l_cv, c, v = self.network
    log_pxz, log_qz_given_x = self.log_pxz, self.log_qz_given_x
    cv = T.addbroadcast(lasagne.layers.get_output(l_cv), 1)
    # compute learning signals
    l = log_pxz - log_qz_given_x - cv
    l_avg, l_var = l.mean(), l.var()
    c_new = 0.8 * c + 0.2 * l_avg
    v_new = 0.8 * v + 0.2 * l_var
    # compute updates for the centering signal
    cv_updates = {c: c_new, v: v_new}
    # list() keeps this working on Python 3, where dict.items() returns a view
    return OrderedDict(list(grad_updates.items()) + list(cv_updates.items()))
Example 4: create_updates
# Required import: from theano import tensor [as alias]
# Alternatively: from theano.tensor import addbroadcast [as alias]
def create_updates(self, grads, params, alpha, opt_alg, opt_params):
    # call the super class to generate SGD/ADAM updates
    grad_updates = Model.create_updates(self, grads, params, alpha, opt_alg, opt_params)
    # create updates for the centering signal
    # load neural net outputs (probabilities have been precomputed)
    l_px_mu, l_px_logsigma, l_pa_mu, l_pa_logsigma, \
        l_qa_mu, l_qa_logsigma, l_qz_mu, l_qz_logsigma, l_qa, l_qz, l_cv, c, v = self.network
    log_pxz, log_px_given_z, log_pz = self.log_pxz, self.log_px_given_z, self.log_pz
    log_qz_given_x = self.log_qz_given_x
    cv = T.addbroadcast(lasagne.layers.get_output(l_cv), 1)
    # compute learning signals
    l = log_px_given_z + log_pz - log_qz_given_x - cv
    l_avg, l_var = l.mean(), l.var()
    c_new = 0.8 * c + 0.2 * l_avg
    v_new = 0.8 * v + 0.2 * l_var
    # compute updates for the centering signal
    cv_updates = {c: c_new, v: v_new}
    # list() keeps this working on Python 3, where dict.items() returns a view
    return OrderedDict(list(grad_updates.items()) + list(cv_updates.items()))
Example 5: test_sharpening
# Required import: from theano import tensor [as alias]
# Alternatively: from theano.tensor import addbroadcast [as alias]
import numpy as np
import theano
import theano.tensor as T

def test_sharpening():
    weight_var, gamma_var = T.tensor3s('weight', 'gamma')
    gamma_var = T.addbroadcast(gamma_var, 2)
    w = T.pow(weight_var + 1e-6, gamma_var)
    w /= T.sum(w, axis=2).dimshuffle(0, 1, 'x')
    sharpening_fn = theano.function([weight_var, gamma_var], w)
    weights = np.random.rand(16, 4, 128)
    gamma = np.random.rand(16, 4, 1)
    weight_t = sharpening_fn(weights, gamma)
    weight_t_manual = np.zeros_like(weight_t)
    for i in range(16):
        for j in range(4):
            for k in range(128):
                weight_t_manual[i, j, k] = np.power(weights[i, j, k] + 1e-6, gamma[i, j])
            weight_t_manual[i, j] /= np.sum(weight_t_manual[i, j])
    assert weight_t.shape == (16, 4, 128)
    assert np.allclose(weight_t, weight_t_manual)
Example 6: setup
# Required import: from theano import tensor [as alias]
# Alternatively: from theano.tensor import addbroadcast [as alias]
def setup(self, bottom, top):
    if len(bottom) != 1:
        raise Exception("Need a single input to compute the softmax.")
    preds = T.ftensor4()
    top_diff = T.ftensor4()
    preds_max = T.addbroadcast(T.max(preds, axis=1, keepdims=True), 1)
    preds_exp = np.exp(preds - preds_max)
    # min_prob is a small floor constant, presumably defined elsewhere in the original module
    probs = preds_exp / T.addbroadcast(T.sum(preds_exp, axis=1, keepdims=True), 1) + min_prob
    probs = probs / T.sum(probs, axis=1, keepdims=True)
    probs_sum = T.sum(probs * top_diff)
    self.forward_theano = theano.function([preds], probs)
    self.backward_theano = theano.function([preds, top_diff], T.grad(probs_sum, preds))
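For reference, a standalone sketch of the stabilized softmax built above, with min_prob omitted and shapes assumed to be (batch, classes, height, width):

import numpy as np
import theano
import theano.tensor as T

preds = T.ftensor4('preds')
preds_max = T.addbroadcast(T.max(preds, axis=1, keepdims=True), 1)
preds_exp = T.exp(preds - preds_max)  # subtracting the max avoids overflow in exp
probs = preds_exp / T.addbroadcast(T.sum(preds_exp, axis=1, keepdims=True), 1)
softmax_fn = theano.function([preds], probs)

scores = np.random.randn(2, 5, 3, 3).astype('float32')
assert np.allclose(softmax_fn(scores).sum(axis=1), 1.0, atol=1e-5)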
Example 7: sequence_iteration
# Required import: from theano import tensor [as alias]
# Alternatively: from theano.tensor import addbroadcast [as alias]
def sequence_iteration(self, output, mask, use_dropout=0, dropout_value=0.5):
    dot_product = T.dot(output, self.t_w_out)
    net_o = T.add(dot_product, self.t_b_out)
    ex_net = T.exp(net_o)
    sum_net = T.sum(ex_net, axis=2, keepdims=True)
    softmax_o = ex_net / sum_net
    mask = T.addbroadcast(mask, 2)  # TODO: is this necessary?
    output = T.mul(mask, softmax_o) + T.mul((1. - mask), 1e-6)
    return output  # result
###### Linear Layer
########################################
Example 8: t_forward_step
# Required import: from theano import tensor [as alias]
# Alternatively: from theano.tensor import addbroadcast [as alias]
def t_forward_step(self, mask, rzup_in_sig, h_pre, u_rz, u_up, t_n_out):  # u_r, u_z,
    signal_act = self.activation
    gate_act = self.sigmoid()
    preact = T.dot(h_pre, u_rz)
    r = gate_act(T.add(rzup_in_sig[:, 0:t_n_out], preact[:, 0:t_n_out]))                # T.dot(h_pre, u_r)
    z = gate_act(T.add(rzup_in_sig[:, t_n_out:2 * t_n_out], preact[:, t_n_out:2 * t_n_out]))  # T.dot(h_pre, u_z)
    h_update = signal_act(T.add(rzup_in_sig[:, 2 * t_n_out:3 * t_n_out], T.dot(T.mul(h_pre, r), u_up)))
    h_new = T.add((1. - z) * h_update, z * h_pre)
    mask = T.addbroadcast(mask, 1)
    out_sig = T.add(mask * h_new, (1. - mask) * h_pre)
    return out_sig
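For context, this step packs the three GRU input projections into rzup_in_sig as [r | z | candidate] slices of width t_n_out; the final two lines use a (batch, 1) mask so padded timesteps carry the previous state through unchanged. A minimal sketch of just that masking idiom (names are mine):

import theano.tensor as T

def apply_step_mask(mask, h_new, h_pre):
    # mask: (batch, 1); h_new, h_pre: (batch, n_out)
    mask = T.addbroadcast(mask, 1)             # let the length-1 axis broadcast over units
    return mask * h_new + (1. - mask) * h_pre  # masked samples keep their previous state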
Example 9: squeeze
# Required import: from theano import tensor [as alias]
# Alternatively: from theano.tensor import addbroadcast [as alias]
def squeeze(x, axis):
    '''Remove a 1-dimension from the tensor at index "axis".'''
    x = T.addbroadcast(x, axis)
    return T.squeeze(x)
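A usage sketch for the helper above (shapes are my assumption):

import numpy as np
import theano
import theano.tensor as T

x = T.tensor3('x')  # e.g. shape (batch, 1, features)
y = squeeze(x, 1)   # addbroadcast tags axis 1 as broadcastable, T.squeeze then drops it
f = theano.function([x], y)
print(f(np.zeros((4, 1, 7), dtype=theano.config.floatX)).shape)  # (4, 7)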
Example 10: get_padded_shuffled_mask
# Required import: from theano import tensor [as alias]
# Alternatively: from theano.tensor import addbroadcast [as alias]
def get_padded_shuffled_mask(self, train, X, pad=0):
    mask = self.get_input_mask(train)
    if mask is None:
        mask = T.ones_like(X.sum(axis=-1))  # is there a better way to do this without a sum?
    # mask is (nb_samples, time)
    mask = T.shape_padright(mask)    # (nb_samples, time, 1)
    mask = T.addbroadcast(mask, -1)  # (nb_samples, time, 1), last axis now broadcastable
    mask = mask.dimshuffle(1, 0, 2)  # (time, nb_samples, 1)
    if pad > 0:
        # left-pad in time with 0
        padding = alloc_zeros_matrix(pad, mask.shape[1], 1)
        mask = T.concatenate([padding, mask], axis=0)
    return mask.astype('int8')
Example 11: grad
# Required import: from theano import tensor [as alias]
# Alternatively: from theano.tensor import addbroadcast [as alias]
def grad(self, inputs, gout):
    (x,) = inputs
    (gz,) = gout
    if x.dtype not in continuous_dtypes:
        return [x.zeros_like(dtype=theano.config.floatX)]
    if self.structured:
        if self.axis is None:
            r = gz * theano.sparse.sp_ones_like(x)
        elif self.axis == 0:
            r = col_scale(theano.sparse.sp_ones_like(x), gz)
        elif self.axis == 1:
            r = row_scale(theano.sparse.sp_ones_like(x), gz)
        else:
            raise ValueError('Illegal value for self.axis.')
    else:
        o_format = x.format
        x = dense_from_sparse(x)
        if _is_sparse_variable(gz):
            gz = dense_from_sparse(gz)
        if self.axis is None:
            r = tensor.second(x, gz)
        else:
            ones = tensor.ones_like(x)
            if self.axis == 0:
                r = tensor.addbroadcast(gz.dimshuffle('x', 0), 0) * ones
            elif self.axis == 1:
                r = tensor.addbroadcast(gz.dimshuffle(0, 'x'), 1) * ones
            else:
                raise ValueError('Illegal value for self.axis.')
        r = SparseFromDense(o_format)(r)
    return [r]
Example 12: get_output_for
# Required import: from theano import tensor [as alias]
# Alternatively: from theano.tensor import addbroadcast [as alias]
def get_output_for(self, input, style=None, **kwargs):
    mean = input.mean(self.axes)
    inv_std = T.inv(T.sqrt(input.var(self.axes) + self.epsilon))
    pattern = [0, 1, 'x', 'x']
    if style is None:
        pattern_params = ['x', 0, 'x', 'x']
        beta = 0 if self.beta is None else self.beta.dimshuffle(pattern_params)
        gamma = 1 if self.gamma is None else self.gamma.dimshuffle(pattern_params)
    else:
        pattern_params = pattern
        beta = 0 if self.beta is None else self.beta[style].dimshuffle(pattern_params)
        gamma = 1 if self.gamma is None else self.gamma[style].dimshuffle(pattern_params)
        # if self.beta is not None:
        #     beta = ifelse(T.eq(style.shape[0], 1), T.addbroadcast(beta, 0), beta)
        # if self.gamma is not None:
        #     gamma = ifelse(T.eq(style.shape[0], 1), T.addbroadcast(gamma, 0), gamma)
    mean = mean.dimshuffle(pattern)
    inv_std = inv_std.dimshuffle(pattern)
    # normalize
    normalized = (input - mean) * (gamma * inv_std) + beta
    return normalized
Example 13: get_t_weights
# Required import: from theano import tensor [as alias]
# Alternatively: from theano.tensor import addbroadcast [as alias]
def get_t_weights(self, t):
    """
    Generate a vector of weights allowing selection of the current timestep.
    (If t is not an integer, the weights will linearly interpolate.)
    """
    n_seg = self.trajectory_length
    t_compare = T.arange(n_seg, dtype=theano.config.floatX).reshape((1, n_seg))
    diff = abs(T.addbroadcast(t, 1) - T.addbroadcast(t_compare, 0))
    t_weights = T.max(T.join(1, (-diff + 1).reshape((n_seg, 1)), T.zeros((n_seg, 1))), axis=1)
    return t_weights.reshape((-1, 1))
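Concretely, each weight is max(1 - |t - k|, 0) over segment indices k, so an integer t selects a single timestep and a fractional t splits the weight linearly between its two neighbours. A NumPy check of the scheme (trajectory_length = 5 is my assumption):

import numpy as np

n_seg = 5
t = 2.3
diff = np.abs(t - np.arange(n_seg))
t_weights = np.maximum(1.0 - diff, 0.0)
print(t_weights)  # [0.  0.  0.7 0.3 0. ] -- interpolates between steps 2 and 3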
Example 14: output
# Required import: from theano import tensor [as alias]
# Alternatively: from theano.tensor import addbroadcast [as alias]
def output(self, input=None, *args, **kwargs):
    if input is None:
        input = self.input_layer.output(*args, **kwargs)
    if self.flip_conv_dims:  # flip the conv dims to get a faster convolution when filter_height is 1.
        flipped_input_shape = (self.input_shape[1], self.input_shape[0], self.input_shape[2])
        flipped_input = input.dimshuffle(1, 0, 2)
        conved = sconv2d(flipped_input, self.W, subsample=(1, self.stride),
                         image_shape=flipped_input_shape, filter_shape=self.filter_shape)
        conved = T.addbroadcast(conved, 0)  # else dimshuffle complains about dropping a non-broadcastable dimension
        conved = conved.dimshuffle(2, 1, 3)
    else:
        conved = sconv2d(input, self.W, subsample=(1, self.stride),
                         image_shape=self.input_shape, filter_shape=self.filter_shape)
        conved = conved.dimshuffle(0, 1, 3)  # gets rid of the obsolete filter height dimension
    return self.nonlinearity(conved + self.b.dimshuffle('x', 0, 'x'))
# def dropoutput_train(self):
#     p = self.dropout
#     input = self.input_layer.dropoutput_train()
#     if p > 0.:
#         srng = RandomStreams()
#         input = input * srng.binomial(self.input_layer.get_output_shape(), p=1 - p, dtype='int32').astype('float32')
#     return self.output(input)

# def dropoutput_predict(self):
#     p = self.dropout
#     input = self.input_layer.dropoutput_predict()
#     if p > 0.:
#         input = input * (1 - p)
#     return self.output(input)
Example 15: _outer_substract
# Required import: from theano import tensor [as alias]
# Alternatively: from theano.tensor import addbroadcast [as alias]
def _outer_substract(self, x, y):
    z = x.dimshuffle(0, 1, 'x')
    z = T.addbroadcast(z, 2)
    return (z - y.T).dimshuffle(0, 2, 1)
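Despite the typo in its name, the method computes a pairwise ("outer") difference: for x of shape (n_x, d) and y of shape (n_y, d) it returns out[i, j] = x[i] - y[j] with shape (n_x, n_y, d). A NumPy equivalent for checking (names are mine):

import numpy as np

def outer_subtract_np(x, y):
    # x: (n_x, d), y: (n_y, d) -> (n_x, n_y, d)
    return x[:, None, :] - y[None, :, :]

x = np.random.rand(3, 4)
y = np.random.rand(5, 4)
assert outer_subtract_np(x, y).shape == (3, 5, 4)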