This page collects typical usage examples of the theano.tensor.set_subtensor function in Python. If you are wondering what set_subtensor does, how to call it, or what real-world usage looks like, the hand-picked code samples below should help.
The following shows 15 code examples of the set_subtensor function, sorted by popularity by default.
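Before the collected examples, here is a minimal, self-contained sketch (not taken from any of the repositories below) of the core idea: T.set_subtensor returns a new symbolic variable in which the indexed region has been replaced, and T.inc_subtensor returns one in which it has been incremented; the original variable is never modified in place, so the result must be assigned or otherwise used.

import numpy as np
import theano
import theano.tensor as T

x = T.matrix('x')
y = T.set_subtensor(x[0, :], 0.0)   # replace the first row with zeros
y = T.inc_subtensor(y[:, -1], 1.0)  # then add 1.0 to the last column
f = theano.function([x], y)

print(f(np.arange(6, dtype=theano.config.floatX).reshape(2, 3)))
# [[ 0.  0.  1.]
#  [ 3.  4.  6.]]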
Example 1: call
def call(self, X):
    if type(X) is not list or len(X) != 2:
        raise Exception("SquareAttention must be called on a list of two tensors. Got: " + str(X))
    frame, position = X[0], X[1]
    # Reshaping the input to exclude the time dimension
    frameShape = K.shape(frame)
    positionShape = K.shape(position)
    (chans, height, width) = frameShape[-3:]
    targetDim = positionShape[-1]
    frame = K.reshape(frame, (-1, chans, height, width))
    position = K.reshape(position, (-1, ) + (targetDim, ))
    # Applying the attention
    hw = THT.abs_(position[:, 2] - position[:, 0]) * self.scale / 2.0
    hh = THT.abs_(position[:, 3] - position[:, 1]) * self.scale / 2.0
    position = THT.maximum(THT.set_subtensor(position[:, 0], position[:, 0] - hw), -1.0)
    position = THT.minimum(THT.set_subtensor(position[:, 2], position[:, 2] + hw), 1.0)
    position = THT.maximum(THT.set_subtensor(position[:, 1], position[:, 1] - hh), -1.0)
    position = THT.minimum(THT.set_subtensor(position[:, 3], position[:, 3] + hh), 1.0)
    rX = Data.linspace(-1.0, 1.0, width)
    rY = Data.linspace(-1.0, 1.0, height)
    FX = THT.gt(rX, position[:, 0].dimshuffle(0, 'x')) * THT.le(rX, position[:, 2].dimshuffle(0, 'x'))
    FY = THT.gt(rY, position[:, 1].dimshuffle(0, 'x')) * THT.le(rY, position[:, 3].dimshuffle(0, 'x'))
    m = FY.dimshuffle(0, 1, 'x') * FX.dimshuffle(0, 'x', 1)
    m = m + self.alpha - THT.gt(m, 0.) * self.alpha
    frame = frame * m.dimshuffle(0, 'x', 1, 2)
    # Reshaping the frame to include the time dimension again
    output = K.reshape(frame, frameShape)
    return output
Example 2: pass_edges
def pass_edges(input_idx_t, edge_t, edge_mask_t, counter_t, h_tm1, c_tm1, x):
    h_t = h_tm1
    c_t = c_tm1
    # select the input vector to use for this edge (source)
    x_t_i = x[input_idx_t, :]
    # zero out the input unless this is a leaf node
    x_t_0 = T.switch(T.eq(T.sum(edge_mask_t), 0), x_t_i, x_t_i * 0)
    # concatenate with the input edge vector
    x_t_edge = T.concatenate([x_t_0, edge_t])
    # compute attention weights, using a manual softmax
    attention_scores = T.dot(self.v_a, T.tanh(T.dot(self.W_h_a, h_tm1)))  # (1, n_edges)
    # find the max of the unmasked values
    max_score = T.max(attention_scores + edge_mask_t * 10000.0) - 10000.0
    # exponentiate the differences, masking first to avoid inf, and then to keep only relevant scores
    exp_scores = T.exp((attention_scores - max_score) * edge_mask_t) * edge_mask_t
    # take the sum, and add one if the mask is all zeros to avoid an inf
    exp_scores_sum = T.sum(exp_scores) + T.switch(T.eq(T.sum(edge_mask_t), 0), 1.0, 0.0)
    # normalize to compute the weights
    weighted_mask = exp_scores / exp_scores_sum
    i_t = T.nnet.sigmoid(T.dot(x_t_edge, self.W_x_i) + T.sum(T.dot(self.W_h_i.T, (weighted_mask * h_tm1)).T, axis=0) + self.b_h_i)
    f_t = T.nnet.sigmoid(T.dot(x_t_edge, self.W_x_f) + T.sum(T.dot(self.W_h_f.T, (weighted_mask * h_tm1)).T, axis=0) + self.b_h_f)
    o_t = T.nnet.sigmoid(T.dot(x_t_edge, self.W_x_o) + T.sum(T.dot(self.W_h_o.T, (weighted_mask * h_tm1)).T, axis=0) + self.b_h_o)
    u_t = T.tanh(T.dot(x_t_edge, self.W_x_u) + T.sum(T.dot(self.W_h_u.T, (weighted_mask * h_tm1)).T, axis=0) + self.b_h_u)
    c_temp = i_t * u_t + f_t * T.sum((weighted_mask * c_tm1).T, axis=0)
    h_temp = o_t * T.tanh(c_temp)
    h_t = T.set_subtensor(h_t[:, counter_t], h_temp)
    c_t = T.set_subtensor(c_t[:, counter_t], c_temp)
    return h_t, c_t
Example 3: pad
def pad(inp, padding):
    if all([padval == 0 for padval in pyk.flatten(padding)]):
        return inp
    if inp.ndim == 4:
        # Make a zero tensor of the right shape
        zt = T.zeros(shape=(inp.shape[0], inp.shape[1], inp.shape[2] + sum(padding[0]), inp.shape[3] + sum(padding[1])))
        # Compute assignment slice
        [[ystart, ystop], [xstart, xstop]] = [[padval[0], (-padval[1] if padval[1] != 0 else None)]
                                              for padval in padding]
        # Assign subtensor
        padded = T.set_subtensor(zt[:, :, ystart:ystop, xstart:xstop], inp)
        return padded
    elif inp.ndim == 5:
        # Make a zero tensor of the right shape
        zt = T.zeros(shape=(inp.shape[0], inp.shape[1] + sum(padding[2]), inp.shape[2], inp.shape[3] + sum(padding[0]),
                            inp.shape[4] + sum(padding[1])))
        # Compute assignment slice
        [[ystart, ystop], [xstart, xstop], [zstart, zstop]] = [[padval[0], (-padval[1] if padval[1] != 0 else None)]
                                                               for padval in padding]
        # Assign subtensor
        padded = T.set_subtensor(zt[:, zstart:zstop, :, ystart:ystop, xstart:xstop], inp)
        return padded
    else:
        raise NotImplementedError("Padding is only implemented for 4 and 5 dimensional tensors.")
Example 4: update_log_p
def update_log_p(skip_idxs, zeros, active, log_p_curr, log_p_prev):
    active_skip_idxs = skip_idxs[(skip_idxs < active).nonzero()]
    active_next = T.cast(T.minimum(
        T.maximum(
            active + 1,
            T.max(T.concatenate([active_skip_idxs, [-1]])) + 2 + 1
        ),
        log_p_curr.shape[0]
    ), 'int32')
    common_factor = T.max(log_p_prev[:active])
    p_prev = T.exp(log_p_prev[:active] - common_factor)
    _p_prev = zeros[:active_next]
    # copy over
    _p_prev = T.set_subtensor(_p_prev[:active], p_prev)
    # previous transitions
    _p_prev = T.inc_subtensor(_p_prev[1:], _p_prev[:-1])
    # skip transitions
    _p_prev = T.inc_subtensor(
        _p_prev[active_skip_idxs + 2], p_prev[active_skip_idxs])
    updated_log_p_prev = T.log(_p_prev) + common_factor
    log_p_next = T.set_subtensor(
        zeros[:active_next],
        log_p_curr[:active_next] + updated_log_p_prev
    )
    return active_next, log_p_next
Example 5: crop_images
def crop_images(data, image_shape, border_width=8, mode=0):
    """ Function used to crop the images by a certain border width.

    data         : input data, theano 4D tensor
    image_shape  : 4-tuple, (batch_size, num_channels, image_rows, image_cols)
    border_width : border width to be cropped, default value 8
    mode         : binary, 0 for random crop, 1 for centered crop.
    """
    if mode == 0:
        row_step = image_shape[2] - border_width
        col_step = image_shape[3] - border_width
        output = T.alloc(0., image_shape[0], image_shape[1], row_step, col_step)
        for i in range(image_shape[0]):
            begin_idx = numpy.random.randint(border_width)
            output = T.set_subtensor(output[i, :, :, :],
                                     data[i, :, begin_idx:(begin_idx + row_step), begin_idx:(begin_idx + col_step)])
        return output
    else:
        row_step = image_shape[2] - border_width
        col_step = image_shape[3] - border_width
        output = T.alloc(0., image_shape[0], image_shape[1], row_step, col_step)
        for i in range(image_shape[0]):
            begin_idx = border_width // 2  # integer division so the index stays an int on Python 3 as well
            output = T.set_subtensor(output[i, :, :, :],
                                     data[i, :, begin_idx:(begin_idx + row_step), begin_idx:(begin_idx + col_step)])
        return output
Example 6: compile_dream
def compile_dream(self, X_train, dream_state, initializer):
    self.dream_compiled = True
    X_dream_shape = list(X_train.shape)
    X_dream_shape[0] = 1
    X_dream_shape[1] -= len(dream_state)
    X_dream = initializer(tuple(X_dream_shape))
    self.X_dream = theano.shared(atleast_4d(np.append(dream_state, X_dream).astype('float32')))
    current_layer = self.X_dream
    # set_subtensor returns a new variable, so assign the result back for the update to take effect
    current_layer = T.set_subtensor(current_layer[:, len(dream_state):, :],
                                    Activations.softmax(current_layer[:, len(dream_state):, :]))
    for layer, params in zip(self.layers, self.params_shared):
        current_layer = layer.get_output(
            current_layer, params, testing=True)
    y_hat_dream = current_layer.flatten(1)
    self.optimizer.build([[self.X_dream.get_value()]])
    dream_updates = list(self.optimizer.get_updates([self.X_dream], -y_hat_dream[0]))
    original_var = dream_updates[1][0][:, len(dream_state):, :]
    new_var = dream_updates[1][1][:, len(dream_state):, :]
    dream_updates[1] = (self.X_dream, T.set_subtensor(original_var, new_var))
    self.dream_update = theano.function(
        inputs=[],
        outputs=y_hat_dream,
        updates=dream_updates
    )
Example 7: _step
def _step(c, c_m, hidden, c_matrix):
    node_idx = c[:, 0]
    left_child_idx = c[:, 1]
    right_child_idx = c[:, 2]
    all_samples = T.arange(n_samples)
    recursive = (
        T.dot(hidden[left_child_idx, all_samples, :], self.W)
        + T.dot(hidden[right_child_idx, all_samples, :], self.U)
        + self.b
    )
    i = T.nnet.sigmoid(_slice(recursive, 0, self.dim_proj))
    f1 = T.nnet.sigmoid(_slice(recursive, 1, self.dim_proj))
    f2 = T.nnet.sigmoid(_slice(recursive, 2, self.dim_proj))
    o = T.nnet.sigmoid(_slice(recursive, 3, self.dim_proj))
    c_prime = T.tanh(_slice(recursive, 4, self.dim_proj))
    new_c = (
        i * c_prime
        + f1 * c_matrix[left_child_idx, all_samples, :]
        + f2 * c_matrix[right_child_idx, all_samples, :]
    )
    new_c_masked = c_m[:, None] * new_c + (1.0 - c_m[:, None]) * c_matrix[node_idx, all_samples, :]
    new_h = o * T.tanh(new_c_masked)
    new_h_masked = c_m[:, None] * new_h + (1.0 - c_m[:, None]) * hidden[node_idx, all_samples, :]
    return (
        T.set_subtensor(hidden[node_idx, all_samples], new_h_masked),
        T.set_subtensor(c_matrix[node_idx, all_samples], new_c_masked),
    )
Example 8: create_adam_updates
def create_adam_updates(updates, params, gparams, gsums, xsums, lr, eps, beta1, beta2):
    i = theano.shared(np.float64(0.0).astype(theano.config.floatX))
    i_t = i + 1.0
    omb1_t = 1.0 - beta1**i_t
    omb2_t = 1.0 - beta2**i_t
    lr_t = lr * (T.sqrt(omb2_t) / omb1_t)
    for p, g, m, v in zip(params, gparams, gsums, xsums):
        if is_subtensor_op(p):
            origin, indexes = get_subtensor_op_inputs(p)
            m_sub = m[indexes]
            v_sub = v[indexes]
            m_t = beta1 * m_sub + (1.0 - beta1) * g
            v_t = beta2 * v_sub + (1.0 - beta2) * T.sqr(g)
            g_t = m_t / (T.sqrt(v_t) + eps)
            updates[m] = T.set_subtensor(m_sub, m_t)
            updates[v] = T.set_subtensor(v_sub, v_t)
            updates[origin] = T.inc_subtensor(p, -lr_t * g_t)
        else:
            m_t = beta1 * m + (1.0 - beta1) * g
            v_t = beta2 * v + (1.0 - beta2) * T.sqr(g)
            g_t = m_t / (T.sqrt(v_t) + eps)
            updates[m] = m_t
            updates[v] = v_t
            updates[p] = p - lr_t * g_t
    updates[i] = i_t
Example 9: f_score
def f_score(self, y, label):
    # print dir(x)
    y = T.cast(y, 'int32')
    new_y_pred = T.sub(self.y_pred, label)
    new_y = T.sub(y, label)
    pre_pos_num = new_y_pred.shape[0] - new_y_pred.nonzero()[0].shape[0]  # number of predicted positives
    real_pos = new_y.shape[0] - new_y.nonzero()[0].shape[0]  # number of actual positives
    new_y_pred = T.set_subtensor(new_y_pred[new_y_pred.nonzero()[0]], 1)
    new_y = T.set_subtensor(new_y[new_y.nonzero()[0]], 2)
    r = T.neq(new_y_pred, new_y)
    true_pos = self.y_pred.shape[0] - r.sum()
    # printed_recall = theano.printing.Print('rec:')(pre_pos_num)
    # printed = theano.printing.Print('pre:')(real_pos)
    precision = true_pos / (T.cast(pre_pos_num, 'float32') + 0.0000001)
    recall = true_pos / (T.cast(real_pos, 'float32') + 0.0000001)
    f_score = (2 * precision * recall) / (precision + recall)
    return f_score, precision, recall
Example 10: update_stack
def update_stack(stack_t, shift_value, reduce_value, mask, model_dim):
    """
    Compute the new value of the given stack.

    This performs stack shifts and reduces in parallel, and somewhat
    wastefully. It accepts a precomputed reduce result (in `reduce_value`) and
    a precomputed shift value `shift` for all examples, and switches between
    the two outcomes based on the per-example value of `mask`.

    Args:
        stack_t: Current stack value
        shift_value: Batch of values to be shifted
        reduce_value: Batch of reduce results
        mask: Batch of booleans: 1 if reduce, 0 if shift
        model_dim: The dimension of shift_value and reduce_value.
    """
    # Build two copies of the stack batch: one where every stack has received
    # a shift op, and one where every stack has received a reduce op.

    # Copy 1: Shift.
    stack_s = T.set_subtensor(stack_t[:, 0, :model_dim], shift_value)
    stack_s = T.set_subtensor(stack_s[:, 1:], stack_t[:, :-1])

    # Copy 2: Reduce.
    stack_r = T.set_subtensor(stack_t[:, 0, :model_dim], reduce_value)
    stack_r = T.set_subtensor(stack_r[:, 1:-1], stack_t[:, 2:])

    # Make sure mask broadcasts over all dimensions after the first.
    mask = mask.dimshuffle(0, "x", "x")
    mask = T.cast(mask, dtype=theano.config.floatX)
    stack_next = mask * stack_r + (1. - mask) * stack_s
    return stack_next
Example 11: sample_update
def sample_update(self, data):
    proposal_samples, log_proposal_probs = self.proposal_distrib
    printing = False
    if printing:
        log_transition_probs = theano.printing.Print('1 log transition probs update')(self.true_log_transition_probs(self.current_state, proposal_samples))
        log_observation_probs = theano.printing.Print('2 log observation probs update')(self.true_log_observation_probs(proposal_samples, data.dimshuffle('x', 0)))
        log_unnorm_weights = theano.printing.Print('3 log unnorm weights update')(log_transition_probs + log_observation_probs - log_proposal_probs)
        log_unnorm_weights_center = theano.printing.Print('4 log unnorm weights center update')(log_unnorm_weights - T.max(log_unnorm_weights))
        unnorm_weights = theano.printing.Print('5 unnorm weights update')(T.exp(log_unnorm_weights_center) * self.current_weights)
        normalizer = theano.printing.Print('6 normalizer update')(T.sum(unnorm_weights))
    else:
        log_transition_probs = self.true_log_transition_probs(self.current_state, proposal_samples)
        log_observation_probs = self.true_log_observation_probs(proposal_samples, data.dimshuffle('x', 0))
        log_unnorm_weights = log_transition_probs + log_observation_probs - log_proposal_probs
        log_unnorm_weights_center = log_unnorm_weights - T.max(log_unnorm_weights)
        unnorm_weights = T.exp(log_unnorm_weights_center) * self.current_weights
        normalizer = T.sum(unnorm_weights)

    weights = unnorm_weights / normalizer

    updates = OrderedDict()
    updates[self.weights] = T.set_subtensor(self.next_weights, weights)
    updates[self.particles] = T.set_subtensor(self.next_state, proposal_samples)
    updates[self.time_counter] = self.time_counter + 1
    return updates
Example 12: get_learn_func
def get_learn_func(self):
    """
    Returns a theano function that takes an action and a reward,
    and updates the agent based on this experience.
    """
    a = T.iscalar()
    r = T.scalar()

    old_estimated_reward = self.estimated_rewards[a]
    old_observation_count = self.observation_counts[a]
    observation_count = old_observation_count + 1.

    delta = r - old_estimated_reward
    new_estimated_reward = old_estimated_reward + delta / observation_count

    new_estimated_rewards = T.set_subtensor(self.estimated_rewards[a],
                                            new_estimated_reward)
    new_observation_counts = T.set_subtensor(self.observation_counts[a], observation_count)

    updates = OrderedDict([
        (self.estimated_rewards, new_estimated_rewards),
        (self.observation_counts, new_observation_counts)
    ])

    rval = function([a, r], updates=updates)
    return rval
Example 13: create_valid_error
def create_valid_error(self):
    # self.valid_error = T.mean(T.abs_(self.predictions - self.pm25target[:, -self.steps:]), axis=0)
    pred = T.zeros_like(self.predictions)
    pred = T.set_subtensor(pred[:, 0], self.pm25in[:, 1, 0] + self.pm25target[:, -self.steps + 0])  # self.predictions[:, 0])
    for i in xrange(1, self.steps):
        pred = T.set_subtensor(pred[:, i], pred[:, i - 1] + self.pm25target[:, -self.steps + i])  # self.predictions[:, i])
    self.valid_error = T.mean(T.abs_(pred - self.pm25in[:, -self.steps:, 0]), axis=0)
Example 14: pass_edges
def pass_edges(input_idx_t, edge_t, edge_mask_t, counter_t, h_tm1, c_tm1, x):
    h_t = h_tm1
    c_t = c_tm1
    # select the input vector to use for this edge (source)
    input = x[input_idx_t, :]
    # zero out the input unless this is a leaf node
    input = T.switch(T.eq(T.sum(edge_mask_t), 0), input, input * 0)
    i_t = T.nnet.sigmoid(T.dot(input, self.W_x_i) + T.sum(T.dot(self.W_h_i.T, (edge_mask_t * h_tm1)).T, axis=0) + self.b_h_i)
    f_t = T.nnet.sigmoid(T.dot(input, self.W_x_f) + T.sum(T.dot(self.W_h_f.T, (edge_mask_t * h_tm1)).T, axis=0) + self.b_h_f)
    o_t = T.nnet.sigmoid(T.dot(input, self.W_x_o) + T.sum(T.dot(self.W_h_o.T, (edge_mask_t * h_tm1)).T, axis=0) + self.b_h_o)
    u_t = T.tanh(T.dot(input, self.W_x_u) + T.sum(T.dot(self.W_h_u.T, (edge_mask_t * h_tm1)).T, axis=0) + self.b_h_u)
    c_temp = i_t * u_t + f_t * T.sum((edge_mask_t * c_tm1).T, axis=0)
    h_temp = o_t * T.tanh(c_temp)

    # pass the output of above through another LSTM node for the edge
    ie_t = T.nnet.sigmoid(T.dot(edge_t, self.W_e_i) + T.dot(h_temp, self.W_eh_i) + self.b_e_i)
    fe_t = T.nnet.sigmoid(T.dot(edge_t, self.W_e_f) + T.dot(h_temp, self.W_eh_f) + self.b_e_f)
    oe_t = T.nnet.sigmoid(T.dot(edge_t, self.W_e_o) + T.dot(h_temp, self.W_eh_o) + self.b_e_o)
    ue_t = T.tanh(T.dot(edge_t, self.W_e_u) + T.dot(h_temp, self.W_eh_u) + self.b_e_u)
    ce_temp = ie_t * ue_t + fe_t * c_temp
    he_temp = oe_t * T.tanh(ce_temp)

    h_t = T.set_subtensor(h_t[:, counter_t], he_temp)
    c_t = T.set_subtensor(c_t[:, counter_t], ce_temp)
    return h_t, c_t
Example 15: T_subspacel1_slow_shrinkage_conv
def T_subspacel1_slow_shrinkage_conv(a, L, lam_sparse, lam_slow, imshp, kshp, featshp, stride=(1, 1), small_value=.001):
    featshp = (imshp[0], kshp[0], featshp[2], featshp[3])  # num images, features, szy, szx
    features = T.reshape(T.transpose(a), featshp, ndim=4)
    amp = T.sqrt(features[:, ::2, :, :]**2 + features[:, 1::2, :, :]**2 + small_value)
    # damp = amp[:, 1:] - amp[:, :-1]

    # compose slow shrinkage with subspace l1 shrinkage

    # slow shrinkage
    div = T.zeros_like(amp)
    d1 = amp[1:, :, :, :] - amp[:-1, :, :, :]
    d2 = d1[1:, :, :, :] - d1[:-1, :, :, :]
    div = T.set_subtensor(div[1:-1, :, :, :], -d2)
    div = T.set_subtensor(div[0, :, :, :], -d1[0, :, :, :])
    div = T.set_subtensor(div[-1, :, :, :], d1[-1, :, :, :])
    slow_amp_shrinkage = 1 - (lam_slow / L) * (div / amp)
    slow_amp_value = T.switch(T.gt(slow_amp_shrinkage, 0), slow_amp_shrinkage, 0)
    slow_shrinkage_prox_a = slow_amp_value * features[:, ::2, :, :]
    slow_shrinkage_prox_b = slow_amp_value * features[:, 1::2, :, :]

    # subspace l1 shrinkage
    amp_slow_shrinkage_prox = T.sqrt(slow_shrinkage_prox_a ** 2 + slow_shrinkage_prox_b ** 2)
    # amp_shrinkage = 1. - (lam_slow * lam_sparse / L) * amp_slow_shrinkage_prox
    amp_shrinkage = 1. - (lam_sparse / L) / amp_slow_shrinkage_prox
    amp_value = T.switch(T.gt(amp_shrinkage, 0.), amp_shrinkage, 0.)
    subspacel1_prox = T.zeros_like(features)
    subspacel1_prox = T.set_subtensor(subspacel1_prox[:, ::2, :, :], amp_value * slow_shrinkage_prox_a)
    subspacel1_prox = T.set_subtensor(subspacel1_prox[:, 1::2, :, :], amp_value * slow_shrinkage_prox_b)
    reshape_subspacel1_prox = T.transpose(T.reshape(subspacel1_prox, (featshp[0], featshp[1] * featshp[2] * featshp[3]), ndim=2))
    return reshape_subspacel1_prox