This article collects typical usage examples of the Python function theano.tensor.tile. If you have been wondering what tile does, how it is used, or what real code that calls it looks like, the curated examples below should help.
The 15 tile code examples shown below are ordered by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
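Before the examples, a minimal self-contained sketch of what theano.tensor.tile does may be useful; it repeats a tensor along each axis, analogous to numpy.tile (the variable names below are purely illustrative and not taken from any of the examples):

import numpy as np
import theano
import theano.tensor as T

x = T.matrix('x')                  # symbolic 2-D input
tiled = T.tile(x, (2, 3))          # repeat twice along rows, three times along columns
f = theano.function([x], tiled)

a = np.arange(4, dtype=theano.config.floatX).reshape(2, 2)
print(f(a).shape)                  # (4, 6), same values as np.tile(a, (2, 3))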
Example 1: fwd
def fwd(self, x, V, A, L):
    """
    x : signal
    V : eigenvectors
    A : area
    L : eigenvalues
    """
    V = V[:, :self.K]
    L = L[:self.K]
    L = L.dimshuffle('x', 'x', 0)
    rho = T.sqrt(T.sum(A))
    # Q x 1 x K, a window for each input function
    ghat = self.activation_interp(
        T.batched_dot(T.tile(L, [self.nin, 1, 1]), self.Winterp))
    # Q x K x N
    V_ = T.tile(V.dimshuffle('x', 1, 0), [self.nin, 1, 1])
    # Q x K x N
    tmp = (ghat * V).dimshuffle(0, 2, 1)
    # Q x N x N
    transl = rho * T.batched_dot(V_.dimshuffle(0, 2, 1), tmp)
    transl = A.dimshuffle('x', 0, 'x') * transl
    # Q x K x N
    tmp = (V.dimshuffle(0, 'x', 1) * x.dimshuffle(0, 1, 'x')).dimshuffle(1, 2, 0)
    # Q x K x N
    desc = rho * T.batched_dot(tmp, transl)
    desc = T.abs_(desc)
    desc = desc.dimshuffle(2, 0, 'x', 1)  # BC01 format : N x Q x 1 x K
    return self.activation(theano.tensor.nnet.conv.conv2d(desc, self.W).flatten(2) + self.b)
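Example 1 tiles the shared eigenvalue row so that T.batched_dot sees a matching leading batch axis on both operands. A minimal sketch of that pattern with made-up sizes (Q, K and the names L_b/W_b are assumptions, not part of the original code):

import numpy as np
import theano
import theano.tensor as T

Q, K = 3, 4                                           # hypothetical batch and spectrum sizes
L = T.vector('L')                                     # shared eigenvalue row, shape (K,)
W = T.matrix('W')                                     # shared weight matrix, shape (K, K)
# batched_dot multiplies matching batch elements, so the shared operands
# are tiled Q times along a new leading axis
L_b = T.tile(L.dimshuffle('x', 'x', 0), (Q, 1, 1))    # (Q, 1, K)
W_b = T.tile(W.dimshuffle('x', 0, 1), (Q, 1, 1))      # (Q, K, K)
out = T.batched_dot(L_b, W_b)                         # (Q, 1, K)

f = theano.function([L, W], out)
print(f(np.ones(K, dtype=theano.config.floatX),
        np.eye(K, dtype=theano.config.floatX)).shape)  # (3, 1, 4)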
Example 2: setup_generate
def setup_generate(self):
    # dimensions: (batch, time, 12)
    chord_types = T.btensor3()
    # dimensions: (batch, time)
    chord_roots = T.imatrix()
    n_batch, n_time = chord_roots.shape
    specs = [lstmstack.prepare_sample_scan(start_pos=T.alloc(np.array(encoding.STARTING_POSITION, np.int32), (n_batch)),
                                           start_out=T.tile(encoding.initial_encoded_form(), (n_batch, 1)),
                                           timestep=T.tile(T.arange(n_time), (n_batch, 1)),
                                           cur_chord_type=chord_types,
                                           cur_chord_root=chord_roots,
                                           deterministic_dropout=True)
             for lstmstack, encoding in zip(self.lstmstacks, self.encodings)]
    updates, all_chosen, all_probs, indiv_probs = helper_generate_from_spec(specs, self.lstmstacks, self.encodings, self.srng, n_batch, n_time, self.bounds, self.normalize_artic_only)
    self.generate_fun = theano.function(
        inputs=[chord_roots, chord_types],
        updates=updates,
        outputs=all_chosen,
        allow_input_downcast=True,
        mode=(NanGuardMode(nan_is_error=True, inf_is_error=True, big_is_error=True) if self.nanguard else None))
    self.generate_visualize_fun = theano.function(
        inputs=[chord_roots, chord_types],
        updates=updates,
        outputs=[all_chosen, all_probs] + indiv_probs,
        allow_input_downcast=True,
        mode=(NanGuardMode(nan_is_error=True, inf_is_error=True, big_is_error=True) if self.nanguard else None))
Example 3: make_gaussian_filter
def make_gaussian_filter(self):
    W_shape = self.get_W_shape()
    k = self.filter_size[0]
    k_low = int(np.floor(-(k - 1) / 2))
    k_high = k_low + k
    W_std = T.exp(self.W_logstd)
    std_array = T.tile(
        W_std.dimshuffle('x', 0, 'x'),
        (self.num_input_channels, 1, k)
    )
    x = np.arange(k_low, k_high).reshape((1, 1, -1))
    x = T.tile(
        x, (self.num_input_channels, self.num_input_channels, 1)
    ).astype(floatX)
    p1 = (1. / (np.sqrt(2. * np.pi))).astype(floatX)
    p2 = np.asarray(2., dtype=floatX)
    gf = (p1 / std_array) * T.exp(-x**2 / (p2 * (std_array**2)))
    # gf = gf.astype(theano.config.floatX)
    mask = np.zeros(W_shape)
    rg = np.arange(self.num_input_channels)
    mask[rg, rg, :] = 1
    mask = mask.astype(floatX)
    gf = gf * mask
    return gf
Example 4: output_probabilistic
def output_probabilistic(self, m_x, v_x):
    m_linear = T.dot(m_x, self.m_W[0, :, :]) + T.tile(self.m_b[0, :, :], [m_x.shape[0], 1])
    v_linear = T.dot(m_x**2, self.v_W[0, :, :]) + T.dot(v_x, self.m_W[0, :, :]**2) + T.dot(v_x, self.v_W[0, :, :]) + \
        T.tile(self.v_b[0, :, :], [m_x.shape[0], 1])
    if not self.output_layer:
        # We compute the mean and variance after the ReLU activation
        alpha = m_linear / T.sqrt(v_linear)
        gamma = Network_layer.gamma(-alpha)
        gamma_robust = -alpha - 1.0 / alpha + 2.0 / alpha**3
        gamma_final = T.switch(T.lt(-alpha, T.fill(alpha, 30)), gamma, gamma_robust)
        v_aux = m_linear + T.sqrt(v_linear) * gamma_final
        m_a = Network_layer.n_cdf(alpha) * v_aux
        v_a = m_a * v_aux * Network_layer.n_cdf(-alpha) + Network_layer.n_cdf(alpha) * v_linear * (1 - gamma_final * (gamma_final + alpha))
        return (m_a, v_a)
    else:
        return (m_linear, v_linear)
Author: Riashat; project: Active-Learning-Bayesian-Convolutional-Neural-Networks; lines: 25; source file: network_layer.py
Example 5: apply
def apply(self, v):
    [h_vals, _], _ = theano.scan(fn=self.step,
                                 sequences=v,
                                 outputs_info=[T.tile(self.h0, (v.shape[1], 1)),
                                               T.tile(self.c0, (v.shape[1], 1))])
    return h_vals
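Example 5 uses T.tile to replicate the stored initial state once per sequence in the minibatch, which is the shape theano.scan expects in outputs_info. A minimal sketch of that idea, assuming a (1, n_hidden) shared initial state (the names h0 and v are illustrative, not the original class attributes):

import numpy as np
import theano
import theano.tensor as T

n_hidden = 5
h0 = theano.shared(np.zeros((1, n_hidden), dtype=theano.config.floatX))  # one stored initial state
v = T.tensor3('v')                        # input sequences, shape (time, batch, features)

# scan's outputs_info needs one initial row per example in the batch,
# so the single stored row is tiled batch-size times
h0_batch = T.tile(h0, (v.shape[1], 1))    # shape (batch, n_hidden)

f = theano.function([v], h0_batch.shape)
print(f(np.zeros((7, 3, 2), dtype=theano.config.floatX)))  # [3 5]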
Example 6: lcn_3d_input
def lcn_3d_input(data, kernel_shape, n_maps):
    """
    :param data: [examples, depth, filters, height, width]
    :param kernel_shape: int
    :param n_maps: int
    :return: new_x: [examples, depth, filters, height, width]
    """
    # create symbolic variable for the input data
    ftensor5 = T.TensorType('float32', [False] * 5)
    x = ftensor5()

    # # determine the number of maps
    # n_maps = data.shape[2]

    # create 3d filter that spans across all channels / feature maps
    # todo: kernel is not really in 3d; need 3d implementation instead of 2d repeated across third dimension
    # todo: alternative is to keep 2d kernel and extend short range given data size in z-plane; change first kernel_sh.
    filter_shape = (1, kernel_shape[0], n_maps, kernel_shape[1], kernel_shape[2])
    filters = np.resize(gaussian_filter(kernel_shape[1]), filter_shape)
    filters = filters / np.sum(filters)
    filters = sharedX(filters)

    # convolve filter with input signal
    convolution_out = conv3d(
        signals=x,
        filters=filters,
        signals_shape=data.shape,
        filters_shape=filter_shape,
        border_mode='valid'
    )

    # for each pixel, remove mean of 9x9 neighborhood
    mid_0 = int(np.floor(kernel_shape[0] / 2.))
    mid_1 = int(np.floor(kernel_shape[1] / 2.))
    mid_2 = int(np.floor(kernel_shape[2] / 2.))
    mean = T.tile(convolution_out, (1, 1, n_maps, 1, 1))
    padded_mean = T.zeros_like(x)
    padded_mean = T.set_subtensor(padded_mean[:, mid_0:-mid_0, :, mid_1:-mid_1, mid_2:-mid_2], mean)
    centered_data = data - padded_mean

    # scale down norm of 9x9 patch if norm is bigger than 1
    sum_sqr_xx = conv3d(signals=T.sqr(data), filters=filters)
    denominator = T.tile(T.sqrt(sum_sqr_xx), (1, 1, n_maps, 1, 1))
    padded_denominator = T.ones_like(x)
    padded_denominator = T.set_subtensor(
        padded_denominator[:, mid_0:-mid_0, :, mid_1:-mid_1, mid_2:-mid_2], denominator
    )
    per_img_mean = padded_denominator.mean(axis=[1, 2, 3, 4])
    divisor = T.largest(
        per_img_mean.dimshuffle(0, 'x', 'x', 'x', 'x'),
        padded_denominator
    )
    new_x = centered_data / T.maximum(1., divisor)

    # compile theano function
    f = theano.function([x], new_x)
    return f(data)
Example 7: est_log_part_fun
def est_log_part_fun(self):
    # init first visible data
    v_mean = T.nnet.softmax(self.base_vbias)[0]
    v_mean_rep = T.tile(v_mean, (self.numruns,)).reshape((self.numruns, self.model.num_vis))
    D = T.tile(T.sum(self.base_vbias, axis=0).dimshuffle('x'), (self.numruns,))
    v_samples, updates = theano.scan(fn=self.multinom_sampler, non_sequences=[v_mean_rep, D], n_steps=10)
    v = v_samples[-1]

    # init logw with beta = 0
    logw = -self.log_p_k(v, 0., D)
    [logw_list, vs, Ds], updates = theano.scan(self.ais_step, sequences=self.betas[1:], outputs_info=[logw, v, None])
    logw = logw_list[-1]
    v = vs[-1]
    D = Ds[-1]
    logw += self.log_p_k(v, 1, D)
    r = logsum(logw) - T.log(self.numruns)
    log_z_base = T.sum(T.log(1 + T.exp(self.base_vbias))) + (self.model.num_hid) * T.log(2)
    log_z_est = r + log_z_base
    perform_fun = theano.function([], log_z_est, updates=updates)
    return perform_fun()
Example 8: recurrence
def recurrence(x_t, h_tm1, c_tm1):
    i = T.nnet.sigmoid(T.dot(x_t, self.wi) + T.dot(h_tm1, self.wih) + self.bi)                 # input gate
    c_proposed = T.tanh(T.dot(x_t, self.wc) + T.dot(h_tm1, self.wch) + self.bc)                # proposed memory cell content
    f = T.nnet.sigmoid(T.dot(x_t, self.wf) + T.dot(h_tm1, self.wfh) + self.bf)                 # forget gate
    c_t = (T.tile(i, self.memory_size) * c_proposed) + (T.tile(f, self.memory_size) * c_tm1)   # new memory cell content
    o = T.nnet.sigmoid(T.dot(x_t, self.wo) + T.dot(h_tm1, self.woh) + self.bo)                 # output gate
    h_t = T.tile(o, self.memory_size) * T.tanh(c_t)
    return [h_t, c_t]
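In Example 8 the gates appear to be lower-dimensional than the memory cell, so T.tile replicates each gate to the width of the cell before the elementwise products. A tiny sketch of that replicate-then-multiply step, assuming a single gate value per step and a made-up memory_size (none of these names come from the original class):

import numpy as np
import theano
import theano.tensor as T

memory_size = 4
gate = T.vector('gate')                        # e.g. shape (1,): one gate activation
cell = T.vector('cell')                        # shape (memory_size,)
gated = T.tile(gate, memory_size) * cell       # replicate the gate across the cell before multiplying

f = theano.function([gate, cell], gated)
print(f(np.array([0.5], dtype=theano.config.floatX),
        np.ones(memory_size, dtype=theano.config.floatX)))   # [0.5 0.5 0.5 0.5]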
Example 9: setup_generate
def setup_generate(self):
    # dimensions: (batch, time, 12)
    chord_types = T.btensor3()
    # dimensions: (batch, time)
    chord_roots = T.imatrix()
    n_batch, n_time = chord_roots.shape
    spec = self.lstmstack.prepare_sample_scan(start_pos=T.alloc(np.array(self.encoding.STARTING_POSITION, np.int32), (n_batch)),
                                              start_out=T.tile(self.encoding.initial_encoded_form(), (n_batch, 1)),
                                              timestep=T.tile(T.arange(n_time), (n_batch, 1)),
                                              cur_chord_type=chord_types,
                                              cur_chord_root=chord_roots,
                                              deterministic_dropout=True)

    def _scan_fn(*inputs):
        # inputs is [ spec_sequences..., last_absolute_position, spec_taps..., spec_non_sequences... ]
        inputs = list(inputs)
        last_absolute_chosen = inputs.pop(len(spec.sequences))
        scan_rout = self.lstmstack.sample_scan_routine(spec, *inputs)
        last_rel_pos, last_out, cur_kwargs = scan_rout.send(None)
        new_pos = self.encoding.get_new_relative_position(last_absolute_chosen, last_rel_pos, last_out, self.bounds.lowbound, self.bounds.highbound, **cur_kwargs)
        addtl_kwargs = {
            "last_output": last_out
        }
        out_activations = scan_rout.send((new_pos, addtl_kwargs))
        out_probs = self.encoding.decode_to_probs(out_activations, new_pos, self.bounds.lowbound, self.bounds.highbound)
        sampled_note = Encoding.sample_absolute_probs(self.srng, out_probs)
        encoded_output = self.encoding.note_to_encoding(sampled_note, new_pos, self.bounds.lowbound, self.bounds.highbound)
        scan_outputs = scan_rout.send(encoded_output)
        scan_rout.close()
        return [sampled_note, out_probs] + scan_outputs

    outputs_info = [{"initial": T.zeros((n_batch,), 'int32'), "taps": [-1]}, None] + spec.outputs_info
    result, updates = theano.scan(fn=_scan_fn, sequences=spec.sequences, non_sequences=spec.non_sequences, outputs_info=outputs_info)
    all_chosen = result[0].dimshuffle((1, 0))
    all_probs = result[1].dimshuffle((1, 0, 2))
    self.generate_fun = theano.function(
        inputs=[chord_roots, chord_types],
        updates=updates,
        outputs=all_chosen,
        allow_input_downcast=True,
        mode=(NanGuardMode(nan_is_error=True, inf_is_error=True, big_is_error=True) if self.nanguard else None))
    self.generate_visualize_fun = theano.function(
        inputs=[chord_roots, chord_types],
        updates=updates,
        outputs=[all_chosen, all_probs],
        allow_input_downcast=True,
        mode=(NanGuardMode(nan_is_error=True, inf_is_error=True, big_is_error=True) if self.nanguard else None))
Example 10: mmd_full
def mmd_full(x_t, y_t, alpha=0.5):
    """Implementation of the full kernel MMD statistic (gaussian kernel)"""
    N = x_t.shape[1]
    M = y_t.shape[1]
    term1 = T.mean(T.exp(-0.5 * (1 / alpha) * T.square(T.repeat(x_t, N) - T.tile(x_t, N))))
    term2 = T.mean(T.exp(-0.5 * (1 / alpha) * T.square(T.repeat(x_t, M) - T.tile(y_t, N))))
    term3 = T.mean(T.exp(-0.5 * (1 / alpha) * T.square(T.repeat(y_t, M) - T.tile(y_t, M))))
    return term1 - 2 * term2 + term3
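The MMD terms above rely on combining T.repeat and T.tile to enumerate every pair of samples: repeat walks through one factor slowly, tile walks through the other quickly. A small sketch of that pairing trick on a plain vector (the function and variable names here are illustrative, not from the original code):

import numpy as np
import theano
import theano.tensor as T

x = T.vector('x')
n = x.shape[0]
# repeat gives [x0, x0, x0, x1, x1, x1, ...]; tile gives [x0, x1, x2, x0, x1, x2, ...]
# subtracting the two therefore produces every difference x[i] - x[j] exactly once
pairwise_diff = T.repeat(x, n) - T.tile(x, n)

f = theano.function([x], pairwise_diff.reshape((n, n)))
print(f(np.array([0., 1., 2.], dtype=theano.config.floatX)))
# entry (i, j) holds x[i] - x[j]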
Example 11: IRNN
def IRNN(n_input, n_hidden, n_output, input_type='real', out_every_t=False, loss_function='CE'):
    np.random.seed(1234)
    rng = np.random.RandomState(1234)

    x, y = initialize_data_nodes(loss_function, input_type, out_every_t)
    inputs = [x, y]

    h_0 = theano.shared(np.zeros((1, n_hidden), dtype=theano.config.floatX))
    V = initialize_matrix(n_input, n_hidden, 'V', rng)
    W = theano.shared(np.identity(n_hidden, dtype=theano.config.floatX))
    out_mat = initialize_matrix(n_hidden, n_output, 'out_mat', rng)
    hidden_bias = theano.shared(np.zeros((n_hidden,), dtype=theano.config.floatX))
    out_bias = theano.shared(np.zeros((n_output,), dtype=theano.config.floatX))
    parameters = [h_0, V, W, out_mat, hidden_bias, out_bias]

    def recurrence(x_t, y_t, h_prev, cost_prev, acc_prev, V, W, hidden_bias, out_mat, out_bias):
        if loss_function == 'CE':
            data_lin_output = V[x_t]
        else:
            data_lin_output = T.dot(x_t, V)
        h_t = T.nnet.relu(T.dot(h_prev, W) + data_lin_output + hidden_bias.dimshuffle('x', 0))
        if out_every_t:
            lin_output = T.dot(h_t, out_mat) + out_bias.dimshuffle('x', 0)
            cost_t, acc_t = compute_cost_t(lin_output, loss_function, y_t)
        else:
            cost_t = theano.shared(NP_FLOAT(0.0))
            acc_t = theano.shared(NP_FLOAT(0.0))
        return h_t, cost_t, acc_t

    non_sequences = [V, W, hidden_bias, out_mat, out_bias]
    h_0_batch = T.tile(h_0, [x.shape[1], 1])
    if out_every_t:
        sequences = [x, y]
    else:
        sequences = [x, T.tile(theano.shared(np.zeros((1, 1), dtype=theano.config.floatX)), [x.shape[0], 1, 1])]
    outputs_info = [h_0_batch, theano.shared(NP_FLOAT(0.0)), theano.shared(NP_FLOAT(0.0))]

    [hidden_states, cost_steps, acc_steps], updates = theano.scan(fn=recurrence,
                                                                  sequences=sequences,
                                                                  non_sequences=non_sequences,
                                                                  outputs_info=outputs_info)

    if not out_every_t:
        lin_output = T.dot(hidden_states[-1, :, :], out_mat) + out_bias.dimshuffle('x', 0)
        costs = compute_cost_t(lin_output, loss_function, y)
    else:
        cost = cost_steps.mean()
        accuracy = acc_steps.mean()
        costs = [cost, accuracy]

    return inputs, parameters, costs
Example 12: weighted_binary_cross_entropy_4
def weighted_binary_cross_entropy_4(pred, target, class_normalization):
    # Mix of 0 and 2
    # From theano
    DIM = pred.shape[1]
    BATCH_SIZE = pred.shape[0]
    N_on_per_batch = (T.transpose(T.tile(target.sum(axis=1), (DIM, 1))) + 1)
    N_off_per_batch = (T.transpose(T.tile((1 - target).sum(axis=1), (DIM, 1))) + 1)
    class_norm_tile = T.tile(class_normalization, (BATCH_SIZE, 1))
    return -(class_norm_tile * target * T.log(pred) / N_on_per_batch + (1.0 - target) * T.log(1.0 - pred) / N_off_per_batch)
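class_norm_tile above stretches a per-class weight vector across the whole batch with T.tile; the same effect can also be obtained through Theano's broadcasting rules. A short sketch contrasting the two (the names pred and w are illustrative, not taken from the original code):

import numpy as np
import theano
import theano.tensor as T

pred = T.matrix('pred')                          # (batch, dim)
w = T.vector('w')                                # per-class weights, shape (dim,)

# explicit tiling, as in the example above
weighted_tile = T.tile(w, (pred.shape[0], 1)) * pred
# equivalent result by broadcasting a (1, dim) row against the batch
weighted_bcast = w.dimshuffle('x', 0) * pred

f = theano.function([pred, w], [weighted_tile, weighted_bcast])
a, b = f(np.ones((2, 3), dtype=theano.config.floatX),
         np.arange(3, dtype=theano.config.floatX))
print(np.allclose(a, b))                         # True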
Example 13: get_input_vectors
def get_input_vectors(shape, phases, scaling, offset):
    x = T.repeat(offset[0] + T.arange(shape[0]) / scaling, shape[1] * phases).reshape(
        (shape[0], shape[1], phases)) * T.pow(2, T.arange(phases))
    y = T.repeat(T.tile(offset[1] + T.arange(shape[1]) / scaling, shape[0]).reshape(
        (shape[0], shape[1], 1)), phases, axis=2) * T.pow(2, T.arange(phases))
    z = T.tile(offset[2] + 10 * T.arange(phases), shape[0] * shape[1]).reshape((shape[0], shape[1], phases, 1))
    x = x.reshape((shape[0], shape[1], phases, 1))
    y = y.reshape((shape[0], shape[1], phases, 1))
    return T.concatenate([x, y, z], axis=3).reshape((shape[0] * shape[1] * phases, 3)).astype('float32')
Example 14: initial_states
def initial_states(self, batch_size, *args, **kwargs):
    states_dict = self.fst.expand({self.fst.fst.start: 0.0})
    states = tensor.as_tensor_variable(
        self.transition.pad(states_dict.keys(), NOT_STATE))
    states = tensor.tile(states[None, :], (batch_size, 1))
    weights = tensor.as_tensor_variable(
        self.transition.pad(states_dict.values(), 0))
    weights = tensor.tile(weights[None, :], (batch_size, 1))
    add = self.probability_computer(states, weights)
    return states, weights, add
Example 15: __init__
def __init__(self, rng, input, num_filters, input_shape):
    self.K = num_filters
    self.N = input_shape[2] * input_shape[3]
    self.D = input_shape[1]
    self.B = input_shape[0]
    self.input = input

    filter_shape = (self.K, self.D, 1, 1)
    fan_in = numpy.prod(filter_shape[1:])
    fan_out = (filter_shape[0] * numpy.prod(filter_shape[2:]))
    W_bound = numpy.sqrt(6. / (fan_in + fan_out))
    self.W = theano.shared(
        numpy.asarray(
            rng.uniform(low=-W_bound, high=W_bound, size=filter_shape),
            dtype=theano.config.floatX
        ),
        borrow=True
    )
    b_values = numpy.zeros((filter_shape[0],), dtype=theano.config.floatX)
    self.b = theano.shared(value=b_values, borrow=True)
    c_bound = numpy.sqrt(1. / (self.K * self.D))
    self.c = theano.shared(
        numpy.asarray(
            rng.uniform(low=-c_bound, high=c_bound, size=(self.K, self.D)),
            dtype=theano.config.floatX
        ),
        borrow=True
    )

    conved = conv2d(input, self.W,
                    input_shape=input_shape,
                    filter_shape=filter_shape)
    conved = conved + self.b.dimshuffle('x', 0, 'x', 'x')
    conved = conved.reshape((self.B, self.K, self.N))
    a = self.softmax3d(conved)
    x = input.reshape((self.B, self.D, self.N))

    v = theano.shared(numpy.zeros((self.B, self.K, self.D), dtype=theano.config.floatX))
    for k in range(self.K):
        ar = T.tile(a[:, k], (1, self.D)).reshape((self.B, self.D, self.N))
        cr = T.tile(self.c[k].reshape((1, self.D, 1)), (self.B, 1, self.N))
        vr = (ar * (x + cr)).sum(2)
        g = T.sqrt((vr**2).sum(1))  # add eps?
        v = T.set_subtensor(v[:, k, :], vr / T.tile(g.reshape((self.B, 1)), (1, self.D)))
    # v = v/T.sqrt((v**2).sum()) # whole normalize
    self.output = v
    self.params = [self.W, self.b, self.c]
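The loop in Example 15 relies on a tile-then-reshape idiom to copy the per-cluster assignment map once per descriptor dimension. A small standalone sketch of just that idiom, with made-up sizes B, D, N (the names a_k and a_rep are not from the original code):

import numpy as np
import theano
import theano.tensor as T

B, D, N = 2, 3, 5                                    # hypothetical batch, feature and spatial sizes
a_k = T.matrix('a_k')                                # per-cluster assignments, shape (B, N)
# tiling along the last axis and reshaping yields one identical copy per feature dimension
a_rep = T.tile(a_k, (1, D)).reshape((B, D, N))       # (B, D, N)

f = theano.function([a_k], a_rep)
out = f(np.arange(B * N, dtype=theano.config.floatX).reshape(B, N))
print(out.shape)                                     # (2, 3, 5); out[:, 0, :] equals out[:, 1, :], etc.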