This article collects typical usage examples of the theano.tensor.unbroadcast function in Python. If you are unsure what exactly unbroadcast does, how to call it, or what it looks like in real code, the curated examples below should help.
The following presents 15 code examples of the unbroadcast function, drawn from open-source projects and ordered by popularity. Unless noted otherwise, they assume import theano, import theano.tensor as T, and import numpy (aliased as numpy or np depending on the example).
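Before the examples, a minimal sketch (mine, not taken from the projects quoted below) of what unbroadcast actually does. Theano attaches a per-axis broadcastable flag to every tensor; an axis allocated with the constant length 1 gets the flag set automatically, and T.unbroadcast(x, *axes) yields the same tensor with the flag cleared on the listed axes. theano.scan requires an initial state's broadcastable pattern to match the step function's output, which is why nearly every example below wraps its initial state in T.unbroadcast.

import theano
import theano.tensor as T

# An axis allocated with the constant length 1 is marked broadcastable...
init = T.alloc(0., 1, 5)
print(init.broadcastable)   # (True, False)

# ...and unbroadcast clears the flag, so scan will accept the tensor as
# an initial state even though its first axis happens to have size 1.
init = T.unbroadcast(init, 0)
print(init.broadcastable)   # (False, False)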
Example 1: compute_Lx_batches
def compute_Lx_batches(v, g, h, xw_mat, xv_mat, xa, xb, xc, bs, cbs):
    xw = xw_mat.flatten()
    xv = xv_mat.flatten()
    tv = v.reshape((bs // cbs, cbs, v.shape[1]))
    tg = g.reshape((bs // cbs, cbs, g.shape[1]))
    th = h.reshape((bs // cbs, cbs, h.shape[1]))

    final_w1 = T.unbroadcast(T.shape_padleft(T.zeros_like(xw_mat)), 0)
    final_v1 = T.unbroadcast(T.shape_padleft(T.zeros_like(xv_mat)), 0)
    final_a1 = T.unbroadcast(T.shape_padleft(T.zeros_like(xa)), 0)
    final_b1 = T.unbroadcast(T.shape_padleft(T.zeros_like(xb)), 0)
    final_c1 = T.unbroadcast(T.shape_padleft(T.zeros_like(xc)), 0)

    def comp_step(lv, lg, lh,
                  acc_w1, acc_v1, acc_a1, acc_b1, acc_c1):
        terms1 = compute_Lx_term1(lv, lg, lh, xw, xv, xa, xb, xc)
        accs1 = [acc_w1, acc_v1, acc_a1, acc_b1, acc_c1]
        rval = []
        for (term1, acc) in zip(terms1, accs1):
            rval += [acc + term1]
        return rval

    rvals, _ = theano.sandbox.scan.scan(
        comp_step,
        sequences=[tv, tg, th],
        states=[final_w1, final_v1, final_a1, final_b1, final_c1],
        n_steps=bs // cbs,
        profile=0,
        mode=theano.Mode(linker='cvm_nogc'),
        flags=['no_optimization'])
    accs1 = [x[0] / numpy.float32(bs // cbs) for x in rvals]
    accs2 = compute_Lx_term2(v, g, h, xw, xv, xa, xb, xc)
    return [x - y for x, y in zip(accs1, accs2)]
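A note on the T.unbroadcast(T.shape_padleft(...), 0) idiom above: shape_padleft adds a new leading axis that is broadcastable by construction, and the experimental theano.sandbox.scan appears to keep each running state in exactly that leading axis, so the flag must be cleared before the tensor can serve as a state buffer. A reduced sketch (my own, with a stand-in matrix w):

import theano.tensor as T

w = T.matrix('w')
state = T.shape_padleft(T.zeros_like(w))   # broadcastable: (True, False, False)
state = T.unbroadcast(state, 0)            # broadcastable: (False, False, False)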
Example 2: get_debug
def get_debug(self, train=False):
    input_dict = self.get_input(train)
    X = input_dict[self.dec_input_name]
    prev_state = input_dict[self.enc_name]
    padded_mask = self.get_padded_shuffled_mask(train, X, pad=1)
    X = X.dimshuffle((1, 0, 2))

    xi = T.dot(X, self.W_i) + self.b_i + T.dot(prev_state, self.We_i)
    xf = T.dot(X, self.W_f) + self.b_f + T.dot(prev_state, self.We_f)
    xc = T.dot(X, self.W_c) + self.b_c + T.dot(prev_state, self.We_c)
    xo = T.dot(X, self.W_o) + self.b_o + T.dot(prev_state, self.We_o)

    if train:
        STEP = self._step
    else:
        STEP = self._step_test

    [outputs, hiddens, memories], updates = theano.scan(
        STEP,
        sequences=[xi, xf, xo, xc, padded_mask],
        outputs_info=[
            T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dim), 1),
            T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.hidden_dim), 1),
            T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.hidden_dim), 1)
        ],
        non_sequences=[self.U_i, self.U_f, self.U_o, self.U_c, prev_state],
        truncate_gradient=self.truncate_gradient,
        go_backwards=self.go_backwards)
    return outputs, hiddens, memories, prev_state
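This and several later examples call alloc_zeros_matrix, a helper from the Keras code base of that era rather than part of Theano itself. Judging from the call sites, the following sketch should be equivalent (treat the definition as an assumption):

import numpy as np
import theano
import theano.tensor as T

def alloc_zeros_matrix(*dims):
    # Assumed equivalent of the Keras 0.x helper used in these examples: a
    # zero-filled tensor with the given (possibly symbolic) dimensions in
    # Theano's configured float dtype.
    return T.alloc(np.cast[theano.config.floatX](0.), *dims)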
Example 3: generic_compute_Lx_batches
def generic_compute_Lx_batches(samples, weights, biases, bs, cbs):
    tsamples = [x.reshape((bs // cbs, cbs, x.shape[1])) for x in samples]
    final_ws = [T.unbroadcast(T.shape_padleft(T.zeros_like(x)), 0)
                for x in weights]
    final_bs = [T.unbroadcast(T.shape_padleft(T.zeros_like(x)), 0)
                for x in biases]
    n_samples = len(samples)
    n_weights = len(weights)
    n_biases = len(biases)

    def comp_step(*args):
        lsamples = args[:n_samples]
        terms1 = generic_compute_Lx_term1(lsamples, weights, biases)
        rval = []
        for (term1, acc) in zip(terms1, args[n_samples:]):
            rval += [acc + term1]
        return rval

    rvals, _ = theano.sandbox.scan.scan(
        comp_step,
        sequences=tsamples,
        states=final_ws + final_bs,
        n_steps=bs // cbs,
        profile=0,
        mode=theano.Mode(linker='cvm_nogc'),
        flags=['no_optimization'])
    accs1 = [x[0] / numpy.float32(bs // cbs) for x in rvals]
    accs2 = generic_compute_Lx_term2(samples, weights, biases)
    return [x - y for x, y in zip(accs1, accs2)]
Example 4: __init__
def __init__(self, pad_x=0, pad_y=0, d_row=-1, **kwargs):
    super(OneDToTwoDFixedSizeLayer, self).__init__(1, **kwargs)
    assert len(self.sources) == 1
    X = self.sources[0].output
    assert X.ndim == 3
    assert X.dtype == "float32"

    if d_row > 0:
        X = X.reshape((X.shape[0], X.shape[1], d_row, X.shape[2] / d_row))
        Y = T.unbroadcast(X.dimshuffle(2, 0, 1, 3), 3)
        n_out = self.sources[0].attrs['n_out'] / d_row
    else:
        Y = X.dimshuffle(2, 0, 1, 'x')
        n_out = 1

    if pad_x + pad_y > 0:
        tmp = T.zeros((Y.shape[1] + 2 * pad_x, Y.shape[2]), 'int8')
        self.index = T.set_subtensor(tmp[pad_x: pad_x + Y.shape[1]], self.sources[0].index)
        tmp = T.zeros((Y.shape[0] + 2 * pad_y, Y.shape[1] + 2 * pad_x, Y.shape[2], Y.shape[3]), 'float32')
        Y = T.set_subtensor(tmp[pad_y:pad_y + Y.shape[0], pad_x:pad_x + Y.shape[1]], Y)
        Y = T.unbroadcast(Y, 3)

    height = Y.shape[0]  # if n_out <= 0 else n_out
    width = T.maximum(T.sum(self.index, axis=0), T.ones_like(self.index[0]))
    batch = Y.shape[2]
    sizes = T.zeros((batch, 2), dtype="float32")
    sizes = T.set_subtensor(sizes[:, 0], height)
    sizes = T.set_subtensor(sizes[:, 1], width)
    self.output = Y
    self.output_sizes = sizes
    self.set_attr('n_out', n_out)
Example 5: fprop
def fprop(self, data):
    if self.use_ground_truth:
        self.input_space.validate(data)
        features, phones = data
        init_h = T.alloc(numpy.cast[theano.config.floatX](0), self.nhid)
        init_out = T.alloc(numpy.cast[theano.config.floatX](0), 1)
        init_out = T.unbroadcast(init_out, 0)
        fn = lambda f, p, h, o: self.fprop_step(f, p, h, o)
        ((h, out), updates) = theano.scan(fn=fn,
                                          sequences=[features, phones],
                                          outputs_info=[dict(initial=init_h, taps=[-1]),
                                                        init_out])
        return out
    else:
        self.input_space.validate(data)
        features, phones = data
        init_in = features[0]
        init_h = T.alloc(numpy.cast[theano.config.floatX](0), self.nhid)
        init_out = T.alloc(numpy.cast[theano.config.floatX](0), 1)
        init_out = T.unbroadcast(init_out, 0)
        fn = lambda t, p, f, h, o: self.fprop_step_prime(t, p, f, h, o)
        ((f, h, out), updates) = theano.scan(fn=fn,
                                             sequences=[features, phones],
                                             outputs_info=[init_in,
                                                           dict(initial=init_h, taps=[-1]),
                                                           init_out])
        return out
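An aside on why Example 5 needs T.unbroadcast(init_out, 0): T.alloc(..., 1) produces a vector with broadcastable pattern (True,), while the step function's output has pattern (False,), and theano.scan rejects an initial state whose pattern differs from the output's. A minimal reproduction, my sketch rather than the example's real fprop_step:

import numpy, theano, theano.tensor as T

features = T.matrix('features')
init_out = T.alloc(numpy.cast[theano.config.floatX](0), 1)  # broadcastable: (True,)

def step(f, o_prev):
    return o_prev + f            # broadcastable: (False,)

# Passing the raw init_out here makes scan complain at graph-construction
# time about the mismatched broadcastable patterns; unbroadcast fixes it.
out, _ = theano.scan(step, sequences=[features],
                     outputs_info=[T.unbroadcast(init_out, 0)])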
Example 6: pos_phase
def pos_phase(self, v, init_state, n_steps=1, eps=1e-3):
    """
    Mixed mean-field + sampling inference in positive phase.
    :param v: input being conditioned on
    :param init_state: dictionary of initial values
    :param n_steps: number of Gibbs updates to perform afterwards
    """
    def pos_mf_iteration(g1, h1, v, pos_counter):
        h2 = self.h_hat(g1, v)
        s2_1 = self.s1_hat(g1, v)
        s2_0 = self.s0_hat(g1, v)
        g2 = self.g_hat(h2, s2_1, s2_0)
        # stopping criterion
        dl_dghat = T.max(abs(self.dlbound_dg(g2, h2, s2_1, s2_0, v)))
        dl_dhhat = T.max(abs(self.dlbound_dh(g2, h2, s2_1, s2_0, v)))
        stop = T.maximum(dl_dghat, dl_dhhat)
        return [g2, h2, s2_1, s2_0, v, pos_counter + 1], theano.scan_module.until(stop < eps)

    states = [T.unbroadcast(T.shape_padleft(init_state['g'])),
              T.unbroadcast(T.shape_padleft(init_state['h'])),
              {'steps': 1},
              {'steps': 1},
              T.unbroadcast(T.shape_padleft(v)),
              T.unbroadcast(T.shape_padleft(0.))]
    rvals, updates = scan(
        pos_mf_iteration,
        states=states,
        n_steps=n_steps)
    return [rval[0] for rval in rvals]
Example 7: get_output
def get_output(self, train=False):
    X = self.get_input(train)
    padded_mask = self.get_padded_shuffled_mask(train, X, pad=1)
    X = X.dimshuffle((1, 0, 2))

    xsum = T.dot(X, self.W_sum) + self.b_sum  ### get gate's input
    xmax = T.dot(X, self.W_max) + self.b_max
    xmin = T.dot(X, self.W_min) + self.b_min
    xsubt = T.dot(X, self.W_subt) + self.b_subt
    xmul = T.dot(X, self.W_mul) + self.b_mul
    xres = T.dot(X, self.W_res) + self.b_res
    xone = T.dot(X, self.W_one) + self.b_one
    xi = T.dot(X, self.W_i) + self.b_i
    xf = T.dot(X, self.W_f) + self.b_f
    xc = T.dot(X, self.W_c) + self.b_c
    xo = T.dot(X, self.W_o) + self.b_o

    [outputs, memories], updates = theano.scan(
        self._step,
        sequences=[xsum, xmax, xmin, xsubt, xmul, xres, xone, xi, xf, xo, xc, padded_mask],  ### update sequence input
        outputs_info=[
            T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dim), 1),
            T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dim), 1)
        ],
        non_sequences=[self.U_sum, self.U_max, self.U_min, self.U_subt, self.U_mul, self.U_res, self.U_one, self.U_i, self.U_f, self.U_o, self.U_c],  ### add gate's weight matrix
        truncate_gradient=self.truncate_gradient)

    if self.return_sequences:
        return outputs.dimshuffle((1, 0, 2))
    return outputs[-1]
Example 8: _ct
def _ct(self, other):
    '''Helper function to make tensor dimensions compatible.'''
    if (other.var_set == self.var_set):
        return (self.pt_tensor, other.pt_tensor)
    union_var_set = other.scope.union(self.scope)
    vidx1 = frozenset(self.var_indices)
    vidx2 = frozenset(other.var_indices)
    union_indices = vidx1.union(vidx2)

    shape1 = []
    shape2 = []
    b1 = []   # axes to addbroadcast (padded to size 1)
    b2 = []
    u1 = []   # axes to unbroadcast (actually present in the factor)
    u2 = []
    for i, vidx in enumerate(sorted(union_indices)):
        if (vidx in vidx1):
            shape1.append(self.discrete_pgm.cardinalities[vidx])
            u1.append(i)
        else:
            shape1.append(1)
            b1.append(i)
        if (vidx in vidx2):
            shape2.append(self.discrete_pgm.cardinalities[vidx])
            u2.append(i)
        else:
            shape2.append(1)
            b2.append(i)
    t1 = T.addbroadcast(T.unbroadcast(self.pt_tensor.reshape(shape1, len(shape1)), *u1), *b1)
    t2 = T.addbroadcast(T.unbroadcast(other.pt_tensor.reshape(shape2, len(shape2)), *u2), *b2)
    return (t1, t2)
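Example 8 is the one use on this page that pairs unbroadcast with its inverse, addbroadcast: after a reshape whose target shape is a symbolic list, Theano cannot infer which axes are genuinely size-1, so the code asserts the pattern explicitly, unbroadcasting the axes a factor really covers and addbroadcasting the padded size-1 ones so that elementwise operations broadcast across them. A reduced sketch with hypothetical cardinalities |A|=2, |B|=3, |C|=4 for two factors over {A,B} and {B,C}:

import theano.tensor as T

phi_ab = T.dmatrix('phi_ab')   # factor over (A, B), runtime shape (2, 3)
phi_bc = T.dmatrix('phi_bc')   # factor over (B, C), runtime shape (3, 4)

# Align both factors on the union scope (A, B, C): axes that are really
# present are unbroadcast, padded size-1 axes are addbroadcast.
t1 = T.addbroadcast(T.unbroadcast(phi_ab.reshape((2, 3, 1), 3), 0, 1), 2)
t2 = T.addbroadcast(T.unbroadcast(phi_bc.reshape((1, 3, 4), 3), 1, 2), 0)

product = t1 * t2              # broadcasts to shape (2, 3, 4), the factor product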
Example 9: fprop
def fprop(self, state_below):
    """
    :type state_below: theano matrix
    :param state_below: a two-dimensional matrix where the first dim represents
        time and the second dim represents features: shape = (time, features)
    """
    # init_output = T.alloc(np.cast[theano.config.floatX](0), state_below.shape[0], self.n_hid)
    init_output = T.alloc(np.cast[theano.config.floatX](0), self.n_hid)
    Wxh, bxh, Whh, bhh, Who, bho = self.Wxh, self.bxh, self.Whh, self.bhh, self.Who, self.bho
    state_below = T.dot(state_below, Wxh) + bxh

    # Note: state_below.shape[0] is symbolic, so this `==` is a plain Python
    # comparison (always False), not a symbolic T.eq(); only the Python-level
    # guard on self.n_hid below can actually trigger.
    if state_below.shape[0] == 1:
        init_output = T.unbroadcast(init_output, 0)
    if self.n_hid == 1:
        init_output = T.unbroadcast(init_output, 1)

    def fprop_step(state_below_timestep, state_before_timestep, Whh, bhh):
        return self.nonlinearity(state_below_timestep + T.dot(state_before_timestep, Whh) + bhh)

    outputs, updates = scan(
        fn=fprop_step, sequences=[state_below], outputs_info=[init_output], non_sequences=[Whh, bhh]
    )
    # reconstruct input
    # outputs = T.dot(outputs, Who) + bho

    if self.return_indices is not None:
        if len(self.return_indices) > 1:
            return [outputs[idx] for idx in self.return_indices]
        else:
            return outputs[self.return_indices[0]]
    else:
        return outputs
Example 10: __call__
def __call__(self, X, mask=None, init_state=None):
    if mask is None:
        mask = T.ones((X.shape[0], X.shape[1]))

    mask = T.shape_padright(mask)    # (nb_samples, time, 1)
    mask = T.addbroadcast(mask, -1)  # (nb_samples, time, 1), last axis broadcastable
    mask = mask.dimshuffle(1, 0, 2)  # (time, nb_samples, 1)
    mask = mask.astype('int8')
    # mask, padded_mask = self.get_padded_shuffled_mask(mask, pad=1)

    X = X.dimshuffle((1, 0, 2))
    x_z = T.dot(X, self.W_z) + self.b_z
    x_r = T.dot(X, self.W_r) + self.b_r
    x_h = T.dot(X, self.W_h) + self.b_h

    if init_state is not None:
        # (batch_size, output_dim)
        outputs_info = T.unbroadcast(init_state, 1)
    else:
        outputs_info = T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dim), 1)

    outputs, updates = theano.scan(
        self._step,
        # sequences=[x_z, x_r, x_h, padded_mask, mask],
        sequences=[x_z, x_r, x_h, mask],
        outputs_info=outputs_info,
        non_sequences=[self.U_z, self.U_r, self.U_h])

    if self.return_sequences:
        return outputs.dimshuffle((1, 0, 2))
    return outputs[-1]
Example 11: get_output
def get_output(self, train=False):
    X = self.get_input(train)
    padded_mask = self.get_padded_shuffled_mask(train, X, pad=1)
    X = X.dimshuffle((1, 0, 2))

    # Create X_tm1 sequence through zero left-padding
    Z = T.zeros_like(X)
    X_tm1 = T.concatenate(([Z[0]], X), axis=0)

    x_f = T.dot(X, self.W_xf) + self.b_f
    x_z = T.dot(X, self.W_xz) + self.b_z
    x_o = T.dot(X, self.W_xo) + self.b_o

    h_info = T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dim), 1)
    c_info = T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dim), 1)

    [outputs, cells], updates = theano.scan(
        self._step,
        sequences=[x_f, x_z, x_o, padded_mask, X_tm1],
        outputs_info=[h_info, c_info],
        non_sequences=[self.U_hf, self.U_xz, self.U_xo],
        truncate_gradient=self.truncate_gradient,
        go_backwards=self.go_backwards)

    if self.return_sequences:
        return outputs.dimshuffle((1, 0, 2))
    return outputs[-1]
Example 12: get_output
def get_output(self, train=False):
    X = self.get_input(train)
    padded_mask = self.get_padded_shuffled_mask(train, X, pad=1)
    X = X.dimshuffle((1, 0, 2))

    xi = T.dot(X, self.W_i) + self.b_i
    xf = T.dot(X, self.W_f) + self.b_f
    xc = T.dot(X, self.W_c) + self.b_c
    xo = T.dot(X, self.W_o) + self.b_o

    [outputs, memories], updates = theano.scan(
        self._step,
        sequences=[xi, xf, xo, xc, padded_mask],
        outputs_info=[
            T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dim), 1),
            T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dim), 1)
        ],
        non_sequences=[self.U_i, self.U_f, self.U_o, self.U_c],
        truncate_gradient=self.truncate_gradient,
        go_backwards=self.go_backwards)

    if self.return_sequences and self.go_backwards:
        return outputs[::-1].dimshuffle((1, 0, 2))
    elif self.return_sequences:
        return outputs.dimshuffle((1, 0, 2))
    return outputs[-1]
Example 13: get_output
def get_output(self, train=False):
    X = self.get_input(train)
    padded_mask = self.get_padded_shuffled_mask(train, X, pad=self.depth)
    X = X.dimshuffle((1, 0, 2))
    x = T.dot(X, self.W) + self.b

    if self.depth == 1:
        initial = T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dim), 1)
    else:
        initial = T.unbroadcast(T.unbroadcast(
            alloc_zeros_matrix(self.depth, X.shape[1], self.output_dim), 0), 2)

    outputs, updates = theano.scan(
        self._step,
        sequences=[x, dict(
            input=padded_mask,
            taps=[(-i) for i in range(self.depth)]
        )],
        outputs_info=[dict(
            initial=initial,
            taps=[(-i - 1) for i in range(self.depth)]
        )],
        non_sequences=self.Us,
        truncate_gradient=self.truncate_gradient
    )

    if self.return_sequences:
        return outputs.dimshuffle((1, 0, 2))
    return outputs[-1]
Example 14: outputs_info
def outputs_info(self, n_samples):
    # initialize hidden states: c, h
    shape = (n_samples,) + self.output_shape
    return [
        T.unbroadcast(T.alloc(numpy.asarray(0., dtype=theano.config.floatX), *shape),
                      *range(len(shape))),  # c
        T.unbroadcast(T.alloc(numpy.asarray(0., dtype=theano.config.floatX), *shape),
                      *range(len(shape)))   # h
    ]
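Example 14's *range(len(shape)) clears every axis at once, the defensive form of the same pattern: n_samples or any entry of output_shape could be the constant 1, and each such entry would otherwise leave a broadcastable axis in the initial state. Spelled out for a concrete shape (my sketch):

import numpy, theano, theano.tensor as T

shape = (1, 8, 8)   # n_samples == 1 would otherwise leave axis 0 broadcastable
c0 = T.alloc(numpy.asarray(0., dtype=theano.config.floatX), *shape)
print(c0.broadcastable)                     # (True, False, False)
c0 = T.unbroadcast(c0, *range(len(shape)))
print(c0.broadcastable)                     # (False, False, False)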
Example 15: generate_with_concat
def generate_with_concat(self, start_token, concat, length, temperature):
    start_token = start_token[:, np.newaxis].T
    concat = concat[:, np.newaxis].T
    N = 1
    H = self.lstm.n_hidden
    L = self.lstm.n_layers

    def step(input, previous_hidden, previous_state, temperature, concat):
        lstm_hidden, state = self.lstm.forward(T.concatenate([input, concat], axis=1),
                                               previous_hidden, previous_state)
        final_output = self.output.forward(lstm_hidden[:, -1, :], temperature)
        sample = self.rng.multinomial(n=1, size=(1,), pvals=final_output,
                                      dtype=theano.config.floatX)
        return sample, lstm_hidden, state

    hidden = T.unbroadcast(T.alloc(np.array(0).astype(theano.config.floatX), N, L, H), 1)
    state = T.unbroadcast(T.alloc(np.array(0).astype(theano.config.floatX), N, L, H), 1)

    (softmax_output, _, _), updates = theano.scan(step,
                                                  outputs_info=[
                                                      start_token,
                                                      hidden,
                                                      state,
                                                  ],
                                                  non_sequences=[temperature, concat],
                                                  n_steps=length)
    return softmax_output[:, 0, :], updates