This article collects typical usage examples of the theano.tensor.flatten function in Python. If you have been wondering how exactly flatten is used, how it behaves, or what real uses of it look like, the curated code examples here should help.
Below are 15 code examples of the flatten function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
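Before the examples, a minimal sketch of what T.flatten does, assuming only NumPy and Theano are installed: T.flatten(x, outdim=n) keeps the first n-1 dimensions of x and collapses everything after them into a single trailing dimension (outdim=1, the default, gives a full flatten).

import numpy as np
import theano
import theano.tensor as T

x = T.tensor3('x')
f = theano.function([x], [T.flatten(x),             # outdim=1: full flatten
                          T.flatten(x, outdim=2)])  # keep dim 0, collapse the rest
val = np.arange(24, dtype=theano.config.floatX).reshape((2, 3, 4))
vec, mat = f(val)
print(vec.shape)  # (24,)
print(mat.shape)  # (2, 12)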
Example 1: unet_crossentropy_loss_sampled

def unet_crossentropy_loss_sampled(y_true, y_pred):
    print('unet_crossentropy_loss_sampled')
    epsilon = 1.0e-4
    y_pred_clipped = T.flatten(T.clip(y_pred, epsilon, 1.0 - epsilon))
    y_true = T.flatten(y_true)
    # this works but is ugly; there is surely a cleaner way to do it in theano
    # filter the indices of the positive and negative examples
    # (T.nonzero returns a tuple with one index array per dimension,
    #  hence the [0] on the 1-d flattened tensor)
    indPos = T.nonzero(y_true)[0]
    indNeg = T.nonzero(1 - y_true)[0]
    # shuffle (srng is assumed to be a RandomStreams instance defined elsewhere)
    n = indPos.shape[0]
    indPos = indPos[srng.permutation(n=n)]
    n = indNeg.shape[0]
    indNeg = indNeg[srng.permutation(n=n)]
    # take an equal number of samples from whichever class has fewer
    n_samples = T.cast(T.min([T.sum(y_true), T.sum(1 - y_true)]), dtype='int64')
    indPos = indPos[:n_samples]
    indNeg = indNeg[:n_samples]
    # both means are scalars, so average_loss equals loss_vector
    loss_vector = -T.mean(T.log(y_pred_clipped[indPos])) - T.mean(T.log(1 - y_pred_clipped[indNeg]))
    average_loss = T.mean(loss_vector)
    print('average_loss:', average_loss)  # prints the symbolic variable, not a value
    return average_loss
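A side note on the [0] after T.nonzero: as with numpy.nonzero, the result is a tuple holding one index array per dimension, so for a flattened (1-d) tensor the first element is the vector of indices. The NumPy analogue makes this visible:

import numpy as np
print(np.nonzero(np.array([0, 1, 0, 1])))  # (array([1, 3]),) -- a one-element tuple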
Example 2: loop

def loop(i, x, p, t):
    # inner product between the gradient of sample i's target-class
    # probability with respect to the input and the input itself
    p_class_t = p[i, t[i]]
    return T.dot(
        T.flatten(T.grad(p_class_t, x)[i]),
        T.flatten(x[i])
    )
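A body with this signature is usually driven by theano.scan over the sample indices, yielding one inner product of gradient and input per example. A minimal sketch under that assumption (the softmax stand-in for the model is illustrative, not from the original):

import theano
import theano.tensor as T

x = T.matrix('x')   # one row per sample
t = T.ivector('t')  # target class per sample

def loop(i, x, t):
    p = T.nnet.softmax(x)  # stand-in for the model's class probabilities
    p_class_t = p[i, t[i]]
    return T.dot(T.flatten(T.grad(p_class_t, x)[i]), T.flatten(x[i]))

scores, _ = theano.scan(loop, sequences=T.arange(x.shape[0]),
                        non_sequences=[x, t])
f = theano.function([x, t], scores)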
Example 3: __call__

def __call__(self, x, leak):
    # parameterized leaky rectifier: f1*x + f2*abs(x) equals x for x > 0
    # and leak*x for x < 0
    f1 = 0.5 * (1 + leak)
    f2 = 0.5 * (1 - leak)
    if leak.ndim == 1:
        return T.flatten(f1, 1)[0] * x + T.flatten(f2, 1)[0] * abs(x)
    else:
        return f1 * x + f2 * abs(x)
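To see why this is a leaky rectifier: for x >= 0, abs(x) = x, so the output is 0.5*(1+leak)*x + 0.5*(1-leak)*x = x; for x < 0, abs(x) = -x, giving 0.5*(1+leak)*x - 0.5*(1-leak)*x = leak*x. The T.flatten(f1, 1)[0] branch merely extracts the scalar when the leak arrives as a one-element vector.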
Example 4: build_model

def build_model(tparams, options, Wemb):
    trng = RandomStreams(123)
    use_noise = theano.shared(numpy_floatX(0.))
    x = T.matrix('x', dtype='int32')
    t = T.matrix('t', dtype=config.floatX)
    mask = T.matrix('mask', dtype=config.floatX)
    y = T.vector('y', dtype='int32')
    n_timesteps = x.shape[0]
    n_samples = x.shape[1]
    x_emb = Wemb[x.flatten()].reshape([n_timesteps, n_samples, options['embDimSize']])
    x_t_emb = T.concatenate([t.reshape([n_timesteps, n_samples, 1]), x_emb], axis=2)  # prepend the time element to the embedding
    proj = gru_layer(tparams, x_t_emb, options, mask=mask)
    if options['use_dropout']: proj = dropout_layer(proj, use_noise, trng)
    # p_y_given_x has shape (n_samples, 1); flatten it to line up with y
    p_y_given_x = T.nnet.sigmoid(T.dot(proj, tparams['W_logistic']) + tparams['b_logistic'])
    L = -(y * T.flatten(T.log(p_y_given_x)) + (1 - y) * T.flatten(T.log(1 - p_y_given_x)))
    cost = T.mean(L)
    if options['L2_reg'] > 0.: cost += options['L2_reg'] * (tparams['W_logistic'] ** 2).sum()
    return use_noise, x, t, mask, y, p_y_given_x, cost
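The loss L here is the elementwise binary cross-entropy written out by hand; with the flattened predictions it could equivalently use Theano's built-in helper (a sketch, assuming the same shapes as above):

p = T.flatten(p_y_given_x)            # shape (n_samples,)
L = T.nnet.binary_crossentropy(p, y)  # == -(y*log(p) + (1-y)*log(1-p))
cost = T.mean(L)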
Example 5: _recurrence

def _recurrence(v_h_, x_h_, v_t_, x_t_, a_t_, is_aggressive):
    state = tt.concatenate([v_h_, x_h_, tt.flatten(v_t_), tt.flatten(x_t_), tt.flatten(a_t_)])
    h0 = tt.dot(state, self.W_a_0) + self.b_a_0
    relu0 = tt.nnet.relu(h0)
    h1 = tt.dot(relu0, self.W_a_1) + self.b_a_1
    relu1 = tt.nnet.relu(h1)
    h2 = tt.dot(relu1, self.W_a_2) + self.b_a_2
    relu2 = tt.nnet.relu(h2)
    a = tt.dot(relu2, self.W_a_c)
    v_h, x_h, v_t, x_t, a_t, cost_transition = _step_state(v_h_, x_h_, v_t_, x_t_, a_t_, a, is_aggressive)
    # costs:
    # 0. smooth acceleration policy
    cost_accel = tt.abs_(a)
    # 1. force the host to move forward (until the top point of the roundabout)
    cost_progress = tt.nnet.relu(0.5 * self.two_pi_r - x_h)
    # 2. keep distance from nearby vehicles
    x_abs_diffs = tt.abs_(x_h - x_t)
    cost_accident = tt.mean(3 * tt.nnet.relu(self.require_distance - x_abs_diffs)) * (x_h > -0.5 * self.host_length)
    cost = self.alpha_accel * cost_accel + self.alpha_progress * cost_progress + self.alpha_accident * cost_accident
    # stop the scan once the host passes 0.45 * two_pi_r
    return (v_h, x_h, v_t, x_t, a_t, cost, cost_transition), theano.scan_module.until(x_h[0] >= 0.45 * self.two_pi_r)
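theano.scan_module.until is how a scan terminates early on a symbolic condition, which is what the return value above relies on. A minimal self-contained sketch:

import theano
import theano.tensor as T

def step(prev):
    nxt = prev + 1
    return nxt, theano.scan_module.until(nxt >= 5)

start = T.iscalar('start')
vals, _ = theano.scan(step, outputs_info=start, n_steps=100)
f = theano.function([start], vals)
print(f(0))  # [1 2 3 4 5]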
Example 6: apply

def apply(self, dataset, can_fit=True):
    x = dataset.get_design_matrix()
    denseX = T.matrix(dtype=x.dtype)
    image_shape = (len(x),) + self.img_shape
    X = denseX.reshape(image_shape)
    filters = gaussian_filter_9x9().reshape((1, 1, 9, 9))
    convout = conv.conv2d(input=X,
                          filters=filters,
                          image_shape=image_shape,
                          filter_shape=(1, 1, 9, 9),
                          border_mode='full')
    # For each pixel, remove the mean of its 9x9 neighborhood
    centered_X = X - convout[:, :, 4:-4, 4:-4]
    # Scale down the norm of a 9x9 patch if the norm is bigger than 1
    sum_sqr_XX = conv.conv2d(input=centered_X ** 2,
                             filters=filters,
                             image_shape=image_shape,
                             filter_shape=(1, 1, 9, 9),
                             border_mode='full')
    denom = T.sqrt(sum_sqr_XX[:, :, 4:-4, 4:-4])
    # flatten to (batch, channel, pixels) and average over the pixels
    per_img_mean = T.mean(T.flatten(denom, outdim=3), axis=2)
    divisor = T.largest(per_img_mean.dimshuffle((0, 1, 'x', 'x')), denom)
    new_X = centered_X / divisor
    new_X = T.flatten(new_X, outdim=2)
    f = theano.function([denseX], new_X)
    dataset.set_design_matrix(f(x))
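The outdim=3 call is what makes the per-image mean work: it collapses (batch, channel, rows, cols) to (batch, channel, rows*cols), so averaging over axis 2 yields one mean per image and channel. Schematically:

d = T.tensor4('d')                           # (batch, channel, rows, cols)
m = T.mean(T.flatten(d, outdim=3), axis=2)   # (batch, channel)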
Example 7: unet_crossentropy_loss_sampled

def unet_crossentropy_loss_sampled(y_true, y_pred):
    epsilon = 1.0e-4
    y_pred_clipped = T.flatten(T.clip(y_pred, epsilon, 1.0 - epsilon))
    y_true = T.flatten(y_true)
    # pick out the indices of the positive and negative examples
    classPos = 1
    classNeg = 0
    indPos = T.eq(y_true, classPos).nonzero()[0]
    indNeg = T.eq(y_true, classNeg).nonzero()[0]
    # shuffle (srng is a RandomStreams instance on the UNET class)
    n = indPos.shape[0]
    indPos = indPos[UNET.srng.permutation(n=n)]
    n = indNeg.shape[0]
    indNeg = indNeg[UNET.srng.permutation(n=n)]
    # take an equal number of samples from whichever class has fewer
    n_samples = T.cast(T.min([indPos.shape[0], indNeg.shape[0]]), dtype='int64')
    indPos = indPos[:n_samples]
    indNeg = indNeg[:n_samples]
    # note: unlike Example 1, the negative term here uses log(p) rather than log(1 - p)
    # loss_vector = -T.mean(T.log(y_pred_clipped[indPos])) - T.mean(T.log(1 - y_pred_clipped[indNeg]))
    loss_vector = -T.mean(T.log(y_pred_clipped[indPos])) - T.mean(T.log(y_pred_clipped[indNeg]))
    loss_vector = T.clip(loss_vector, epsilon, 1.0 - epsilon)
    average_loss = T.mean(loss_vector)
    # a Python `if` cannot branch on a symbolic tensor, so the NaN guard
    # has to be a symbolic T.switch
    average_loss = T.switch(T.isnan(average_loss),
                            T.mean(y_pred_clipped[indPos]),
                            average_loss)
    return average_loss
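T.switch builds the conditional into the graph itself, selecting elementwise between two symbolic expressions. A minimal sketch:

import theano
import theano.tensor as T

a = T.scalar('a')
safe = T.switch(T.isnan(a), 0.0, a)
f = theano.function([a], safe)
print(f(float('nan')))  # 0.0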
Example 8: test_shape

# assumed test input (not in the original): a 2x2x2 array
tensor3_val = np.arange(8, dtype=theano.config.floatX).reshape((2, 2, 2))

def test_shape():
    x = T.tensor3()
    x_flat_2_mat = T.flatten(x, 2)  # keep dim 0 -> shape (2, 4)
    x_flat_2_vec = T.flatten(x, 1)  # full flatten -> shape (8,)
    flat_f = theano.function([x], [x_flat_2_mat, x_flat_2_vec])
    flat_mat_val, flat_vec_val = flat_f(tensor3_val)
    print('flatten to 2-d array:')
    print(flat_mat_val)
    print('flatten to 1-d array:')
    print(flat_vec_val)
    x_mat = T.matrix()
    x_mat_2_t3 = T.reshape(x_mat, (2, 2, 2))
    x_mat_2_vec = T.reshape(x_mat, (8,))
    reshape_f = theano.function([x_mat], [x_mat_2_t3, x_mat_2_vec])
    """
    t3_shape = T.lvector()
    vec_shape = T.lvector()
    x_mat_2_t3 = T.reshape(x_mat, t3_shape, 3)
    x_mat_2_vec = T.reshape(x_mat, vec_shape, 1)
    reshape_f = theano.function([x_mat, t3_shape, vec_shape], [x_mat_2_t3, x_mat_2_vec])
    """
    mat_2_t3_val, mat_2_vec_val = reshape_f(flat_mat_val)
    print('reshape 2-d array to 3-d array:')
    print(mat_2_t3_val)
    print('reshape 2-d array to 1-d array:')
    print(mat_2_vec_val)
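The practical difference on display: T.flatten(x, n) keeps the first n-1 dimensions and infers the collapsed size itself, while T.reshape needs the full target shape, either as concrete numbers (as above), as a symbolic shape vector (as in the commented-out variant), or with a single -1 left for Theano to infer, NumPy-style:

x_mat_2_t3 = T.reshape(x_mat, (2, 2, -1))  # trailing dimension inferred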
Example 9: __create_node_set

def __create_node_set(self, n_features, n_output, data_in, note_set_name, weightsFunc=None):
    prev_out = data_in
    prev_dim = n_features
    layers = []
    n_weights = 0
    weights_list = []
    state = None
    for i_h_layer in range(0, len(self.hidden_dimensions)):
        n_hidden_nodes = self.hidden_dimensions[i_h_layer]
        weights = None
        bias = None
        if weightsFunc is not None:
            weights, bias, state = weightsFunc(i_h_layer, state)
        # actual hidden layer
        hidden_layer = Layer(data_in=prev_out,
                             n_input=prev_dim,
                             n_output=n_hidden_nodes,
                             link_function=self.link_function_hidden,
                             weights=weights,
                             bias=bias,
                             name=note_set_name + " Hidden Layer")
        weights_list.append(hidden_layer.weights)
        weights_list.append(hidden_layer.bias)
        layers.append(hidden_layer)
        n_weights += (prev_dim + 1) * n_hidden_nodes
        prev_out = hidden_layer.output
        prev_dim = n_hidden_nodes
    weights = None
    bias = None
    if weightsFunc is not None:
        weights, bias, state = weightsFunc(len(self.hidden_dimensions), state)
    output_layer = Layer(
        data_in=prev_out,
        n_input=prev_dim,
        n_output=n_output,
        link_function=self.link_function_output,
        weights=weights,
        bias=bias,
        name=note_set_name + " Output Layer")
    weights_list.append(output_layer.weights)
    weights_list.append(output_layer.bias)
    layers.append(output_layer)
    n_weights += (prev_dim + 1) * n_output
    # concatenate all weights into one huge vector
    flat_weights = T.concatenate([T.flatten(item) for item in weights_list])
    flat_weights.name = "Network " + note_set_name + " Weights"
    # compute MSE
    y = self.__y
    errors = y - output_layer.output
    mse = T.mean(T.sqr(errors))
    normalized_mse = mse / 2.0
    normalized_mse.name = note_set_name + " MSE"
    grads = T.concatenate([T.flatten(item) for item in T.grad(normalized_mse, weights_list)])
    grads.name = note_set_name + " Gradients"
    return layers, grads, normalized_mse, weights_list, n_weights, flat_weights
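Flattening every weight matrix and bias into one long vector, and doing the same to the gradients, is the usual bridge to generic optimizers (for example the scipy.optimize routines), which expect the parameters and the gradient as single 1-d arrays.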
Example 10: lower_bound

def lower_bound(self):
    mu = T.flatten(self.trunc_output, outdim=2)
    inp = T.flatten(self.inpt, outdim=2)
    if self.out_distribution:
        sigma = T.mean(T.flatten(self.trunk_sigma, outdim=2))
    else:
        sigma = 0
    # per-sample Gaussian negative log-density, summed over dimensions
    log_gauss = T.sum(0.5 * np.log(2 * np.pi) + 0.5 * sigma + 0.5 * ((inp - mu) / T.exp(sigma)) ** 2.0, axis=1)
    return T.mean(log_gauss - self.latent_layer.prior)
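For reference, the per-dimension negative log-density of a Gaussian with mean mu and standard deviation s is 0.5*log(2*pi) + log(s) + 0.5*((x - mu)/s)**2. The summand above matches this with s = exp(sigma) in the quadratic term but with s = exp(sigma/2) in the 0.5*sigma term, so sigma acts as a log standard deviation in one place and as a log-variance in the other; which reading is intended depends on how trunk_sigma is parameterized elsewhere in the model.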
Example 11: model

def model(X1, X2, w1, w2, w3, p_drop_conv):
    # first half of the first layer
    l1a = T.flatten(dropout(T.mean(rectify(conv2d(X1, w1, border_mode='valid')), axis=3), p_drop_conv), outdim=2)
    # second half of the first layer
    l1b = T.flatten(dropout(T.mean(rectify(conv2d(X2, w2, border_mode='valid')), axis=3), p_drop_conv), outdim=2)
    # combine the two parts into the first layer
    l1 = T.concatenate([l1a, l1b], axis=1)
    # output layer
    pyx = T.dot(l1, w3)
    return pyx
Example 12: t_unroll_ae

def t_unroll_ae(wts, bs, tied_wts=False):
    ''' Flattens matrices and concatenates them into a single vector - specifically for autoencoders '''
    # with tied weights, the vector consists of a single weight matrix and two
    # distinct bias vectors
    if tied_wts:
        v = np.array([], dtype=theano.config.floatX)
        v = T.concatenate(
            (v, T.flatten(wts[0]), T.flatten(bs[0]), T.flatten(bs[1])))
        return v
    return t_unroll(wts, bs)
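A quick usage sketch, with hypothetical shared variables standing in for the autoencoder's parameters:

import numpy as np
import theano
import theano.tensor as T

W = theano.shared(np.zeros((4, 3), dtype=theano.config.floatX))
b0 = theano.shared(np.zeros(3, dtype=theano.config.floatX))
b1 = theano.shared(np.zeros(4, dtype=theano.config.floatX))
v = t_unroll_ae([W], [b0, b1], tied_wts=True)
print(v.eval().shape)  # (19,) -- 12 weights plus 3 + 4 biases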
Example 13: model

def model(X,
          h2_u, h3_u,
          h2_s, h3_s,
          w, w2, g2, b2, w3, g3, b3, wy
          ):
    h = lrelu(dnn_conv(X, w, subsample=(2, 2), border_mode=(2, 2)))
    h2 = lrelu(batchnorm(dnn_conv(h, w2, subsample=(2, 2), border_mode=(2, 2)), g=g2, b=b2, u=h2_u, s=h2_s))
    h3 = lrelu(batchnorm(dnn_conv(h2, w3, subsample=(2, 2), border_mode=(2, 2)), g=g3, b=b3, u=h3_u, s=h3_s))
    # pool each scale, flatten to (batch, features), and concatenate
    h = T.flatten(dnn_pool(h, (4, 4), (4, 4), mode='max'), 2)
    h2 = T.flatten(dnn_pool(h2, (2, 2), (2, 2), mode='max'), 2)
    h3 = T.flatten(dnn_pool(h3, (1, 1), (1, 1), mode='max'), 2)
    f = T.concatenate([h, h2, h3], axis=1)
    return [f]
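Because the pooling windows shrink with depth (4x4, then 2x2, then 1x1) while each strided convolution halves the spatial extent, the three pooled maps end up at a comparable spatial resolution; T.flatten(..., 2) then turns each into a (batch, features) matrix so they can be concatenated into a single multi-scale descriptor.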
Example 14: model

def model(X, w1, w2, w3, p_drop_conv, p_drop_hidden):
    l1a = rectify(conv2d(X, w1, border_mode='full'))
    l1 = max_pool_2d(l1a, (2, 2))
    l1 = dropout(l1, p_drop_conv)
    l2a = rectify(conv2d(l1, w2))
    l2b = max_pool_2d(l2a, (2, 2))
    l2 = T.flatten(l2b, outdim=2)  # collapse the pooled feature maps to (batch, features)
    l2 = dropout(l2, p_drop_conv)
    pyx = softmax(T.dot(l2, w3))
    return l1, l2, pyx
Example 15: set_sampling_function
def set_sampling_function(decoder_feature_function,
                          decoder_red_function,
                          decoder_green_function,
                          decoder_blue_function):
    hidden_data = T.matrix(name='hidden_data',
                           dtype=theano.config.floatX)
    # decoder
    decoder_outputs = decoder_feature_function(hidden_data)
    decoder_feature = decoder_outputs[1]
    decoder_red = decoder_red_function(decoder_feature)
    decoder_green = decoder_green_function(decoder_feature)
    decoder_blue = decoder_blue_function(decoder_feature)
    num_samples = decoder_red.shape[0]
    num_rows = decoder_red.shape[2]
    num_cols = decoder_red.shape[3]
    num_pixels = num_rows * num_cols
    # shape = (num_samples, num_intensity, num_pixels)
    decoder_red = T.flatten(decoder_red, 3)
    decoder_green = T.flatten(decoder_green, 3)
    decoder_blue = T.flatten(decoder_blue, 3)
    # shape = (num_samples, num_pixels, num_intensity)
    decoder_red = T.swapaxes(decoder_red, axis1=1, axis2=2)
    decoder_green = T.swapaxes(decoder_green, axis1=1, axis2=2)
    decoder_blue = T.swapaxes(decoder_blue, axis1=1, axis2=2)
    # shape = (num_samples*num_pixels, num_intensity)
    decoder_red = decoder_red.reshape((num_samples * num_pixels, -1))
    decoder_green = decoder_green.reshape((num_samples * num_pixels, -1))
    decoder_blue = decoder_blue.reshape((num_samples * num_pixels, -1))
    # softmax
    decoder_red = T.argmax(T.nnet.softmax(decoder_red), axis=1)
    decoder_green = T.argmax(T.nnet.softmax(decoder_green), axis=1)
    decoder_blue = T.argmax(T.nnet.softmax(decoder_blue), axis=1)
    decoder_red = decoder_red.reshape((num_samples, 1, num_rows, num_cols))
    decoder_green = decoder_green.reshape((num_samples, 1, num_rows, num_cols))
    decoder_blue = decoder_blue.reshape((num_samples, 1, num_rows, num_cols))
    decoder_image = T.concatenate([decoder_red, decoder_green, decoder_blue], axis=1)
    function_inputs = [hidden_data]
    function_outputs = [decoder_image]
    function = theano.function(inputs=function_inputs,
                               outputs=function_outputs,
                               on_unused_input='ignore')
    return function
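Read end to end: each color channel starts as a per-pixel distribution over intensities of shape (num_samples, num_intensity, num_rows, num_cols); the flatten, swapaxes, and reshape calls rearrange it to (num_samples*num_pixels, num_intensity) so that one softmax and argmax pick the most likely intensity per pixel, and the final reshapes and concatenation reassemble an image batch of shape (num_samples, 3, num_rows, num_cols).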