當前位置: 首頁>>代碼示例>>Python>>正文


Python layers.get_output方法代碼示例

本文整理匯總了Python中lasagne.layers.get_output方法的典型用法代碼示例。如果您正苦於以下問題:Python layers.get_output方法的具體用法?Python layers.get_output怎麽用?Python layers.get_output使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在lasagne.layers的用法示例。


在下文中一共展示了layers.get_output方法的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。

示例1: create_encoder_func

# 需要導入模塊: from lasagne import layers [as 別名]
# 或者: from lasagne.layers import get_output [as 別名]
def create_encoder_func(layers):
    """Compile the forward pass of the encoder, q(z|x).

    `layers` is a dict of lasagne layers keyed by name; the compiled
    function maps a batch of inputs to their latent codes.
    """
    X = T.fmatrix('X')
    X_batch = T.fmatrix('X_batch')

    # Deterministic pass: dropout/noise layers are disabled at encode time.
    Z = get_output(layers['l_encoder_out'], X, deterministic=True)

    return theano.function(
        inputs=[theano.In(X_batch)],
        outputs=Z,
        givens={X: X_batch},
    )


# forward pass for the decoder, p(x|z) 
開發者ID:hjweide,項目名稱:adversarial-autoencoder,代碼行數:20,代碼來源:theano_funcs.py

示例2: create_decoder_func

# 需要導入模塊: from lasagne import layers [as 別名]
# 或者: from lasagne.layers import get_output [as 別名]
def create_decoder_func(layers):
    """Compile the forward pass of the decoder, p(x|z).

    Latent codes are fed directly into the encoder's output layer so the
    decoder can be driven without running the encoder itself.
    """
    Z = T.fmatrix('Z')
    Z_batch = T.fmatrix('Z_batch')

    # Inject Z at the encoder output, then read the reconstruction.
    X = get_output(
        layers['l_decoder_out'],
        inputs={layers['l_encoder_out']: Z},
        deterministic=True,
    )

    return theano.function(
        inputs=[theano.In(Z_batch)],
        outputs=X,
        givens={Z: Z_batch},
    )


# forward/backward (optional) pass for the encoder/decoder pair 
開發者ID:hjweide,項目名稱:adversarial-autoencoder,代碼行數:26,代碼來源:theano_funcs.py

示例3: _get_jac_vars

# 需要導入模塊: from lasagne import layers [as 別名]
# 或者: from lasagne.layers import get_output [as 別名]
def _get_jac_vars(self):
        """Build the symbolic feature Jacobians evaluated at the linearization action.

        Returns a list of theano variables, one Jacobian per feature, with the
        action input replaced by ``U_lin_var``. Raises NotImplementedError when
        the predictor does not expose a Jacobian layer.
        """
        if not self.predictor.feature_jacobian_name:
            raise NotImplementedError

        X_var, U_var, X_target_var, U_lin_var, alpha_var = self.input_vars

        # One forward pass fetches current features, their Jacobians, and the
        # predicted next features for all (possibly nested) feature names.
        names = [self.predictor.feature_name, self.predictor.feature_jacobian_name, self.predictor.next_feature_name]
        vars_ = L.get_output([self.predictor.pred_layers[name] for name in iter_util.flatten_tree(names)], deterministic=True)
        feature_vars, jac_vars, next_feature_vars = iter_util.unflatten_tree(names, vars_)

        # Flatten each feature map to (batch, features).
        y_vars = [T.flatten(feature_var, outdim=2) for feature_var in feature_vars]
        # Target features: same graph, but with the target image as input.
        y_target_vars = [theano.clone(y_var, replace={X_var: X_target_var}) for y_var in y_vars]
        # Blend target and current features by alpha; the ifelse short-circuits
        # the blend when alpha == 1 so no extra computation is done.
        y_target_vars = [theano.ifelse.ifelse(T.eq(alpha_var, 1.0),
                                              y_target_var,
                                              alpha_var * y_target_var + (1 - alpha_var) * y_var)
                         for (y_var, y_target_var) in zip(y_vars, y_target_vars)]

        # Evaluate the Jacobians at the linearization point U_lin instead of U.
        jac_vars = [theano.clone(jac_var, replace={U_var: U_lin_var}) for jac_var in jac_vars]
        return jac_vars 
開發者ID:alexlee-gk,項目名稱:visual_dynamics,代碼行數:21,代碼來源:servoing_policy.py

示例4: _get_jac_z_vars

# 需要導入模塊: from lasagne import layers [as 別名]
# 或者: from lasagne.layers import get_output [as 別名]
def _get_jac_z_vars(self):
        """Build the symbolic Jacobians and the servoing targets ``z``.

        ``z`` combines the blended target features, the predicted next features
        evaluated at the linearization action, and the first-order correction
        ``J @ u_lin``. Returns ``(jac_vars, z_vars)`` as lists of theano
        variables. Raises NotImplementedError when no Jacobian layer exists.
        """
        if not self.predictor.feature_jacobian_name:
            raise NotImplementedError

        X_var, U_var, X_target_var, U_lin_var, alpha_var = self.input_vars

        # One forward pass fetches current features, their Jacobians, and the
        # predicted next features for all (possibly nested) feature names.
        names = [self.predictor.feature_name, self.predictor.feature_jacobian_name, self.predictor.next_feature_name]
        vars_ = L.get_output([self.predictor.pred_layers[name] for name in iter_util.flatten_tree(names)], deterministic=True)
        feature_vars, jac_vars, next_feature_vars = iter_util.unflatten_tree(names, vars_)

        # Flatten each feature map to (batch, features).
        y_vars = [T.flatten(feature_var, outdim=2) for feature_var in feature_vars]
        # Target features: same graph, but with the target image as input.
        y_target_vars = [theano.clone(y_var, replace={X_var: X_target_var}) for y_var in y_vars]
        # Blend target and current features by alpha; ifelse skips the blend
        # entirely when alpha == 1.
        y_target_vars = [theano.ifelse.ifelse(T.eq(alpha_var, 1.0),
                                              y_target_var,
                                              alpha_var * y_target_var + (1 - alpha_var) * y_var)
                         for (y_var, y_target_var) in zip(y_vars, y_target_vars)]

        # Evaluate Jacobians and next-feature predictions at U_lin instead of U.
        jac_vars = [theano.clone(jac_var, replace={U_var: U_lin_var}) for jac_var in jac_vars]
        y_next_pred_vars = [T.flatten(next_feature_var, outdim=2) for next_feature_var in next_feature_vars]
        y_next_pred_vars = [theano.clone(y_next_pred_var, replace={U_var: U_lin_var}) for y_next_pred_var in y_next_pred_vars]

        # z = y_target - y_next_pred(u_lin) + J(u_lin) @ u_lin  (per batch item).
        z_vars = [y_target_var - y_next_pred_var + T.batched_tensordot(jac_var, U_lin_var, axes=(2, 1))
                  for (y_target_var, y_next_pred_var, jac_var) in zip(y_target_vars, y_next_pred_vars, jac_vars)]
        return jac_vars, z_vars 
開發者ID:alexlee-gk,項目名稱:visual_dynamics,代碼行數:26,代碼來源:servoing_policy.py

示例5: test_conv2d

# 需要導入模塊: from lasagne import layers [as 別名]
# 或者: from lasagne.layers import get_output [as 別名]
def test_conv2d(x_shape, num_filters, filter_size, flip_filters, batch_size=2):
    """Check lasagne's Conv2DLayer against the reference loop implementation."""
    input_var = T.tensor4('X')
    l_in = L.InputLayer(shape=(None,) + x_shape, input_var=input_var, name='x')
    data = np.random.random((batch_size,) + x_shape).astype(theano.config.floatX)

    l_conv = L.Conv2DLayer(l_in, num_filters, filter_size=filter_size, stride=1, pad='same',
                           flip_filters=flip_filters, untie_biases=True, nonlinearity=None, b=None)
    conv_fn = theano.function([input_var], L.get_output(l_conv))

    tic()
    conv_out = conv_fn(data)
    toc("conv time for x_shape=%r, num_filters=%r, filter_size=%r, flip_filters=%r, batch_size=%r\n\t" %
        (x_shape, num_filters, filter_size, flip_filters, batch_size))

    tic()
    loop_out = conv2d(data, l_conv.W.get_value(), flip_filters=flip_filters)
    toc("loop conv time for x_shape=%r, num_filters=%r, filter_size=%r, flip_filters=%r, batch_size=%r\n\t" %
        (x_shape, num_filters, filter_size, flip_filters, batch_size))

    # Both implementations must agree up to float tolerance.
    assert np.allclose(conv_out, loop_out, atol=1e-6)
開發者ID:alexlee-gk,項目名稱:visual_dynamics,代碼行數:22,代碼來源:test_layers_theano.py

示例6: test_channelwise_locally_connected2d

# 需要導入模塊: from lasagne import layers [as 別名]
# 或者: from lasagne.layers import get_output [as 別名]
def test_channelwise_locally_connected2d(x_shape, filter_size, flip_filters, batch_size=2):
    """Check the channelwise LocallyConnected2DLayer against the loop version."""
    input_var = T.tensor4('X')
    l_in = L.InputLayer(shape=(None,) + x_shape, input_var=input_var, name='x')
    data = np.random.random((batch_size,) + x_shape).astype(theano.config.floatX)

    l_conv = LT.LocallyConnected2DLayer(l_in, x_shape[0], filter_size=filter_size, channelwise=True,
                                        stride=1, pad='same', flip_filters=flip_filters,
                                        untie_biases=True, nonlinearity=None, b=None)
    conv_fn = theano.function([input_var], L.get_output(l_conv))

    tic()
    conv_out = conv_fn(data)
    toc("channelwise locally connected time for x_shape=%r, filter_size=%r, flip_filters=%r, batch_size=%r\n\t" %
        (x_shape, filter_size, flip_filters, batch_size))

    tic()
    loop_out = channelwise_locally_connected2d(data, l_conv.W.get_value(), flip_filters=flip_filters)
    toc("loop channelwise locally connected time for x_shape=%r, filter_size=%r, flip_filters=%r, batch_size=%r\n\t" %
        (x_shape, filter_size, flip_filters, batch_size))

    # Both implementations must agree up to float tolerance.
    assert np.allclose(conv_out, loop_out, atol=1e-7)
開發者ID:alexlee-gk,項目名稱:visual_dynamics,代碼行數:23,代碼來源:test_layers_theano.py

示例7: build_bilinear_net

# 需要導入模塊: from lasagne import layers [as 別名]
# 或者: from lasagne.layers import get_output [as 別名]
def build_bilinear_net(input_shapes, X_var=None, U_var=None, X_diff_var=None, axis=1):
    """Build a bilinear dynamics-prediction network.

    Predicts the change in the state image X caused by an action U with a
    bilinear layer, and defines a squared-error loss against X + X_diff.

    Args:
        input_shapes: (x_shape, u_shape) tuple of per-sample shapes.
        X_var, U_var, X_diff_var: optional pre-existing theano variables to
            build the graph on; fresh ones are created when None.
        axis: axis argument forwarded to the bilinear layer.

    Returns:
        Tuple (net_name, input_vars, pred_layers, loss).
    """
    x_shape, u_shape = input_shapes
    # BUG FIX: the original used `X_var or T.tensor4('X')`, but theano
    # variables do not support truth-value testing, so passing an actual
    # variable raised a TypeError. Compare against None explicitly.
    if X_var is None:
        X_var = T.tensor4('X')
    if U_var is None:
        U_var = T.matrix('U')
    if X_diff_var is None:
        X_diff_var = T.tensor4('X_diff')
    X_next_var = X_var + X_diff_var

    l_x = L.InputLayer(shape=(None,) + x_shape, input_var=X_var)
    l_u = L.InputLayer(shape=(None,) + u_shape, input_var=U_var)

    l_x_diff_pred = LT.BilinearLayer([l_x, l_u], axis=axis)
    # Predicted next state: current state plus predicted difference.
    l_x_next_pred = L.ElemwiseMergeLayer([l_x, l_x_diff_pred], T.add)
    l_y = L.flatten(l_x)
    l_y_diff_pred = L.flatten(l_x_diff_pred)

    # Mean over the batch, sum over features; the 1/2 factor gives the
    # conventional squared-error gradient.
    X_next_pred_var = lasagne.layers.get_output(l_x_next_pred)
    loss = ((X_next_var - X_next_pred_var) ** 2).mean(axis=0).sum() / 2.

    net_name = 'BilinearNet'
    input_vars = OrderedDict([(var.name, var) for var in [X_var, U_var, X_diff_var]])
    pred_layers = OrderedDict([('y_diff_pred', l_y_diff_pred), ('y', l_y), ('x0_next_pred', l_x_next_pred)])
    return net_name, input_vars, pred_layers, loss 
開發者ID:alexlee-gk,項目名稱:visual_dynamics,代碼行數:24,代碼來源:net_theano.py

示例8: dist_info_sym

# 需要導入模塊: from lasagne import layers [as 別名]
# 或者: from lasagne.layers import get_output [as 別名]
def dist_info_sym(self, obs_var, state_info_vars):
        """Symbolic action-distribution info for a batch of observation sequences."""
        # Flatten trailing observation dims to (batch, steps, obs_dim).
        n_batches, n_steps = obs_var.shape[:2]
        obs_var = obs_var.reshape((n_batches, n_steps, -1))

        if self._state_include_action:
            # The previous action is appended to each observation step.
            all_input_var = TT.concatenate(
                [obs_var, state_info_vars["prev_action"]],
                axis=2,
            )
        else:
            all_input_var = obs_var

        prob = L.get_output(
            self._prob_network.output_layer,
            {self._prob_network.input_layer: all_input_var}
        )
        return dict(prob=prob) 
開發者ID:vicariousinc,項目名稱:pixelworld,代碼行數:19,代碼來源:init_policy.py

示例9: train_function

# 需要導入模塊: from lasagne import layers [as 別名]
# 或者: from lasagne.layers import get_output [as 別名]
def train_function(net):
    """Compile the theano training function for `net`."""
    # Learning rate is a symbolic input so it can be changed between epochs.
    lr_dynamic = T.scalar(name='learning_rate')

    # Class targets for the loss computation.
    targets = T.matrix('targets', dtype=theano.config.floatX)

    # Stochastic forward pass (dropout etc. active during training).
    prediction = l.get_output(net)

    log.i("COMPILING TRAIN FUNCTION...", new_line=False)
    start = time.time()
    loss = loss_function(net, prediction, targets)
    updates = net_updates(net, loss, lr_dynamic)
    input_var = l.get_all_layers(net)[0].input_var
    train_net = theano.function([input_var, targets, lr_dynamic], loss,
                                updates=updates, allow_input_downcast=True)
    log.i(("DONE! (", int(time.time() - start), "s )"))

    return train_net

################# PREDICTION FUNCTION #################### 
開發者ID:kahst,項目名稱:BirdCLEF-Baseline,代碼行數:24,代碼來源:lasagne_net.py

示例10: test_function

# 需要導入模塊: from lasagne import layers [as 別名]
# 或者: from lasagne.layers import get_output [as 別名]
def test_function(net, hasTargets=True, layer_index=-1):
    """Compile a deterministic prediction function for validating/testing the net.

    With `hasTargets` the function also returns loss and accuracy; otherwise
    it returns predictions only. `layer_index` selects which layer to read.
    """
    # Deterministic pass: dropout/noise layers are disabled for evaluation.
    out_layer = l.get_all_layers(net)[layer_index]
    prediction = l.get_output(out_layer, deterministic=True)

    log.i("COMPILING TEST FUNCTION...", new_line=False)
    start = time.time()
    input_var = l.get_all_layers(net)[0].input_var
    if hasTargets:
        # Class targets needed to compute loss and accuracy.
        targets = T.matrix('targets', dtype=theano.config.floatX)
        loss = loss_function(net, prediction, targets)
        accuracy = accuracy_function(net, prediction, targets)
        test_net = theano.function([input_var, targets],
                                   [prediction, loss, accuracy],
                                   allow_input_downcast=True)
    else:
        test_net = theano.function([input_var], prediction,
                                   allow_input_downcast=True)

    log.i(("DONE! (", int(time.time() - start), "s )"))

    return test_net
開發者ID:kahst,項目名稱:BirdCLEF-Baseline,代碼行數:26,代碼來源:lasagne_net.py

示例11: make_training_functions

# 需要導入模塊: from lasagne import layers [as 別名]
# 或者: from lasagne.layers import get_output [as 別名]
def make_training_functions(network, encode_layer, input_var, aug_var, fea_var, target_var):
    """Compile theano training and validation functions.

    Args:
        network: output layer of the classifier.
        encode_layer: intermediate layer whose activations are returned by
            the validation function.
        input_var, aug_var, fea_var, target_var: theano input variables.

    Returns:
        (train_fn, val_fn): train_fn returns the training loss and applies
        updates; val_fn returns (test_loss, encode, test_output).
    """
    # Stochastic forward pass for training (dropout/noise active).
    prediction = layers.get_output(network)
    loss = lasagne.objectives.binary_crossentropy(prediction, target_var).mean()

    params = layers.get_all_params(network, trainable=True)
    updates = lasagne.updates.nesterov_momentum(loss, params, learning_rate=0.0005, momentum=0.975)

    # Deterministic passes for evaluation. (The original computed test_output
    # twice on consecutive identical lines; the duplicate is removed.)
    encode = lasagne.layers.get_output(encode_layer, deterministic=True)
    test_output = lasagne.layers.get_output(network, deterministic=True)
    test_loss = lasagne.objectives.binary_crossentropy(test_output, target_var).mean()

    val_fn = theano.function([input_var, aug_var, fea_var, target_var], [test_loss, encode, test_output])
    train_fn = theano.function([input_var, aug_var, fea_var, target_var], loss, updates=updates)

    return train_fn, val_fn
開發者ID:SBU-BMI,項目名稱:u24_lymphocyte,代碼行數:18,代碼來源:conv_sup_cc_lbp.py

示例12: make_training_functions

# 需要導入模塊: from lasagne import layers [as 別名]
# 或者: from lasagne.layers import get_output [as 別名]
def make_training_functions(network, encode_layer, input_var, aug_var, target_var):
    """Compile theano training and validation functions.

    Args:
        network: output layer of the classifier.
        encode_layer: intermediate layer whose activations are returned by
            the validation function.
        input_var, aug_var, target_var: theano input variables.

    Returns:
        (train_fn, val_fn): train_fn returns the training loss and applies
        updates; val_fn returns (test_loss, encode, test_output).
    """
    # Stochastic forward pass for training (dropout/noise active).
    prediction = layers.get_output(network)
    loss = lasagne.objectives.binary_crossentropy(prediction, target_var).mean()

    params = layers.get_all_params(network, trainable=True)
    updates = lasagne.updates.nesterov_momentum(loss, params, learning_rate=0.0005, momentum=0.975)

    # Deterministic passes for evaluation. (The original computed test_output
    # twice on consecutive identical lines; the duplicate is removed.)
    encode = lasagne.layers.get_output(encode_layer, deterministic=True)
    test_output = lasagne.layers.get_output(network, deterministic=True)
    test_loss = lasagne.objectives.binary_crossentropy(test_output, target_var).mean()

    val_fn = theano.function([input_var, aug_var, target_var], [test_loss, encode, test_output])
    train_fn = theano.function([input_var, aug_var, target_var], loss, updates=updates)

    return train_fn, val_fn
開發者ID:SBU-BMI,項目名稱:u24_lymphocyte,代碼行數:18,代碼來源:conv_sup_cc.py

示例13: main

# 需要導入模塊: from lasagne import layers [as 別名]
# 或者: from lasagne.layers import get_output [as 別名]
def main():
    """Benchmark forward and forward-backward passes of the network."""
    batch_size = args.batch_size
    print('Building model...')
    layer, input_var = build_model(batch_size=batch_size)
    labels_var = T.ivector('labels')

    output = get_output(layer)
    probs = T.nnet.softmax(output)
    loss = T.nnet.categorical_crossentropy(probs, labels_var).mean(
        dtype=theano.config.floatX)
    gradient = T.grad(loss, get_all_params(layer))

    print('Compiling theano functions...')
    forward_func = theano.function([input_var], output)
    full_func = theano.function([input_var, labels_var], gradient)
    print('Functions are compiled')

    # Random data is sufficient for timing; the values do not affect runtime.
    images = np.random.rand(batch_size, 3, image_sz, image_sz).astype(np.float32)
    labels = np.random.randint(0, 1000, size=batch_size).astype(np.int32)

    time_theano_run(forward_func, [images], 'Forward')
    time_theano_run(full_func, [images, labels], 'Forward-Backward')
開發者ID:soumith,項目名稱:convnet-benchmarks,代碼行數:23,代碼來源:benchmark_imagenet.py

示例14: test_batch_size

# 需要導入模塊: from lasagne import layers [as 別名]
# 或者: from lasagne.layers import get_output [as 別名]
def test_batch_size():
    """The model must produce identical outputs regardless of batch size."""
    input_var01, input_var16 = T.tensor3s('input01', 'input16')
    l_output01 = model(input_var01, batch_size=1)
    l_output16 = model(input_var16, batch_size=16)

    # Copy the batch-size-1 model's parameters into the batch-size-16 model.
    set_all_param_values(l_output16, get_all_param_values(l_output01))

    fn01 = theano.function([input_var01], get_output(l_output01))
    fn16 = theano.function([input_var16], get_output(l_output16))

    batch = np.random.rand(16, 30, 8)
    out16 = fn16(batch)
    out01 = np.zeros_like(out16)

    # Feed each sample individually through the batch-size-1 model.
    for idx, sample in enumerate(batch):
        out01[idx] = fn01(sample[np.newaxis, :, :])

    assert out16.shape == (16, 30, 8)
    assert np.allclose(out16, out01, atol=1e-3)
開發者ID:snipsco,項目名稱:ntm-lasagne,代碼行數:23,代碼來源:test_layers.py

示例15: __init__

# 需要導入模塊: from lasagne import layers [as 別名]
# 或者: from lasagne.layers import get_output [as 別名]
def __init__(self, incoming, depth, n_estimators, n_outputs, pi_iters, **kwargs):
        """Set up the neural decision forest layer.

        Args:
            incoming: the input lasagne layer feeding the decision nodes.
            depth: depth of each tree; each tree has 2**(depth-1) leaves.
            n_estimators: number of trees in the forest.
            n_outputs: number of output classes.
            pi_iters: number of iterations for the leaf-distribution (pi) update.
            **kwargs: forwarded to the lasagne Layer base class.
        """
        self._incoming = incoming
        self._depth = depth
        self._n_estimators = n_estimators
        self._n_outputs = n_outputs
        self._pi_iters = pi_iters
        super(NeuralForestLayer, self).__init__(incoming, **kwargs)

        # Leaf class distributions, initialized uniform: one row per leaf
        # across all trees, one column per class.
        pi_init = Constant(val=1.0 / n_outputs)(((1 << (depth - 1)) * n_estimators, n_outputs))
        pi_name = "%s.%s" % (self.name, 'pi') if self.name is not None else 'pi'
        self.pi = theano.shared(pi_init, name=pi_name)

        # what we want to do here is pi / pi.sum(axis=1)
        # to be safe, if certain rows only contain zeroes (for some pi all y's became 0),
        #     replace such row with 1/n_outputs
        sum_pi_over_y = self.pi.sum(axis=1).dimshuffle(0, 'x')
        all_0_y = T.eq(sum_pi_over_y, 0)
        norm_pi_body = (self.pi + all_0_y * (1.0 / n_outputs)) / (sum_pi_over_y + all_0_y)
        self.normalize_pi = theano.function([], [], updates=[(self.pi, norm_pi_body)])
        self.update_pi_one_iter = self.get_update_pi_one_iter_func()

        # Normalize once so pi is a proper distribution before any training.
        self.normalize_pi()

        # Compiled helper: leaf probabilities for a raw input matrix, using the
        # deterministic output of the incoming layer for the given input.
        t_input = T.matrix('t_input')
        self.f_leaf_proba = theano.function([t_input], self.get_probabilities_for(get_output(incoming, t_input))) 
開發者ID:SkidanovAlex,項目名稱:ShallowNeuralDecisionForest,代碼行數:27,代碼來源:neuralforestlayer.py


注:本文中的lasagne.layers.get_output方法示例由純淨天空整理自Github/MSDocs等開源代碼及文檔管理平台,相關代碼片段篩選自各路編程大神貢獻的開源項目,源碼版權歸原作者所有,傳播和使用請參考對應項目的License;未經允許,請勿轉載。