

Python nonlinearities.tanh Method Code Examples

This article collects and summarizes typical usage examples of the Python method lasagne.nonlinearities.tanh. If you are wondering what nonlinearities.tanh does, how to call it, or what real-world usage looks like, the curated code examples below may help. You can also browse further usage examples from the lasagne.nonlinearities module that this method belongs to.


Below are 15 code examples of the nonlinearities.tanh method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
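Before the examples, here is a minimal sketch (not drawn from any of the projects below) of what lasagne.nonlinearities.tanh actually is: a callable that applies the elementwise hyperbolic tangent, squashing activations into (-1, 1), and is typically passed as a layer's nonlinearity argument:

import theano.tensor as T
from lasagne.layers import InputLayer, DenseLayer
from lasagne.nonlinearities import tanh

# tanh is just a function over a Theano expression ...
x = T.matrix('x')
y = tanh(x)                      # equivalent to T.tanh(x); values lie in (-1, 1)

# ... but it is most often passed to a layer as its activation:
l_in = InputLayer(shape=(None, 10))
l_hid = DenseLayer(l_in, num_units=5, nonlinearity=tanh)
print(l_hid.output_shape)        # (None, 5)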

Example 1: build_generator_32

# Required import: from lasagne import nonlinearities [as alias]
# Or: from lasagne.nonlinearities import tanh [as alias]
def build_generator_32(noise=None, ngf=128):
    # noise input
    InputNoise = InputLayer(shape=(None, 100), input_var=noise)
    # FC layer
    gnet0 = DenseLayer(InputNoise, ngf*4*4*4, W=Normal(0.02), nonlinearity=relu)
    print("Gen fc1:", gnet0.output_shape)
    # reshape layer
    gnet1 = ReshapeLayer(gnet0, ([0], ngf*4, 4, 4))
    print("Gen rs1:", gnet1.output_shape)
    # deconv layer
    gnet2 = Deconv2DLayer(gnet1, ngf*2, (4, 4), (2, 2), crop=1, W=Normal(0.02), nonlinearity=relu)
    print("Gen deconv1:", gnet2.output_shape)
    # deconv layer
    gnet3 = Deconv2DLayer(gnet2, ngf, (4, 4), (2, 2), crop=1, W=Normal(0.02), nonlinearity=relu)
    print("Gen deconv2:", gnet3.output_shape)
    # deconv layer
    gnet4 = Deconv2DLayer(gnet3, 3, (4, 4), (2, 2), crop=1, W=Normal(0.02), nonlinearity=tanh)
    print("Gen output:", gnet4.output_shape)
    return gnet4
Author: WANG-Chaoyue | Project: EvolutionaryGAN | Lines: 21 | Source: models_uncond.py
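For context, a hedged way to compile a sampling function from build_generator_32 (not part of the EvolutionaryGAN source; it assumes a working Theano/Lasagne setup and an illustrative uniform noise prior):

import numpy as np
import theano
import theano.tensor as T
from lasagne.layers import get_output

noise_var = T.matrix('noise')
generator = build_generator_32(noise=noise_var)
images = get_output(generator)                    # tanh output, values in (-1, 1)
sample_fn = theano.function([noise_var], images)

z = np.random.uniform(-1., 1., size=(16, 100)).astype(theano.config.floatX)
print(sample_fn(z).shape)                         # (16, 3, 32, 32)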

Example 2: build_generator_64

# Required import: from lasagne import nonlinearities [as alias]
# Or: from lasagne.nonlinearities import tanh [as alias]
def build_generator_64(noise=None, ngf=128):
    # noise input
    InputNoise = InputLayer(shape=(None, 100), input_var=noise)
    # FC layer
    gnet0 = DenseLayer(InputNoise, ngf*8*4*4, W=Normal(0.02), nonlinearity=relu)
    print("Gen fc1:", gnet0.output_shape)
    # reshape layer
    gnet1 = ReshapeLayer(gnet0, ([0], ngf*8, 4, 4))
    print("Gen rs1:", gnet1.output_shape)
    # deconv layer
    gnet2 = Deconv2DLayer(gnet1, ngf*8, (4, 4), (2, 2), crop=1, W=Normal(0.02), nonlinearity=relu)
    print("Gen deconv2:", gnet2.output_shape)
    # deconv layer
    gnet3 = Deconv2DLayer(gnet2, ngf*4, (4, 4), (2, 2), crop=1, W=Normal(0.02), nonlinearity=relu)
    print("Gen deconv3:", gnet3.output_shape)
    # deconv layer
    gnet4 = Deconv2DLayer(gnet3, ngf*4, (4, 4), (2, 2), crop=1, W=Normal(0.02), nonlinearity=relu)
    print("Gen deconv4:", gnet4.output_shape)
    # deconv layer
    gnet5 = Deconv2DLayer(gnet4, ngf*2, (4, 4), (2, 2), crop=1, W=Normal(0.02), nonlinearity=relu)
    print("Gen deconv5:", gnet5.output_shape)
    # deconv layer
    gnet6 = Deconv2DLayer(gnet5, 3, (3, 3), (1, 1), crop='same', W=Normal(0.02), nonlinearity=tanh)
    print("Gen output:", gnet6.output_shape)
    return gnet6
Author: WANG-Chaoyue | Project: EvolutionaryGAN | Lines: 27 | Source: models_uncond.py

Example 3: setup_transform_net

# Required import: from lasagne import nonlinearities [as alias]
# Or: from lasagne.nonlinearities import tanh [as alias]
def setup_transform_net(self, input_var=None):
    transform_net = InputLayer(shape=self.shape, input_var=input_var)
    transform_net = style_conv_block(transform_net, self.num_styles, 32, 9, 1)
    transform_net = style_conv_block(transform_net, self.num_styles, 64, 3, 2)
    transform_net = style_conv_block(transform_net, self.num_styles, 128, 3, 2)
    for _ in range(5):
        transform_net = residual_block(transform_net, self.num_styles)
    transform_net = nn_upsample(transform_net, self.num_styles)
    transform_net = nn_upsample(transform_net, self.num_styles)

    if self.net_type == 0:
        transform_net = style_conv_block(transform_net, self.num_styles, 3, 9, 1, tanh)
        transform_net = ExpressionLayer(transform_net, lambda X: 150. * X, output_shape=None)
    elif self.net_type == 1:
        transform_net = style_conv_block(transform_net, self.num_styles, 3, 9, 1, sigmoid)

    self.network['transform_net'] = transform_net
Author: joelmoniz | Project: gogh-figure | Lines: 19 | Source: model.py
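A quick standalone numeric illustration (not from the gogh-figure code) of why the net_type == 0 branch multiplies the tanh output by 150: tanh bounds activations to (-1, 1), and the scaling stretches them to roughly the mean-subtracted 8-bit pixel range a VGG-style loss network expects:

import numpy as np

x = np.array([-3.0, -0.5, 0.0, 0.5, 3.0])
scaled = 150. * np.tanh(x)      # bounded to (-150, 150)
print(np.round(scaled, 1))      # [-149.3  -69.3    0.    69.3  149.3]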

Example 4: build_BiRNN_CNN

# Required import: from lasagne import nonlinearities [as alias]
# Or: from lasagne.nonlinearities import tanh [as alias]
def build_BiRNN_CNN(incoming1, incoming2, num_units, mask=None, grad_clipping=0, nonlinearity=nonlinearities.tanh,
                    precompute_input=True, num_filters=20, dropout=True, in_to_out=False):
    # first get some necessary dimensions or parameters
    conv_window = 3
    _, sent_length, _ = incoming2.output_shape

    # dropout before cnn?
    if dropout:
        incoming1 = lasagne.layers.DropoutLayer(incoming1, p=0.5)

    # construct convolution layer
    cnn_layer = lasagne.layers.Conv1DLayer(incoming1, num_filters=num_filters, filter_size=conv_window, pad='full',
                                           nonlinearity=lasagne.nonlinearities.tanh, name='cnn')
    # infer the pool size for pooling (the pool should span all time steps of the cnn output)
    _, _, pool_size = cnn_layer.output_shape
    # construct max pool layer
    pool_layer = lasagne.layers.MaxPool1DLayer(cnn_layer, pool_size=pool_size)
    # reshape the layer to match rnn incoming layer [batch * sent_length, num_filters, 1] --> [batch, sent_length, num_filters]
    output_cnn_layer = lasagne.layers.reshape(pool_layer, (-1, sent_length, [1]))

    # finally, concatenate the two incoming layers together.
    incoming = lasagne.layers.concat([output_cnn_layer, incoming2], axis=2)

    return build_BiRNN(incoming, num_units, mask=mask, grad_clipping=grad_clipping, nonlinearity=nonlinearity,
                       precompute_input=precompute_input, dropout=dropout, in_to_out=in_to_out) 
Author: XuezheMax | Project: LasagneNLP | Lines: 27 | Source: networks.py
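A hedged usage sketch for build_BiRNN_CNN with hypothetical shapes (it assumes build_BiRNN from Example 8 below is in scope): incoming1 carries character-level embeddings with one row per word, incoming2 carries word-level embeddings:

from lasagne.layers import InputLayer

sent_length, char_dim, max_chars, word_dim = 50, 30, 20, 100

l_char = InputLayer(shape=(None, char_dim, max_chars))    # (batch * sent_length, char_dim, max_chars)
l_word = InputLayer(shape=(None, sent_length, word_dim))  # (batch, sent_length, word_dim)

net = build_BiRNN_CNN(l_char, l_word, num_units=200)
print(net.output_shape)   # (None, 50, 400): forward and backward states concatenated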

Example 5: build_BiLSTM_CNN

# Required import: from lasagne import nonlinearities [as alias]
# Or: from lasagne.nonlinearities import tanh [as alias]
def build_BiLSTM_CNN(incoming1, incoming2, num_units, mask=None, grad_clipping=0, precompute_input=True,
                     peepholes=False, num_filters=20, dropout=True, in_to_out=False):
    # first get some necessary dimensions or parameters
    conv_window = 3
    _, sent_length, _ = incoming2.output_shape

    # dropout before cnn?
    if dropout:
        incoming1 = lasagne.layers.DropoutLayer(incoming1, p=0.5)

    # construct convolution layer
    cnn_layer = lasagne.layers.Conv1DLayer(incoming1, num_filters=num_filters, filter_size=conv_window, pad='full',
                                           nonlinearity=lasagne.nonlinearities.tanh, name='cnn')
    # infer the pool size for pooling (the pool should span all time steps of the cnn output)
    _, _, pool_size = cnn_layer.output_shape
    # construct max pool layer
    pool_layer = lasagne.layers.MaxPool1DLayer(cnn_layer, pool_size=pool_size)
    # reshape the layer to match lstm incoming layer [batch * sent_length, num_filters, 1] --> [batch, sent_length, num_filters]
    output_cnn_layer = lasagne.layers.reshape(pool_layer, (-1, sent_length, [1]))

    # finally, concatenate the two incoming layers together.
    incoming = lasagne.layers.concat([output_cnn_layer, incoming2], axis=2)

    return build_BiLSTM(incoming, num_units, mask=mask, grad_clipping=grad_clipping, peepholes=peepholes,
                        precompute_input=precompute_input, dropout=dropout, in_to_out=in_to_out) 
Author: XuezheMax | Project: LasagneNLP | Lines: 27 | Source: networks.py

Example 6: __init__

# Required import: from lasagne import nonlinearities [as alias]
# Or: from lasagne.nonlinearities import tanh [as alias]
def __init__(self):
    self.hdn = None
    self.rep = None
    self.fname_in = None
    self.mod2size = None
    self.mod1size = None
    self.fname_out = None

    self.lr = 0.1
    self.act = "tanh"
    self.epochs = 1000
    self.verbosity = 3
    self.dropout = 0.2
    self.momentum = 0.9
    self.untied = False
    self.l2_norm = False
    self.batch_size = 128
    self.load_model = None
    self.save_model = None
    self.write_after = None
    self.crossmodal = False
    self.exec_command = None
    self.ignore_zeroes = False
Author: v-v | Project: BiDNN | Lines: 25 | Source: bidnn.py
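The act field here is a plain string; a hedged sketch (hypothetical, not the actual BiDNN resolution code) of how such a string might later be mapped to a Lasagne nonlinearity:

from lasagne import nonlinearities

# hypothetical lookup table; BiDNN's real resolution logic may differ
ACTIVATIONS = {
    'tanh': nonlinearities.tanh,
    'sigmoid': nonlinearities.sigmoid,
    'rectify': nonlinearities.rectify,
}
act_fn = ACTIVATIONS['tanh']    # self.act == "tanh" selects nonlinearities.tanh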

Example 7: build_generator_128

# Required import: from lasagne import nonlinearities [as alias]
# Or: from lasagne.nonlinearities import tanh [as alias]
def build_generator_128(noise=None, ngf=128):
    lrelu = LeakyRectify(0.2)
    # noise input
    InputNoise = InputLayer(shape=(None, 100), input_var=noise)
    # FC layer
    gnet0 = DenseLayer(InputNoise, ngf*16*4*4, W=Normal(0.02), nonlinearity=lrelu)
    print("Gen fc1:", gnet0.output_shape)
    # reshape layer
    gnet1 = ReshapeLayer(gnet0, ([0], ngf*16, 4, 4))
    print("Gen rs1:", gnet1.output_shape)
    # deconv layer
    gnet2 = Deconv2DLayer(gnet1, ngf*8, (4, 4), (2, 2), crop=1, W=Normal(0.02), nonlinearity=lrelu)
    print("Gen deconv1:", gnet2.output_shape)
    # deconv layer
    gnet3 = Deconv2DLayer(gnet2, ngf*8, (4, 4), (2, 2), crop=1, W=Normal(0.02), nonlinearity=lrelu)
    print("Gen deconv2:", gnet3.output_shape)
    # deconv layer
    gnet4 = Deconv2DLayer(gnet3, ngf*4, (4, 4), (2, 2), crop=1, W=Normal(0.02), nonlinearity=lrelu)
    print("Gen deconv3:", gnet4.output_shape)
    # deconv layer
    gnet5 = Deconv2DLayer(gnet4, ngf*4, (4, 4), (2, 2), crop=1, W=Normal(0.02), nonlinearity=lrelu)
    print("Gen deconv4:", gnet5.output_shape)
    # deconv layer
    gnet6 = Deconv2DLayer(gnet5, ngf*2, (4, 4), (2, 2), crop=1, W=Normal(0.02), nonlinearity=lrelu)
    print("Gen deconv5:", gnet6.output_shape)
    # deconv layer
    gnet7 = Deconv2DLayer(gnet6, 3, (3, 3), (1, 1), crop='same', W=Normal(0.02), nonlinearity=tanh)
    print("Gen output:", gnet7.output_shape)
    return gnet7
Author: WANG-Chaoyue | Project: EvolutionaryGAN | Lines: 31 | Source: models_uncond.py

Example 8: build_BiRNN

# Required import: from lasagne import nonlinearities [as alias]
# Or: from lasagne.nonlinearities import tanh [as alias]
def build_BiRNN(incoming, num_units, mask=None, grad_clipping=0, nonlinearity=nonlinearities.tanh,
                precompute_input=True, dropout=True, in_to_out=False):
    # construct the forward and backward RNNs. Ws are initialized with the Glorot
    # uniform initializer (default arguments); other initializers may be worth trying for specific tasks.

    # dropout for incoming
    if dropout:
        incoming = lasagne.layers.DropoutLayer(incoming, p=0.5)

    rnn_forward = lasagne.layers.RecurrentLayer(incoming, num_units,
                                                mask_input=mask, grad_clipping=grad_clipping,
                                                nonlinearity=nonlinearity, precompute_input=precompute_input,
                                                W_in_to_hid=lasagne.init.GlorotUniform(),
                                                W_hid_to_hid=lasagne.init.GlorotUniform(), name='forward')
    rnn_backward = lasagne.layers.RecurrentLayer(incoming, num_units,
                                                 mask_input=mask, grad_clipping=grad_clipping,
                                                 nonlinearity=nonlinearity, precompute_input=precompute_input,
                                                 W_in_to_hid=lasagne.init.GlorotUniform(),
                                                 W_hid_to_hid=lasagne.init.GlorotUniform(), backwards=True,
                                                 name='backward')

    # concatenate the outputs of forward and backward RNNs to combine them.
    concat = lasagne.layers.concat([rnn_forward, rnn_backward], axis=2, name="bi-rnn")

    # dropout for output
    if dropout:
        concat = lasagne.layers.DropoutLayer(concat, p=0.5)

    if in_to_out:
        concat = lasagne.layers.concat([concat, incoming], axis=2)

    # the shape of BiRNN output (concat) is (batch_size, input_length, 2 * num_hidden_units)
    return concat 
Author: XuezheMax | Project: LasagneNLP | Lines: 35 | Source: networks.py
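A minimal hedged usage sketch with hypothetical shapes: a batch of 50-token sentences with 100-dimensional embeddings yields 2 * num_units features per token:

from lasagne.layers import InputLayer

l_in = InputLayer(shape=(None, 50, 100))
bi_rnn = build_BiRNN(l_in, num_units=200)
print(bi_rnn.output_shape)    # (None, 50, 400)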

Example 9: build_BiLSTM_HighCNN

# Required import: from lasagne import nonlinearities [as alias]
# Or: from lasagne.nonlinearities import tanh [as alias]
def build_BiLSTM_HighCNN(incoming1, incoming2, num_units, mask=None, grad_clipping=0, precompute_input=True,
                         peepholes=False, num_filters=20, dropout=True, in_to_out=False):
    # first get some necessary dimensions or parameters
    conv_window = 3
    _, sent_length, _ = incoming2.output_shape

    # dropout before cnn
    if dropout:
        incoming1 = lasagne.layers.DropoutLayer(incoming1, p=0.5)

    # construct convolution layer
    cnn_layer = lasagne.layers.Conv1DLayer(incoming1, num_filters=num_filters, filter_size=conv_window, pad='full',
                                           nonlinearity=lasagne.nonlinearities.tanh, name='cnn')
    # infer the pool size for pooling (the pool should span all time steps of the cnn output)
    _, _, pool_size = cnn_layer.output_shape
    # construct max pool layer
    pool_layer = lasagne.layers.MaxPool1DLayer(cnn_layer, pool_size=pool_size)
    # reshape the layer to match highway incoming layer [batch * sent_length, num_filters, 1] --> [batch * sent_length, num_filters]
    output_cnn_layer = lasagne.layers.reshape(pool_layer, ([0], -1))

    # dropout after cnn?
    # if dropout:
    # output_cnn_layer = lasagne.layers.DropoutLayer(output_cnn_layer, p=0.5)

    # construct highway layer
    highway_layer = HighwayDenseLayer(output_cnn_layer, nonlinearity=nonlinearities.rectify)

    # reshape the layer to match lstm incoming layer [batch * sent_length, num_filters] --> [batch, sent_length, num_filters]
    output_highway_layer = lasagne.layers.reshape(highway_layer, (-1, sent_length, [1]))

    # finally, concatenate the two incoming layers together.
    incoming = lasagne.layers.concat([output_highway_layer, incoming2], axis=2)

    return build_BiLSTM(incoming, num_units, mask=mask, grad_clipping=grad_clipping, peepholes=peepholes,
                        precompute_input=precompute_input, dropout=dropout, in_to_out=in_to_out) 
Author: XuezheMax | Project: LasagneNLP | Lines: 37 | Source: networks.py

Example 10: __init__

# Required import: from lasagne import nonlinearities [as alias]
# Or: from lasagne.nonlinearities import tanh [as alias]
def __init__(self,
             env_spec,
             hidden_sizes=(32, 32),
             hidden_nonlinearity=NL.tanh,
             output_b_init=None,
             weight_signal=1.0,
             weight_nonsignal=1.0,
             weight_smc=1.0):
    """
    :param env_spec: A spec for the mdp.
    :param hidden_sizes: list of sizes for the fully connected hidden layers
    :param hidden_nonlinearity: nonlinearity used for each hidden layer
    :return:
    """
    Serializable.quick_init(self, locals())
    assert isinstance(env_spec.action_space, Discrete)
    output_b_init = compute_output_b_init(env_spec.action_space.names,
                                          output_b_init, weight_signal,
                                          weight_nonsignal, weight_smc)

    prob_network = MLP(
        input_shape=(env_spec.observation_space.flat_dim,),
        output_dim=env_spec.action_space.n,
        hidden_sizes=hidden_sizes,
        hidden_nonlinearity=hidden_nonlinearity,
        output_nonlinearity=NL.softmax,
        output_b_init=output_b_init
    )
    super(InitCategoricalMLPPolicy, self).__init__(env_spec, hidden_sizes,
                                                   hidden_nonlinearity, prob_network)


# Modified from RLLab GRUNetwork 
Author: vicariousinc | Project: pixelworld | Lines: 35 | Source: init_policy.py

Example 11: _get_activation

# Required import: from lasagne import nonlinearities [as alias]
# Or: from lasagne.nonlinearities import tanh [as alias]
def _get_activation(self, l):
    nonlinearities = {'Rectifier': nl.rectify,
                      'Sigmoid': nl.sigmoid,
                      'Tanh': nl.tanh,
                      'Softmax': nl.softmax,
                      'Linear': nl.linear,
                      'ExpLin': explin}

    assert l.type in nonlinearities, \
        "Layer type `%s` is not supported for `%s`." % (l.type, l.name)
    return nonlinearities[l.type]
Author: aigamedev | Project: scikit-neuralnetwork | Lines: 13 | Source: mlp.py
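A hedged illustration of the dispatch (the layer spec is mocked with a namedtuple; scikit-neuralnetwork's real layer objects carry more fields):

from collections import namedtuple

LayerSpec = namedtuple('LayerSpec', ['type', 'name'])
spec = LayerSpec(type='Tanh', name='hidden1')
# within the class, self._get_activation(spec) would return nl.tanh;
# an unknown type such as 'Maxout' would trip the assertion instead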

Example 12: build_small_action_cond_encoder_net

# Required import: from lasagne import nonlinearities [as alias]
# Or: from lasagne.nonlinearities import tanh [as alias]
def build_small_action_cond_encoder_net(input_shapes):
    x_shape, u_shape = input_shapes
    x2_c_dim = x1_c_dim = x_shape[0]
    x1_shape = (x1_c_dim, x_shape[1]//2, x_shape[2]//2)
    x2_shape = (x2_c_dim, x1_shape[1]//2, x1_shape[2]//2)
    y2_dim = 64
    X_var = T.tensor4('X')
    U_var = T.matrix('U')

    l_x = L.InputLayer(shape=(None,) + x_shape, input_var=X_var, name='x')
    l_u = L.InputLayer(shape=(None,) + u_shape, input_var=U_var, name='u')

    l_x1 = L.Conv2DLayer(l_x, x1_c_dim, filter_size=6, stride=2, pad=2,
                         W=init.Normal(std=0.01),
                         nonlinearity=nl.rectify)
    l_x2 = L.Conv2DLayer(l_x1, x2_c_dim, filter_size=6, stride=2, pad=2,
                         W=init.Normal(std=0.01),
                         nonlinearity=nl.rectify)

    l_y2 = L.DenseLayer(l_x2, y2_dim, nonlinearity=None, name='y')
    l_y2_diff_pred = LT.BilinearLayer([l_y2, l_u], name='y_diff_pred')
    l_y2_next_pred = L.ElemwiseMergeLayer([l_y2, l_y2_diff_pred], T.add)
    l_x2_next_pred_flat = L.DenseLayer(l_y2_next_pred, np.prod(x2_shape), nonlinearity=None)
    l_x2_next_pred = L.ReshapeLayer(l_x2_next_pred_flat, ([0],) + x2_shape)

    l_x1_next_pred = LT.Deconv2DLayer(l_x2_next_pred, x2_c_dim, filter_size=6, stride=2, pad=2,
                                      W=init.Normal(std=0.01),
                                      nonlinearity=nl.rectify)
    l_x_next_pred = LT.Deconv2DLayer(l_x1_next_pred, x1_c_dim, filter_size=6, stride=2, pad=2,
                                     W=init.Normal(std=0.01),
                                     nonlinearity=nl.tanh,
                                     name='x_next_pred')

    X_next_pred_var = lasagne.layers.get_output(l_x_next_pred)
    X_diff_var = T.tensor4('X_diff')
    X_next_var = X_var + X_diff_var
    loss = ((X_next_var - X_next_pred_var) ** 2).mean(axis=0).sum() / 2.

    net_name = 'SmallActionCondEncoderNet'
    input_vars = OrderedDict([(var.name, var) for var in [X_var, U_var, X_diff_var]])
    pred_layers = OrderedDict([('y_diff_pred', l_y2_diff_pred), ('y', l_y2), ('x0_next_pred', l_x_next_pred)])
    return net_name, input_vars, pred_layers, loss 
Author: alexlee-gk | Project: visual_dynamics | Lines: 44 | Source: net_theano.py
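The loss above is half the summed squared error, averaged over the batch. A tiny standalone numpy check of the same formula, with illustrative values:

import numpy as np

x_next = np.ones((2, 3, 4, 4))     # targets
x_pred = np.zeros((2, 3, 4, 4))    # predictions
loss = ((x_next - x_pred) ** 2).mean(axis=0).sum() / 2.
print(loss)    # 24.0 == (3 * 4 * 4) / 2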

Example 13: build_vgg_action_cond_encoder_net

# Required import: from lasagne import nonlinearities [as alias]
# Or: from lasagne.nonlinearities import tanh [as alias]
def build_vgg_action_cond_encoder_net(input_shapes, levels=None, x1_c_dim=16, bilinear_type='share', tanh=False):
    x_shape, u_shape = input_shapes
    assert len(x_shape) == 3
    assert len(u_shape) == 1
    levels = levels or [3]
    levels = sorted(set(levels))

    X_var = T.tensor4('x')
    U_var = T.matrix('u')
    X_next_var = T.tensor4('x_next')

    l_x = L.InputLayer(shape=(None,) + x_shape, input_var=X_var, name='x')
    l_u = L.InputLayer(shape=(None,) + u_shape, input_var=U_var, name='u')
    l_x_next = L.InputLayer(shape=(None,) + x_shape, input_var=X_next_var, name='x_next')

    xlevels_c_dim = OrderedDict()
    for level in range(levels[-1]+1):
        if level == 0:
            xlevels_c_dim[level] = x_shape[0]
        else:
            xlevels_c_dim[level] = x1_c_dim * 2**(level-1)

    # encoding
    l_xlevels = OrderedDict()
    for level in range(levels[-1]+1):
        if level == 0:
            l_xlevel = l_x
        else:
            l_xlevel = LT.VggEncodingLayer(l_xlevels[level-1], xlevels_c_dim[level], name='x%d' % level)
        l_xlevels[level] = l_xlevel

    # bilinear
    l_xlevels_next_pred = OrderedDict()
    for level in levels:
        l_xlevel = l_xlevels[level]
        l_xlevel_diff_pred = LT.create_bilinear_layer(l_xlevel, l_u, level, bilinear_type=bilinear_type, name='x%d_diff_pred' % level)
        l_xlevels_next_pred[level] = L.ElemwiseSumLayer([l_xlevel, l_xlevel_diff_pred],
                                                        name='x%d_next_pred' % level)
        if tanh:
            l_xlevels_next_pred[level].name += '_unconstrained'
            l_xlevels_next_pred[level] = L.NonlinearityLayer(l_xlevels_next_pred[level], nl.tanh,
                                                             name='x%d_next_pred' % level)

    pred_layers = OrderedDict([('x', l_xlevels[0]),
                               ('x_next', l_x_next),
                               ('x0_next', l_x_next),
                               ('x_next_pred', l_xlevels_next_pred[0]),
                               ])
    pred_layers.update([('x%d' % level, l_xlevels[level]) for level in l_xlevels.keys()])
    pred_layers.update([('x%d_next_pred' % level, l_xlevels_next_pred[level]) for level in l_xlevels_next_pred.keys()])
    return pred_layers 
Author: alexlee-gk | Project: visual_dynamics | Lines: 53 | Source: net_theano.py
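The tanh branch above follows a small reusable pattern: the unconstrained prediction layer is renamed and then wrapped in a NonlinearityLayer. A standalone sketch of just that pattern, with illustrative shapes:

import lasagne.layers as L
import lasagne.nonlinearities as nl

l_a = L.InputLayer(shape=(None, 3, 32, 32))
l_b = L.InputLayer(shape=(None, 3, 32, 32))

l_pred = L.ElemwiseSumLayer([l_a, l_b], name='x_next_pred_unconstrained')
l_pred = L.NonlinearityLayer(l_pred, nl.tanh, name='x_next_pred')   # squash to (-1, 1)
print(l_pred.output_shape)    # (None, 3, 32, 32)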

Example 14: build_vgg_fcn_action_cond_encoder_net

# Required import: from lasagne import nonlinearities [as alias]
# Or: from lasagne.nonlinearities import tanh [as alias]
def build_vgg_fcn_action_cond_encoder_net(input_shapes, levels=None, bilinear_type='share', tanh=False):
    x_shape, u_shape = input_shapes
    assert len(x_shape) == 3
    assert len(u_shape) == 1
    levels = levels or [3]
    levels = sorted(set(levels))

    X_var = T.tensor4('x')
    U_var = T.matrix('u')
    X_next_var = T.tensor4('x_next')

    l_x = L.InputLayer(shape=(None,) + x_shape, input_var=X_var, name='x')
    l_u = L.InputLayer(shape=(None,) + u_shape, input_var=U_var, name='u')
    l_x_next = L.InputLayer(shape=(None,) + x_shape, input_var=X_next_var, name='x_next')

    xlevels_c_dim = OrderedDict(zip(range(levels[-1]+1), [x_shape[0], 64, 128, 256, 512, 512]))

    # encoding
    l_xlevels = OrderedDict()
    for level in range(levels[-1]+1):
        if level == 0:
            l_xlevel = l_x
        elif level < 3:
            l_xlevel = LT.VggEncodingLayer(l_xlevels[level-1], xlevels_c_dim[level], name='x%d' % level)
            # TODO: non-trainable encodings
            LT.set_layer_param_tags(l_xlevel, trainable=False)
        else:
            l_xlevel = LT.VggEncoding3Layer(l_xlevels[level-1], xlevels_c_dim[level], name='x%d' % level)
            # TODO: non-trainable encodings
            LT.set_layer_param_tags(l_xlevel, trainable=False)
        l_xlevels[level] = l_xlevel

    # bilinear
    l_xlevels_next_pred = OrderedDict()
    for level in levels:
        l_xlevel = l_xlevels[level]
        l_xlevel_u_outer = LT.OuterProductLayer([l_xlevel, l_u], name='x%d_u_outer' % level)
        l_xlevel_diff_pred = L.Conv2DLayer(l_xlevel_u_outer, xlevels_c_dim[level], filter_size=7, stride=1, pad=3, nonlinearity=None,
                                           name='x%d_diff_pred' % level)
        l_xlevels_next_pred[level] = L.ElemwiseSumLayer([l_xlevel, l_xlevel_diff_pred],
                                                        name='x%d_next_pred' % level)
        if tanh:
            l_xlevels_next_pred[level].name += '_unconstrained'
            l_xlevels_next_pred[level] = L.NonlinearityLayer(l_xlevels_next_pred[level], nl.tanh,
                                                             name='x%d_next_pred' % level)

    pred_layers = OrderedDict([('x', l_xlevels[0]),
                               ('x_next', l_x_next),
                               ('x0_next', l_x_next),
                               ('x_next_pred', l_xlevels_next_pred[0]),
                               ])
    pred_layers.update([('x%d' % level, l_xlevels[level]) for level in l_xlevels.keys()])
    pred_layers.update([('x%d_next_pred' % level, l_xlevels_next_pred[level]) for level in l_xlevels_next_pred.keys()])
    return pred_layers 
Author: alexlee-gk | Project: visual_dynamics | Lines: 56 | Source: net_theano.py

Example 15: build_multiscale_action_cond_encoder_net

# Required import: from lasagne import nonlinearities [as alias]
# Or: from lasagne.nonlinearities import tanh [as alias]
def build_multiscale_action_cond_encoder_net(input_shapes, levels=None, bilinear_type='share', tanh=False):
    x_shape, u_shape = input_shapes
    assert len(x_shape) == 3
    assert len(u_shape) == 1
    levels = levels or [3]
    levels = sorted(set(levels))

    X_var = T.tensor4('x')
    U_var = T.matrix('u')
    X_next_var = T.tensor4('x_next')

    l_x = L.InputLayer(shape=(None,) + x_shape, input_var=X_var, name='x')
    l_u = L.InputLayer(shape=(None,) + u_shape, input_var=U_var, name='u')
    l_x_next = L.InputLayer(shape=(None,) + x_shape, input_var=X_next_var, name='x_next')

    # multi-scale pyramid
    l_xlevels = OrderedDict()
    for level in range(levels[-1]+1):
        if level == 0:
            l_xlevel = l_x
        else:
            l_xlevel = LT.Downscale2DLayer(l_xlevels[level-1], scale_factor=2, name='x%d' % level)
            # l_xlevel = L.Pool2DLayer(l_xlevels[level-1], pool_size=2, mode='average_inc_pad', name='x%d' % level)
        l_xlevels[level] = l_xlevel

    # bilinear
    l_xlevels_next_pred = OrderedDict()
    for level in levels:
        l_xlevel = l_xlevels[level]
        l_xlevel_diff_pred = LT.create_bilinear_layer(l_xlevel, l_u, level, bilinear_type=bilinear_type, name='x%d_diff_pred' % level)
        l_xlevels_next_pred[level] = L.ElemwiseSumLayer([l_xlevel, l_xlevel_diff_pred],
                                                        name='x%d_next_pred' % level)
        if tanh:
            l_xlevels_next_pred[level].name += '_unconstrained'
            l_xlevels_next_pred[level] = L.NonlinearityLayer(l_xlevels_next_pred[level], nl.tanh,
                                                             name='x%d_next_pred' % level)

    pred_layers = OrderedDict([('x', l_xlevels[0]),
                               ('x_next', l_x_next),
                               ('x0_next', l_x_next),
                               ('x_next_pred', l_xlevels_next_pred[0]),
                               ])
    pred_layers.update([('x%d' % level, l_xlevels[level]) for level in l_xlevels.keys()])
    pred_layers.update([('x%d_next_pred' % level, l_xlevels_next_pred[level]) for level in l_xlevels_next_pred.keys()])
    return pred_layers 
Author: alexlee-gk | Project: visual_dynamics | Lines: 47 | Source: net_theano.py


Note: the lasagne.nonlinearities.tanh method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers, and copyright in the source code remains with the original authors. For distribution and use, please refer to the license of the corresponding project; do not repost without permission.