

Python nonlinearities.tanh Method Code Examples

This article collects and summarizes typical usage examples of the lasagne.nonlinearities.tanh method in Python. If you are wondering what nonlinearities.tanh does, how to use it, or where it fits, the curated code examples below may help. You can also explore further usage examples from its containing module, lasagne.nonlinearities.


The following presents 15 code examples of the nonlinearities.tanh method, ordered by popularity by default.
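
Before diving into the collected examples, here is a minimal, self-contained sketch (written for this page, not drawn from any of the projects below) showing the basic pattern: tanh is passed as a layer's nonlinearity argument, squashing that layer's outputs into (-1, 1).

from lasagne.layers import InputLayer, DenseLayer
from lasagne.nonlinearities import tanh

# a 100-dimensional input feeding a 50-unit hidden layer with tanh activations
l_in = InputLayer(shape=(None, 100))
l_hidden = DenseLayer(l_in, num_units=50, nonlinearity=tanh)  # outputs lie in (-1, 1)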

Example 1: build_generator_32

# Required import: from lasagne import nonlinearities [as alias]
# Or: from lasagne.nonlinearities import tanh [as alias]
def build_generator_32(noise=None, ngf=128):
    # noise input
    InputNoise = InputLayer(shape=(None, 100), input_var=noise)
    # FC Layer
    gnet0 = DenseLayer(InputNoise, ngf*4*4*4, W=Normal(0.02), nonlinearity=relu)
    print("Gen fc1:", gnet0.output_shape)
    # Reshape Layer
    gnet1 = ReshapeLayer(gnet0, ([0], ngf*4, 4, 4))
    print("Gen rs1:", gnet1.output_shape)
    # DeConv Layer
    gnet2 = Deconv2DLayer(gnet1, ngf*2, (4, 4), (2, 2), crop=1, W=Normal(0.02), nonlinearity=relu)
    print("Gen deconv1:", gnet2.output_shape)
    # DeConv Layer
    gnet3 = Deconv2DLayer(gnet2, ngf, (4, 4), (2, 2), crop=1, W=Normal(0.02), nonlinearity=relu)
    print("Gen deconv2:", gnet3.output_shape)
    # DeConv Layer
    gnet4 = Deconv2DLayer(gnet3, 3, (4, 4), (2, 2), crop=1, W=Normal(0.02), nonlinearity=tanh)
    print("Gen output:", gnet4.output_shape)
    return gnet4
Author: WANG-Chaoyue | Project: EvolutionaryGAN | Lines: 21 | Source: models_uncond.py
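
A hypothetical usage sketch for the generator above (variable names are illustrative, not from the project): compile a Theano function mapping noise vectors to generated 32x32 RGB images. The final tanh keeps pixel values in (-1, 1), matching training images scaled to that range.

import numpy as np
import theano
import theano.tensor as T
import lasagne

noise_var = T.matrix('noise')
generator = build_generator_32(noise=noise_var)
images = lasagne.layers.get_output(generator)      # symbolic forward pass
sample_fn = theano.function([noise_var], images)

z = np.random.uniform(-1., 1., (16, 100)).astype(theano.config.floatX)
samples = sample_fn(z)  # shape (16, 3, 32, 32), values in (-1, 1)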

Example 2: build_generator_64

# Required import: from lasagne import nonlinearities [as alias]
# Or: from lasagne.nonlinearities import tanh [as alias]
def build_generator_64(noise=None, ngf=128):
    # noise input
    InputNoise = InputLayer(shape=(None, 100), input_var=noise)
    # FC Layer
    gnet0 = DenseLayer(InputNoise, ngf*8*4*4, W=Normal(0.02), nonlinearity=relu)
    print("Gen fc1:", gnet0.output_shape)
    # Reshape Layer
    gnet1 = ReshapeLayer(gnet0, ([0], ngf*8, 4, 4))
    print("Gen rs1:", gnet1.output_shape)
    # DeConv Layer
    gnet2 = Deconv2DLayer(gnet1, ngf*8, (4, 4), (2, 2), crop=1, W=Normal(0.02), nonlinearity=relu)
    print("Gen deconv2:", gnet2.output_shape)
    # DeConv Layer
    gnet3 = Deconv2DLayer(gnet2, ngf*4, (4, 4), (2, 2), crop=1, W=Normal(0.02), nonlinearity=relu)
    print("Gen deconv3:", gnet3.output_shape)
    # DeConv Layer
    gnet4 = Deconv2DLayer(gnet3, ngf*4, (4, 4), (2, 2), crop=1, W=Normal(0.02), nonlinearity=relu)
    print("Gen deconv4:", gnet4.output_shape)
    # DeConv Layer
    gnet5 = Deconv2DLayer(gnet4, ngf*2, (4, 4), (2, 2), crop=1, W=Normal(0.02), nonlinearity=relu)
    print("Gen deconv5:", gnet5.output_shape)
    # DeConv Layer
    gnet6 = Deconv2DLayer(gnet5, 3, (3, 3), (1, 1), crop='same', W=Normal(0.02), nonlinearity=tanh)
    print("Gen output:", gnet6.output_shape)
    return gnet6
Author: WANG-Chaoyue | Project: EvolutionaryGAN | Lines: 27 | Source: models_uncond.py

Example 3: setup_transform_net

# Required import: from lasagne import nonlinearities [as alias]
# Or: from lasagne.nonlinearities import tanh [as alias]
def setup_transform_net(self, input_var=None):
    transform_net = InputLayer(shape=self.shape, input_var=input_var)
    transform_net = style_conv_block(transform_net, self.num_styles, 32, 9, 1)
    transform_net = style_conv_block(transform_net, self.num_styles, 64, 3, 2)
    transform_net = style_conv_block(transform_net, self.num_styles, 128, 3, 2)
    for _ in range(5):
        transform_net = residual_block(transform_net, self.num_styles)
    transform_net = nn_upsample(transform_net, self.num_styles)
    transform_net = nn_upsample(transform_net, self.num_styles)

    if self.net_type == 0:
        transform_net = style_conv_block(transform_net, self.num_styles, 3, 9, 1, tanh)
        transform_net = ExpressionLayer(transform_net, lambda X: 150.*X, output_shape=None)
    elif self.net_type == 1:
        transform_net = style_conv_block(transform_net, self.num_styles, 3, 9, 1, sigmoid)

    self.network['transform_net'] = transform_net
Author: joelmoniz | Project: gogh-figure | Lines: 19 | Source: model.py

Example 4: build_BiRNN_CNN

# Required import: from lasagne import nonlinearities [as alias]
# Or: from lasagne.nonlinearities import tanh [as alias]
def build_BiRNN_CNN(incoming1, incoming2, num_units, mask=None, grad_clipping=0, nonlinearity=nonlinearities.tanh,
                    precompute_input=True, num_filters=20, dropout=True, in_to_out=False):
    # first get some necessary dimensions or parameters
    conv_window = 3
    _, sent_length, _ = incoming2.output_shape

    # dropout before cnn?
    if dropout:
        incoming1 = lasagne.layers.DropoutLayer(incoming1, p=0.5)

    # construct convolution layer
    cnn_layer = lasagne.layers.Conv1DLayer(incoming1, num_filters=num_filters, filter_size=conv_window, pad='full',
                                           nonlinearity=lasagne.nonlinearities.tanh, name='cnn')
    # infer the pool size for pooling (pool size should go through all time step of cnn)
    _, _, pool_size = cnn_layer.output_shape
    # construct max pool layer
    pool_layer = lasagne.layers.MaxPool1DLayer(cnn_layer, pool_size=pool_size)
    # reshape the layer to match rnn incoming layer [batch * sent_length, num_filters, 1] --> [batch, sent_length, num_filters]
    output_cnn_layer = lasagne.layers.reshape(pool_layer, (-1, sent_length, [1]))

    # finally, concatenate the two incoming layers together.
    incoming = lasagne.layers.concat([output_cnn_layer, incoming2], axis=2)

    return build_BiRNN(incoming, num_units, mask=mask, grad_clipping=grad_clipping, nonlinearity=nonlinearity,
                       precompute_input=precompute_input, dropout=dropout, in_to_out=in_to_out) 
Author: XuezheMax | Project: LasagneNLP | Lines: 27 | Source: networks.py
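
A hypothetical usage sketch (all shapes are illustrative assumptions, not from the project): incoming1 carries per-word character embeddings flattened to (batch * sent_length, char_dim, word_length) for the Conv1D layer, while incoming2 carries word embeddings; sent_length must be concrete because it is used in the reshape.

import theano.tensor as T
import lasagne

char_var = T.tensor3('char')   # (batch * sent_length, char_dim, word_length)
word_var = T.tensor3('word')   # (batch, sent_length, word_dim)

l_char = lasagne.layers.InputLayer(shape=(None, 30, 20), input_var=char_var)
l_word = lasagne.layers.InputLayer(shape=(None, 120, 100), input_var=word_var)
l_birnn_cnn = build_BiRNN_CNN(l_char, l_word, num_units=200)
# output shape: (batch, 120, 2 * 200)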

Example 5: build_BiLSTM_CNN

# Required import: from lasagne import nonlinearities [as alias]
# Or: from lasagne.nonlinearities import tanh [as alias]
def build_BiLSTM_CNN(incoming1, incoming2, num_units, mask=None, grad_clipping=0, precompute_input=True,
                     peepholes=False, num_filters=20, dropout=True, in_to_out=False):
    # first get some necessary dimensions or parameters
    conv_window = 3
    _, sent_length, _ = incoming2.output_shape

    # dropout before cnn?
    if dropout:
        incoming1 = lasagne.layers.DropoutLayer(incoming1, p=0.5)

    # construct convolution layer
    cnn_layer = lasagne.layers.Conv1DLayer(incoming1, num_filters=num_filters, filter_size=conv_window, pad='full',
                                           nonlinearity=lasagne.nonlinearities.tanh, name='cnn')
    # infer the pool size for pooling (pool size should go through all time step of cnn)
    _, _, pool_size = cnn_layer.output_shape
    # construct max pool layer
    pool_layer = lasagne.layers.MaxPool1DLayer(cnn_layer, pool_size=pool_size)
    # reshape the layer to match lstm incoming layer [batch * sent_length, num_filters, 1] --> [batch, sent_length, num_filters]
    output_cnn_layer = lasagne.layers.reshape(pool_layer, (-1, sent_length, [1]))

    # finally, concatenate the two incoming layers together.
    incoming = lasagne.layers.concat([output_cnn_layer, incoming2], axis=2)

    return build_BiLSTM(incoming, num_units, mask=mask, grad_clipping=grad_clipping, peepholes=peepholes,
                        precompute_input=precompute_input, dropout=dropout, in_to_out=in_to_out) 
Author: XuezheMax | Project: LasagneNLP | Lines: 27 | Source: networks.py

Example 6: __init__

# Required import: from lasagne import nonlinearities [as alias]
# Or: from lasagne.nonlinearities import tanh [as alias]
def __init__(self):
        self.hdn = None
        self.rep = None
        self.fname_in = None
        self.mod2size = None
        self.mod1size = None
        self.fname_out = None

        self.lr = 0.1
        self.act = "tanh"
        self.epochs = 1000
        self.verbosity = 3
        self.dropout = 0.2
        self.momentum = 0.9
        self.untied = False
        self.l2_norm = False
        self.batch_size = 128
        self.load_model = None
        self.save_model = None
        self.write_after = None
        self.crossmodal = False
        self.exec_command = None
        self.ignore_zeroes = False 
Author: v-v | Project: BiDNN | Lines: 25 | Source: bidnn.py

Example 7: build_generator_128

# Required import: from lasagne import nonlinearities [as alias]
# Or: from lasagne.nonlinearities import tanh [as alias]
def build_generator_128(noise=None, ngf=128):
    lrelu = LeakyRectify(0.2)
    # noise input
    InputNoise = InputLayer(shape=(None, 100), input_var=noise)
    # FC Layer
    gnet0 = DenseLayer(InputNoise, ngf*16*4*4, W=Normal(0.02), nonlinearity=lrelu)
    print("Gen fc1:", gnet0.output_shape)
    # Reshape Layer
    gnet1 = ReshapeLayer(gnet0, ([0], ngf*16, 4, 4))
    print("Gen rs1:", gnet1.output_shape)
    # DeConv Layer
    gnet2 = Deconv2DLayer(gnet1, ngf*8, (4, 4), (2, 2), crop=1, W=Normal(0.02), nonlinearity=lrelu)
    print("Gen deconv1:", gnet2.output_shape)
    # DeConv Layer
    gnet3 = Deconv2DLayer(gnet2, ngf*8, (4, 4), (2, 2), crop=1, W=Normal(0.02), nonlinearity=lrelu)
    print("Gen deconv2:", gnet3.output_shape)
    # DeConv Layer
    gnet4 = Deconv2DLayer(gnet3, ngf*4, (4, 4), (2, 2), crop=1, W=Normal(0.02), nonlinearity=lrelu)
    print("Gen deconv3:", gnet4.output_shape)
    # DeConv Layer
    gnet5 = Deconv2DLayer(gnet4, ngf*4, (4, 4), (2, 2), crop=1, W=Normal(0.02), nonlinearity=lrelu)
    print("Gen deconv4:", gnet5.output_shape)
    # DeConv Layer
    gnet6 = Deconv2DLayer(gnet5, ngf*2, (4, 4), (2, 2), crop=1, W=Normal(0.02), nonlinearity=lrelu)
    print("Gen deconv5:", gnet6.output_shape)
    # DeConv Layer
    gnet7 = Deconv2DLayer(gnet6, 3, (3, 3), (1, 1), crop='same', W=Normal(0.02), nonlinearity=tanh)
    print("Gen output:", gnet7.output_shape)
    return gnet7
Author: WANG-Chaoyue | Project: EvolutionaryGAN | Lines: 31 | Source: models_uncond.py

Example 8: build_BiRNN

# Required import: from lasagne import nonlinearities [as alias]
# Or: from lasagne.nonlinearities import tanh [as alias]
def build_BiRNN(incoming, num_units, mask=None, grad_clipping=0, nonlinearity=nonlinearities.tanh,
                precompute_input=True, dropout=True, in_to_out=False):
    # construct the forward and backward RNNs. Here, the weights are initialized
    # with the Glorot uniform initializer (default arguments); other initializers
    # may be worth trying for specific tasks.

    # dropout for incoming
    if dropout:
        incoming = lasagne.layers.DropoutLayer(incoming, p=0.5)

    rnn_forward = lasagne.layers.RecurrentLayer(incoming, num_units,
                                                mask_input=mask, grad_clipping=grad_clipping,
                                                nonlinearity=nonlinearity, precompute_input=precompute_input,
                                                W_in_to_hid=lasagne.init.GlorotUniform(),
                                                W_hid_to_hid=lasagne.init.GlorotUniform(), name='forward')
    rnn_backward = lasagne.layers.RecurrentLayer(incoming, num_units,
                                                 mask_input=mask, grad_clipping=grad_clipping,
                                                 nonlinearity=nonlinearity, precompute_input=precompute_input,
                                                 W_in_to_hid=lasagne.init.GlorotUniform(),
                                                 W_hid_to_hid=lasagne.init.GlorotUniform(), backwards=True,
                                                 name='backward')

    # concatenate the outputs of forward and backward RNNs to combine them.
    concat = lasagne.layers.concat([rnn_forward, rnn_backward], axis=2, name="bi-rnn")

    # dropout for output
    if dropout:
        concat = lasagne.layers.DropoutLayer(concat, p=0.5)

    if in_to_out:
        concat = lasagne.layers.concat([concat, incoming], axis=2)

    # the shape of BiRNN output (concat) is (batch_size, input_length, 2 * num_hidden_units)
    return concat 
Author: XuezheMax | Project: LasagneNLP | Lines: 35 | Source: networks.py
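
A hypothetical usage sketch (names and sizes are illustrative): feed a (batch, seq_len, emb_dim) embedding tensor and a 0/1 mask through the BiRNN above; the result concatenates the forward and backward hidden states along the feature axis.

import theano.tensor as T
import lasagne

input_var = T.tensor3('emb')   # (batch, seq_len, emb_dim)
mask_var = T.matrix('mask')    # (batch, seq_len), 1 marks real tokens

l_emb = lasagne.layers.InputLayer(shape=(None, None, 100), input_var=input_var)
l_mask = lasagne.layers.InputLayer(shape=(None, None), input_var=mask_var)
l_birnn = build_BiRNN(l_emb, num_units=200, mask=l_mask)
# output shape: (batch, seq_len, 2 * 200)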

Example 9: build_BiLSTM_HighCNN

# Required import: from lasagne import nonlinearities [as alias]
# Or: from lasagne.nonlinearities import tanh [as alias]
def build_BiLSTM_HighCNN(incoming1, incoming2, num_units, mask=None, grad_clipping=0, precompute_input=True,
                         peepholes=False, num_filters=20, dropout=True, in_to_out=False):
    # first get some necessary dimensions or parameters
    conv_window = 3
    _, sent_length, _ = incoming2.output_shape

    # dropout before cnn
    if dropout:
        incoming1 = lasagne.layers.DropoutLayer(incoming1, p=0.5)

    # construct convolution layer
    cnn_layer = lasagne.layers.Conv1DLayer(incoming1, num_filters=num_filters, filter_size=conv_window, pad='full',
                                           nonlinearity=lasagne.nonlinearities.tanh, name='cnn')
    # infer the pool size for pooling (pool size should go through all time step of cnn)
    _, _, pool_size = cnn_layer.output_shape
    # construct max pool layer
    pool_layer = lasagne.layers.MaxPool1DLayer(cnn_layer, pool_size=pool_size)
    # reshape the layer to match highway incoming layer [batch * sent_length, num_filters, 1] --> [batch * sent_length, num_filters]
    output_cnn_layer = lasagne.layers.reshape(pool_layer, ([0], -1))

    # dropout after cnn?
    # if dropout:
    #     output_cnn_layer = lasagne.layers.DropoutLayer(output_cnn_layer, p=0.5)

    # construct highway layer
    highway_layer = HighwayDenseLayer(output_cnn_layer, nonlinearity=nonlinearities.rectify)

    # reshape the layer to match lstm incoming layer [batch * sent_length, num_filters] --> [batch, sent_length, num_filters]
    output_highway_layer = lasagne.layers.reshape(highway_layer, (-1, sent_length, [1]))

    # finally, concatenate the two incoming layers together.
    incoming = lasagne.layers.concat([output_highway_layer, incoming2], axis=2)

    return build_BiLSTM(incoming, num_units, mask=mask, grad_clipping=grad_clipping, peepholes=peepholes,
                        precompute_input=precompute_input, dropout=dropout, in_to_out=in_to_out) 
Author: XuezheMax | Project: LasagneNLP | Lines: 37 | Source: networks.py

Example 10: __init__

# Required import: from lasagne import nonlinearities [as alias]
# Or: from lasagne.nonlinearities import tanh [as alias]
def __init__(
            self,
            env_spec,
            hidden_sizes=(32, 32),
            hidden_nonlinearity=NL.tanh,
            output_b_init=None,
            weight_signal=1.0,
            weight_nonsignal=1.0, 
            weight_smc=1.0):
        """
        :param env_spec: A spec for the mdp.
        :param hidden_sizes: list of sizes for the fully connected hidden layers
        :param hidden_nonlinearity: nonlinearity used for each hidden layer
        :return:
        """
        Serializable.quick_init(self, locals())
        assert isinstance(env_spec.action_space, Discrete)
        output_b_init = compute_output_b_init(env_spec.action_space.names,
            output_b_init, weight_signal, weight_nonsignal, weight_smc)

        prob_network = MLP(
            input_shape=(env_spec.observation_space.flat_dim,),
            output_dim=env_spec.action_space.n,
            hidden_sizes=hidden_sizes,
            hidden_nonlinearity=hidden_nonlinearity,
            output_nonlinearity=NL.softmax,
            output_b_init=output_b_init
        )
        super(InitCategoricalMLPPolicy, self).__init__(env_spec, hidden_sizes,
            hidden_nonlinearity, prob_network)


# Modified from RLLab GRUNetwork 
Author: vicariousinc | Project: pixelworld | Lines: 35 | Source: init_policy.py

Example 11: _get_activation

# Required import: from lasagne import nonlinearities [as alias]
# Or: from lasagne.nonlinearities import tanh [as alias]
def _get_activation(self, l):
        nonlinearities = {'Rectifier': nl.rectify,
                          'Sigmoid': nl.sigmoid,
                          'Tanh': nl.tanh,
                          'Softmax': nl.softmax,
                          'Linear': nl.linear,
                          'ExpLin': explin}

        assert l.type in nonlinearities,\
            "Layer type `%s` is not supported for `%s`." % (l.type, l.name)
        return nonlinearities[l.type] 
Author: aigamedev | Project: scikit-neuralnetwork | Lines: 13 | Source: mlp.py

Example 12: build_small_action_cond_encoder_net

# Required import: from lasagne import nonlinearities [as alias]
# Or: from lasagne.nonlinearities import tanh [as alias]
def build_small_action_cond_encoder_net(input_shapes):
    x_shape, u_shape = input_shapes
    x2_c_dim = x1_c_dim = x_shape[0]
    x1_shape = (x1_c_dim, x_shape[1]//2, x_shape[2]//2)
    x2_shape = (x2_c_dim, x1_shape[1]//2, x1_shape[2]//2)
    y2_dim = 64
    X_var = T.tensor4('X')
    U_var = T.matrix('U')

    l_x = L.InputLayer(shape=(None,) + x_shape, input_var=X_var, name='x')
    l_u = L.InputLayer(shape=(None,) + u_shape, input_var=U_var, name='u')

    l_x1 = L.Conv2DLayer(l_x, x1_c_dim, filter_size=6, stride=2, pad=2,
                         W=init.Normal(std=0.01),
                         nonlinearity=nl.rectify)
    l_x2 = L.Conv2DLayer(l_x1, x2_c_dim, filter_size=6, stride=2, pad=2,
                         W=init.Normal(std=0.01),
                         nonlinearity=nl.rectify)

    l_y2 = L.DenseLayer(l_x2, y2_dim, nonlinearity=None, name='y')
    l_y2_diff_pred = LT.BilinearLayer([l_y2, l_u], name='y_diff_pred')
    l_y2_next_pred = L.ElemwiseMergeLayer([l_y2, l_y2_diff_pred], T.add)
    l_x2_next_pred_flat = L.DenseLayer(l_y2_next_pred, np.prod(x2_shape), nonlinearity=None)
    l_x2_next_pred = L.ReshapeLayer(l_x2_next_pred_flat, ([0],) + x2_shape)

    l_x1_next_pred = LT.Deconv2DLayer(l_x2_next_pred, x2_c_dim, filter_size=6, stride=2, pad=2,
                                   W=init.Normal(std=0.01),
                                   nonlinearity=nl.rectify)
    l_x_next_pred = LT.Deconv2DLayer(l_x1_next_pred, x1_c_dim, filter_size=6, stride=2, pad=2,
                                  W=init.Normal(std=0.01),
                                  nonlinearity=nl.tanh,
                                  name='x_next_pred')

    X_next_pred_var = lasagne.layers.get_output(l_x_next_pred)
    X_diff_var = T.tensor4('X_diff')
    X_next_var = X_var + X_diff_var
    loss = ((X_next_var - X_next_pred_var) ** 2).mean(axis=0).sum() / 2.

    net_name = 'SmallActionCondEncoderNet'
    input_vars = OrderedDict([(var.name, var) for var in [X_var, U_var, X_diff_var]])
    pred_layers = OrderedDict([('y_diff_pred', l_y2_diff_pred), ('y', l_y2), ('x0_next_pred', l_x_next_pred)])
    return net_name, input_vars, pred_layers, loss 
Author: alexlee-gk | Project: visual_dynamics | Lines: 44 | Source: net_theano.py

Example 13: build_vgg_action_cond_encoder_net

# Required import: from lasagne import nonlinearities [as alias]
# Or: from lasagne.nonlinearities import tanh [as alias]
def build_vgg_action_cond_encoder_net(input_shapes, levels=None, x1_c_dim=16, bilinear_type='share', tanh=False):
    x_shape, u_shape = input_shapes
    assert len(x_shape) == 3
    assert len(u_shape) == 1
    levels = levels or [3]
    levels = sorted(set(levels))

    X_var = T.tensor4('x')
    U_var = T.matrix('u')
    X_next_var = T.tensor4('x_next')

    l_x = L.InputLayer(shape=(None,) + x_shape, input_var=X_var, name='x')
    l_u = L.InputLayer(shape=(None,) + u_shape, input_var=U_var, name='u')
    l_x_next = L.InputLayer(shape=(None,) + x_shape, input_var=X_next_var, name='x_next')

    xlevels_c_dim = OrderedDict()
    for level in range(levels[-1]+1):
        if level == 0:
            xlevels_c_dim[level] = x_shape[0]
        else:
            xlevels_c_dim[level] = x1_c_dim * 2**(level-1)

    # encoding
    l_xlevels = OrderedDict()
    for level in range(levels[-1]+1):
        if level == 0:
            l_xlevel = l_x
        else:
            l_xlevel = LT.VggEncodingLayer(l_xlevels[level-1], xlevels_c_dim[level], name='x%d' % level)
        l_xlevels[level] = l_xlevel

    # bilinear
    l_xlevels_next_pred = OrderedDict()
    for level in levels:
        l_xlevel = l_xlevels[level]
        l_xlevel_diff_pred = LT.create_bilinear_layer(l_xlevel, l_u, level, bilinear_type=bilinear_type, name='x%d_diff_pred' % level)
        l_xlevels_next_pred[level] = L.ElemwiseSumLayer([l_xlevel, l_xlevel_diff_pred],
                                                        name='x%d_next_pred' % level)
        if tanh:
            l_xlevels_next_pred[level].name += '_unconstrained'
            l_xlevels_next_pred[level] = L.NonlinearityLayer(l_xlevels_next_pred[level], nl.tanh,
                                                             name='x%d_next_pred' % level)

    pred_layers = OrderedDict([('x', l_xlevels[0]),
                               ('x_next', l_x_next),
                               ('x0_next', l_x_next),
                               ('x_next_pred', l_xlevels_next_pred[0]),
                               ])
    pred_layers.update([('x%d' % level, l_xlevels[level]) for level in l_xlevels.keys()])
    pred_layers.update([('x%d_next_pred' % level, l_xlevels_next_pred[level]) for level in l_xlevels_next_pred.keys()])
    return pred_layers 
Author: alexlee-gk | Project: visual_dynamics | Lines: 53 | Source: net_theano.py

Example 14: build_vgg_fcn_action_cond_encoder_net

# Required import: from lasagne import nonlinearities [as alias]
# Or: from lasagne.nonlinearities import tanh [as alias]
def build_vgg_fcn_action_cond_encoder_net(input_shapes, levels=None, bilinear_type='share', tanh=False):
    x_shape, u_shape = input_shapes
    assert len(x_shape) == 3
    assert len(u_shape) == 1
    levels = levels or [3]
    levels = sorted(set(levels))

    X_var = T.tensor4('x')
    U_var = T.matrix('u')
    X_next_var = T.tensor4('x_next')

    l_x = L.InputLayer(shape=(None,) + x_shape, input_var=X_var, name='x')
    l_u = L.InputLayer(shape=(None,) + u_shape, input_var=U_var, name='u')
    l_x_next = L.InputLayer(shape=(None,) + x_shape, input_var=X_next_var, name='x_next')

    xlevels_c_dim = OrderedDict(zip(range(levels[-1]+1), [x_shape[0], 64, 128, 256, 512, 512]))

    # encoding
    l_xlevels = OrderedDict()
    for level in range(levels[-1]+1):
        if level == 0:
            l_xlevel = l_x
        elif level < 3:
            l_xlevel = LT.VggEncodingLayer(l_xlevels[level-1], xlevels_c_dim[level], name='x%d' % level)
            # TODO: non-trainable encodings
            LT.set_layer_param_tags(l_xlevel, trainable=False)
        else:
            l_xlevel = LT.VggEncoding3Layer(l_xlevels[level-1], xlevels_c_dim[level], name='x%d' % level)
            # TODO: non-trainable encodings
            LT.set_layer_param_tags(l_xlevel, trainable=False)
        l_xlevels[level] = l_xlevel

    # bilinear
    l_xlevels_next_pred = OrderedDict()
    for level in levels:
        l_xlevel = l_xlevels[level]
        l_xlevel_u_outer = LT.OuterProductLayer([l_xlevel, l_u], name='x%d_u_outer' % level)
        l_xlevel_diff_pred = L.Conv2DLayer(l_xlevel_u_outer, xlevels_c_dim[level], filter_size=7, stride=1, pad=3, nonlinearity=None,
                                           name='x%d_diff_pred' % level)
        l_xlevels_next_pred[level] = L.ElemwiseSumLayer([l_xlevel, l_xlevel_diff_pred],
                                                        name='x%d_next_pred' % level)
        if tanh:
            l_xlevels_next_pred[level].name += '_unconstrained'
            l_xlevels_next_pred[level] = L.NonlinearityLayer(l_xlevels_next_pred[level], nl.tanh,
                                                             name='x%d_next_pred' % level)

    pred_layers = OrderedDict([('x', l_xlevels[0]),
                               ('x_next', l_x_next),
                               ('x0_next', l_x_next),
                               ('x_next_pred', l_xlevels_next_pred[0]),
                               ])
    pred_layers.update([('x%d' % level, l_xlevels[level]) for level in l_xlevels.keys()])
    pred_layers.update([('x%d_next_pred' % level, l_xlevels_next_pred[level]) for level in l_xlevels_next_pred.keys()])
    return pred_layers 
Author: alexlee-gk | Project: visual_dynamics | Lines: 56 | Source: net_theano.py

Example 15: build_multiscale_action_cond_encoder_net

# Required import: from lasagne import nonlinearities [as alias]
# Or: from lasagne.nonlinearities import tanh [as alias]
def build_multiscale_action_cond_encoder_net(input_shapes, levels=None, bilinear_type='share', tanh=False):
    x_shape, u_shape = input_shapes
    assert len(x_shape) == 3
    assert len(u_shape) == 1
    levels = levels or [3]
    levels = sorted(set(levels))

    X_var = T.tensor4('x')
    U_var = T.matrix('u')
    X_next_var = T.tensor4('x_next')

    l_x = L.InputLayer(shape=(None,) + x_shape, input_var=X_var, name='x')
    l_u = L.InputLayer(shape=(None,) + u_shape, input_var=U_var, name='u')
    l_x_next = L.InputLayer(shape=(None,) + x_shape, input_var=X_next_var, name='x_next')

    # multi-scale pyramid
    l_xlevels = OrderedDict()
    for level in range(levels[-1]+1):
        if level == 0:
            l_xlevel = l_x
        else:
            l_xlevel = LT.Downscale2DLayer(l_xlevels[level-1], scale_factor=2, name='x%d' % level)
            # l_xlevel = L.Pool2DLayer(l_xlevels[level-1], pool_size=2, mode='average_inc_pad', name='x%d' % level)
        l_xlevels[level] = l_xlevel

    # bilinear
    l_xlevels_next_pred = OrderedDict()
    for level in levels:
        l_xlevel = l_xlevels[level]
        l_xlevel_diff_pred = LT.create_bilinear_layer(l_xlevel, l_u, level, bilinear_type=bilinear_type, name='x%d_diff_pred' % level)
        l_xlevels_next_pred[level] = L.ElemwiseSumLayer([l_xlevel, l_xlevel_diff_pred],
                                                        name='x%d_next_pred' % level)
        if tanh:
            l_xlevels_next_pred[level].name += '_unconstrained'
            l_xlevels_next_pred[level] = L.NonlinearityLayer(l_xlevels_next_pred[level], nl.tanh,
                                                             name='x%d_next_pred' % level)

    pred_layers = OrderedDict([('x', l_xlevels[0]),
                               ('x_next', l_x_next),
                               ('x0_next', l_x_next),
                               ('x_next_pred', l_xlevels_next_pred[0]),
                               ])
    pred_layers.update([('x%d' % level, l_xlevels[level]) for level in l_xlevels.keys()])
    pred_layers.update([('x%d_next_pred' % level, l_xlevels_next_pred[level]) for level in l_xlevels_next_pred.keys()])
    return pred_layers 
Author: alexlee-gk | Project: visual_dynamics | Lines: 47 | Source: net_theano.py
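
A hypothetical call with illustrative shapes (not from the project): build the multiscale predictor for 32x32 RGB observations and a 4-dimensional action. Level 0 is included so that the full-resolution 'x_next_pred' entry exists, and tanh=True wraps each prediction in nl.tanh to constrain it to (-1, 1).

# assumes 32x32 RGB frames and 4-dim actions; shapes are illustrative
pred_layers = build_multiscale_action_cond_encoder_net(
    input_shapes=[(3, 32, 32), (4,)],
    levels=[0, 1, 2],   # include level 0 so 'x_next_pred' is defined
    tanh=True)          # squash predictions into (-1, 1)
# pred_layers['x_next_pred'] is the tanh-constrained full-resolution prediction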


Note: The lasagne.nonlinearities.tanh examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub/MSDocs. The code snippets were selected from open-source projects contributed by many developers, and copyright remains with the original authors. For distribution and use, please refer to the corresponding project's License; do not reproduce without permission.