

Python lasagne.nonlinearities code examples

This article collects typical usage examples of lasagne.nonlinearities in Python. If you are wondering how lasagne.nonlinearities is used in practice, how to call it, or what real examples look like, the hand-picked code samples below may help. You can also explore further usage examples from the lasagne package that this module belongs to.


Fifteen code examples of lasagne.nonlinearities are shown below, sorted by popularity by default.
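
Before turning to the examples, the snippet below is a minimal, self-contained sketch written for this article (it is not taken from any of the projects referenced below). It shows the basic usage pattern: members of lasagne.nonlinearities are passed to a layer's nonlinearity argument, either directly, as an instance of a parameterized class such as LeakyRectify, or resolved from a string name with getattr.

import theano.tensor as T
import lasagne
from lasagne import nonlinearities

# A tiny MLP; each layer takes its activation from lasagne.nonlinearities.
input_var = T.matrix('inputs')
network = lasagne.layers.InputLayer(shape=(None, 100), input_var=input_var)

# Plain function nonlinearities: rectify, tanh, sigmoid, softmax, identity, ...
network = lasagne.layers.DenseLayer(network, num_units=64,
                                    nonlinearity=nonlinearities.rectify)

# Parameterized nonlinearities are classes and must be instantiated first.
network = lasagne.layers.DenseLayer(network, num_units=64,
                                    nonlinearity=nonlinearities.LeakyRectify(0.1))

# Resolving a nonlinearity by name, as several of the examples below do.
out_nl = getattr(nonlinearities, 'softmax')
network = lasagne.layers.DenseLayer(network, num_units=10, nonlinearity=out_nl)

prediction = lasagne.layers.get_output(network)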

Example 1: build_BiRNN_CNN

# Required module import: import lasagne [as alias]
# Or alternatively: from lasagne import nonlinearities [as alias]
def build_BiRNN_CNN(incoming1, incoming2, num_units, mask=None, grad_clipping=0, nonlinearity=nonlinearities.tanh,
                    precompute_input=True, num_filters=20, dropout=True, in_to_out=False):
    # first get some necessary dimensions or parameters
    conv_window = 3
    _, sent_length, _ = incoming2.output_shape

    # dropout before cnn?
    if dropout:
        incoming1 = lasagne.layers.DropoutLayer(incoming1, p=0.5)

    # construct convolution layer
    cnn_layer = lasagne.layers.Conv1DLayer(incoming1, num_filters=num_filters, filter_size=conv_window, pad='full',
                                           nonlinearity=lasagne.nonlinearities.tanh, name='cnn')
    # infer the pool size for pooling (the pool should span all time steps of the cnn output)
    _, _, pool_size = cnn_layer.output_shape
    # construct max pool layer
    pool_layer = lasagne.layers.MaxPool1DLayer(cnn_layer, pool_size=pool_size)
    # reshape the layer to match rnn incoming layer [batch * sent_length, num_filters, 1] --> [batch, sent_length, num_filters]
    output_cnn_layer = lasagne.layers.reshape(pool_layer, (-1, sent_length, [1]))

    # finally, concatenate the two incoming layers together.
    incoming = lasagne.layers.concat([output_cnn_layer, incoming2], axis=2)

    return build_BiRNN(incoming, num_units, mask=mask, grad_clipping=grad_clipping, nonlinearity=nonlinearity,
                       precompute_input=precompute_input, dropout=dropout, in_to_out=in_to_out) 
Author: XuezheMax | Project: LasagneNLP | Lines: 27 | Source file: networks.py
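
To show how this builder might be wired up, here is a hypothetical usage sketch; it assumes build_BiRNN_CNN (and the build_BiRNN it calls, Example 10 below) are in scope, and the variable names, tensor shapes (50 words per sentence, 30-dimensional character embeddings, 20 characters per word, 100-dimensional word embeddings) and hyperparameters are illustrative assumptions rather than values from LasagneNLP. incoming1 carries character-level features whose batch axis is batch_size * sent_length, and incoming2 carries the word embeddings.

import theano.tensor as T
import lasagne

# All shapes below are illustrative assumptions.
word_var = T.tensor3('word_emb')   # (batch, sent_length, word_dim)
char_var = T.tensor3('char_emb')   # (batch * sent_length, char_dim, max_word_len)
mask_var = T.matrix('mask')        # (batch, sent_length)

word_layer = lasagne.layers.InputLayer(shape=(None, 50, 100), input_var=word_var)
char_layer = lasagne.layers.InputLayer(shape=(None, 30, 20), input_var=char_var)
mask_layer = lasagne.layers.InputLayer(shape=(None, 50), input_var=mask_var)

# The character CNN output is concatenated with the word embeddings and fed to the BiRNN.
bi_rnn = build_BiRNN_CNN(char_layer, word_layer, num_units=128, mask=mask_layer,
                         grad_clipping=5, nonlinearity=lasagne.nonlinearities.tanh)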

Example 2: build_BiLSTM_CNN

# Required module import: import lasagne [as alias]
# Or alternatively: from lasagne import nonlinearities [as alias]
def build_BiLSTM_CNN(incoming1, incoming2, num_units, mask=None, grad_clipping=0, precompute_input=True,
                     peepholes=False, num_filters=20, dropout=True, in_to_out=False):
    # first get some necessary dimensions or parameters
    conv_window = 3
    _, sent_length, _ = incoming2.output_shape

    # dropout before cnn?
    if dropout:
        incoming1 = lasagne.layers.DropoutLayer(incoming1, p=0.5)

    # construct convolution layer
    cnn_layer = lasagne.layers.Conv1DLayer(incoming1, num_filters=num_filters, filter_size=conv_window, pad='full',
                                           nonlinearity=lasagne.nonlinearities.tanh, name='cnn')
    # infer the pool size for pooling (the pool should span all time steps of the cnn output)
    _, _, pool_size = cnn_layer.output_shape
    # construct max pool layer
    pool_layer = lasagne.layers.MaxPool1DLayer(cnn_layer, pool_size=pool_size)
    # reshape the layer to match lstm incoming layer [batch * sent_length, num_filters, 1] --> [batch, sent_length, num_filters]
    output_cnn_layer = lasagne.layers.reshape(pool_layer, (-1, sent_length, [1]))

    # finally, concatenate the two incoming layers together.
    incoming = lasagne.layers.concat([output_cnn_layer, incoming2], axis=2)

    return build_BiLSTM(incoming, num_units, mask=mask, grad_clipping=grad_clipping, peepholes=peepholes,
                        precompute_input=precompute_input, dropout=dropout, in_to_out=in_to_out) 
Author: XuezheMax | Project: LasagneNLP | Lines: 27 | Source file: networks.py

Example 3: build_classfication_model_from_vgg16

# Required module import: import lasagne [as alias]
# Or alternatively: from lasagne import nonlinearities [as alias]
def build_classfication_model_from_vgg16():
    layer_list, vgg_whole, vgg_input_var = vgg16.vgg16.build_model()

    vgg_cut = layer_list['fc8']

    aug_var = theano.tensor.matrix('aug_var')
    aug_layer = lasagne.layers.InputLayer(shape=(None, aug_dim), input_var=aug_var)

    layer_list['aggregate_layer'] = lasagne.layers.ConcatLayer([vgg_cut, aug_layer], axis=1)

    layer_list['last_sigmoid'] = lasagne.layers.DenseLayer(incoming=layer_list['aggregate_layer'],
                                                           num_units=n_binaryclassifier,
                                                           nonlinearity=lasagne.nonlinearities.sigmoid)
    network = layer_list['last_sigmoid']

    latter_param = [layer_list['last_sigmoid'].W, layer_list['last_sigmoid'].b]
    all_param = lasagne.layers.get_all_params(network, trainable=True)

    return network, vgg_whole, layer_list, all_param, latter_param, vgg_input_var, aug_var
Author: SBU-BMI | Project: u24_lymphocyte | Lines: 19 | Source file: deep_segmentation_deconv_necrosis_alt2.py

Example 4: conv

# Required module import: import lasagne [as alias]
# Or alternatively: from lasagne import nonlinearities [as alias]
def conv(network, batch_norm, num_layers, num_filters, filter_size, pad,
         pool_size, dropout):
    for k in range(num_layers):
        network = lnn.layers.Conv2DLayer(
            network, num_filters=num_filters,
            filter_size=filter_size,
            W=lnn.init.Orthogonal(gain=np.sqrt(2 / (1 + .1 ** 2))),
            pad=pad,
            nonlinearity=lnn.nonlinearities.rectify,
            name='Conv_{}'.format(k))
        if batch_norm:
            network = lnn.layers.batch_norm(network)

    if pool_size:
        network = lnn.layers.MaxPool2DLayer(network, pool_size=pool_size,
                                            name='Pool')
    if dropout > 0.0:
        network = lnn.layers.DropoutLayer(network, p=dropout)

    return network 
Author: fdlm | Project: chordrec | Lines: 22 | Source file: blocks.py

Example 5: gap

# Required module import: import lasagne [as alias]
# Or alternatively: from lasagne import nonlinearities [as alias]
def gap(network, out_size, batch_norm,
        gap_nonlinearity, out_nonlinearity):

    gap_nonlinearity = getattr(lnn.nonlinearities, gap_nonlinearity)
    out_nonlinearity = getattr(lnn.nonlinearities, out_nonlinearity)

    # output classification layer
    network = lnn.layers.Conv2DLayer(
        network, num_filters=out_size, filter_size=1,
        nonlinearity=gap_nonlinearity, name='Output_Conv')
    if batch_norm:
        network = lnn.layers.batch_norm(network)

    network = lnn.layers.Pool2DLayer(
        network, pool_size=network.output_shape[-2:], ignore_border=False,
        mode='average_exc_pad', name='GlobalAveragePool')
    network = lnn.layers.FlattenLayer(network, name='Flatten')

    network = lnn.layers.NonlinearityLayer(
        network, nonlinearity=out_nonlinearity, name='output')

    return network 
Author: fdlm | Project: chordrec | Lines: 24 | Source file: blocks.py

Example 6: dense

# Required module import: import lasagne [as alias]
# Or alternatively: from lasagne import nonlinearities [as alias]
def dense(network, batch_norm, nonlinearity, num_layers, num_units,
          dropout):

    nl = getattr(lnn.nonlinearities, nonlinearity)

    for i in range(num_layers):
        network = lnn.layers.DenseLayer(
            network, num_units=num_units, nonlinearity=nl,
            name='fc-{}'.format(i)
        )
        if batch_norm:
            network = lnn.layers.batch_norm(network)
        if dropout > 0.0:
            network = lnn.layers.DropoutLayer(network, p=dropout)

    return network 
Author: fdlm | Project: chordrec | Lines: 18 | Source file: blocks.py
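
Examples 5 and 6 (and Example 14 below) resolve nonlinearities from plain strings with getattr(lnn.nonlinearities, name), which lets a configuration dict choose the activation. A minimal sketch of that pattern follows; the config values and layer sizes are assumptions for illustration.

import lasagne as lnn

# Hypothetical configuration; any function name from lnn.nonlinearities works here.
config = {'nonlinearity': 'rectify', 'out_nonlinearity': 'softmax'}

hidden_nl = getattr(lnn.nonlinearities, config['nonlinearity'])      # -> lnn.nonlinearities.rectify
output_nl = getattr(lnn.nonlinearities, config['out_nonlinearity'])  # -> lnn.nonlinearities.softmax

network = lnn.layers.InputLayer(shape=(None, 128))
network = lnn.layers.DenseLayer(network, num_units=256, nonlinearity=hidden_nl)
network = lnn.layers.DenseLayer(network, num_units=25, nonlinearity=output_nl)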

Example 7: build_critic

# Required module import: import lasagne [as alias]
# Or alternatively: from lasagne import nonlinearities [as alias]
def build_critic(input_var=None):
    from lasagne.layers import (InputLayer, Conv2DLayer, ReshapeLayer,
                                DenseLayer)
    try:
        from lasagne.layers.dnn import batch_norm_dnn as batch_norm
    except ImportError:
        from lasagne.layers import batch_norm
    from lasagne.nonlinearities import LeakyRectify
    lrelu = LeakyRectify(0.2)
    # input: (None, 1, 28, 28)
    layer = InputLayer(shape=(None, 1, 28, 28), input_var=input_var)
    # two convolutions
    layer = batch_norm(Conv2DLayer(layer, 64, 5, stride=2, pad='same',
                                   nonlinearity=lrelu))
    layer = batch_norm(Conv2DLayer(layer, 128, 5, stride=2, pad='same',
                                   nonlinearity=lrelu))
    # fully-connected layer
    layer = batch_norm(DenseLayer(layer, 1024, nonlinearity=lrelu))
    # output layer (linear)
    layer = DenseLayer(layer, 1, nonlinearity=None)
    print ("critic output:", layer.output_shape)
    return layer 
Author: uoguelph-mlrg | Project: Theano-MPI | Lines: 24 | Source file: lsgan.py

Example 8: build_critic

# Required module import: import lasagne [as alias]
# Or alternatively: from lasagne import nonlinearities [as alias]
def build_critic(input_var=None):
    from lasagne.layers import (InputLayer, Conv2DLayer, ReshapeLayer,
                                DenseLayer)
    try:
        from lasagne.layers.dnn import batch_norm_dnn as batch_norm
    except ImportError:
        from lasagne.layers import batch_norm
    from lasagne.nonlinearities import LeakyRectify
    lrelu = LeakyRectify(0.2)
    # input: (None, 1, 28, 28)
    layer = InputLayer(shape=(None, 1, 28, 28), input_var=input_var)
    # two convolutions
    layer = batch_norm(Conv2DLayer(layer, 64, 5, stride=2, pad='same',
                                   nonlinearity=lrelu))
    layer = batch_norm(Conv2DLayer(layer, 128, 5, stride=2, pad='same',
                                   nonlinearity=lrelu))
    # fully-connected layer
    layer = batch_norm(DenseLayer(layer, 1024, nonlinearity=lrelu))
    # output layer (linear and without bias)
    layer = DenseLayer(layer, 1, nonlinearity=None, b=None)
    print ("critic output:", layer.output_shape)
    return layer 
Author: uoguelph-mlrg | Project: Theano-MPI | Lines: 24 | Source file: wgan.py

Example 9: __init__

# Required module import: import lasagne [as alias]
# Or alternatively: from lasagne import nonlinearities [as alias]
def __init__(self, W_in=init.GlorotUniform(), W_hid=init.GlorotUniform(),
                 W_cell=init.GlorotUniform(), b=init.Constant(0.),
                 nonlinearity=nonlinearities.sigmoid):
        self.W_in = W_in
        self.W_hid = W_hid
        # Don't store a cell weight vector when cell is None
        if W_cell is not None:
            self.W_cell = W_cell
        self.b = b
        # For the nonlinearity, if None is supplied, use identity
        if nonlinearity is None:
            self.nonlinearity = nonlinearities.identity
        else:
            self.nonlinearity = nonlinearity 
Author: alexlee-gk | Project: visual_dynamics | Lines: 16 | Source file: layers_theano.py
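
This constructor follows the same convention as lasagne.layers.Gate: passing nonlinearity=None falls back to nonlinearities.identity, so the gate becomes linear. The following short sketch illustrates that convention with the stock Gate class; the layer sizes are assumptions for illustration.

import lasagne
from lasagne import nonlinearities

# nonlinearity=None is replaced by nonlinearities.identity inside Gate,
# so this gate applies no squashing (defined here only to show the fallback).
linear_gate = lasagne.layers.Gate(W_cell=None, nonlinearity=None)
sigmoid_gate = lasagne.layers.Gate(nonlinearity=nonlinearities.sigmoid)

l_in = lasagne.layers.InputLayer(shape=(None, 20, 30))
l_lstm = lasagne.layers.LSTMLayer(
    l_in, num_units=64,
    ingate=sigmoid_gate,
    # the cell candidate conventionally uses tanh and no peephole weight
    cell=lasagne.layers.Gate(W_cell=None, nonlinearity=nonlinearities.tanh))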

Example 10: build_BiRNN

# Required module import: import lasagne [as alias]
# Or alternatively: from lasagne import nonlinearities [as alias]
def build_BiRNN(incoming, num_units, mask=None, grad_clipping=0, nonlinearity=nonlinearities.tanh,
                precompute_input=True, dropout=True, in_to_out=False):
    # construct the forward and backward rnns. Here the Ws are initialized with the Glorot uniform initializer with default arguments.
    # Need to try other initializers for specific tasks.

    # dropout for incoming
    if dropout:
        incoming = lasagne.layers.DropoutLayer(incoming, p=0.5)

    rnn_forward = lasagne.layers.RecurrentLayer(incoming, num_units,
                                                mask_input=mask, grad_clipping=grad_clipping,
                                                nonlinearity=nonlinearity, precompute_input=precompute_input,
                                                W_in_to_hid=lasagne.init.GlorotUniform(),
                                                W_hid_to_hid=lasagne.init.GlorotUniform(), name='forward')
    rnn_backward = lasagne.layers.RecurrentLayer(incoming, num_units,
                                                 mask_input=mask, grad_clipping=grad_clipping,
                                                 nonlinearity=nonlinearity, precompute_input=precompute_input,
                                                 W_in_to_hid=lasagne.init.GlorotUniform(),
                                                 W_hid_to_hid=lasagne.init.GlorotUniform(), backwards=True,
                                                 name='backward')

    # concatenate the outputs of forward and backward RNNs to combine them.
    concat = lasagne.layers.concat([rnn_forward, rnn_backward], axis=2, name="bi-rnn")

    # dropout for output
    if dropout:
        concat = lasagne.layers.DropoutLayer(concat, p=0.5)

    if in_to_out:
        concat = lasagne.layers.concat([concat, incoming], axis=2)

    # the shape of BiRNN output (concat) is (batch_size, input_length, 2 * num_hidden_units)
    return concat 
Author: XuezheMax | Project: LasagneNLP | Lines: 35 | Source file: networks.py

Example 11: build_BiLSTM_HighCNN

# Required module import: import lasagne [as alias]
# Or alternatively: from lasagne import nonlinearities [as alias]
def build_BiLSTM_HighCNN(incoming1, incoming2, num_units, mask=None, grad_clipping=0, precompute_input=True,
                         peepholes=False, num_filters=20, dropout=True, in_to_out=False):
    # first get some necessary dimensions or parameters
    conv_window = 3
    _, sent_length, _ = incoming2.output_shape

    # dropout before cnn
    if dropout:
        incoming1 = lasagne.layers.DropoutLayer(incoming1, p=0.5)

    # construct convolution layer
    cnn_layer = lasagne.layers.Conv1DLayer(incoming1, num_filters=num_filters, filter_size=conv_window, pad='full',
                                           nonlinearity=lasagne.nonlinearities.tanh, name='cnn')
    # infer the pool size for pooling (the pool should span all time steps of the cnn output)
    _, _, pool_size = cnn_layer.output_shape
    # construct max pool layer
    pool_layer = lasagne.layers.MaxPool1DLayer(cnn_layer, pool_size=pool_size)
    # reshape the layer to match highway incoming layer [batch * sent_length, num_filters, 1] --> [batch * sent_length, num_filters]
    output_cnn_layer = lasagne.layers.reshape(pool_layer, ([0], -1))

    # dropout after cnn?
    # if dropout:
    # output_cnn_layer = lasagne.layers.DropoutLayer(output_cnn_layer, p=0.5)

    # construct highway layer
    highway_layer = HighwayDenseLayer(output_cnn_layer, nonlinearity=nonlinearities.rectify)

    # reshape the layer to match lstm incoming layer [batch * sent_length, num_filters] --> [batch, sent_length, num_filters]
    output_highway_layer = lasagne.layers.reshape(highway_layer, (-1, sent_length, [1]))

    # finally, concatenate the two incoming layers together.
    incoming = lasagne.layers.concat([output_highway_layer, incoming2], axis=2)

    return build_BiLSTM(incoming, num_units, mask=mask, grad_clipping=grad_clipping, peepholes=peepholes,
                        precompute_input=precompute_input, dropout=dropout, in_to_out=in_to_out) 
Author: XuezheMax | Project: LasagneNLP | Lines: 37 | Source file: networks.py

Example 12: __init__

# Required module import: import lasagne [as alias]
# Or alternatively: from lasagne import nonlinearities [as alias]
def __init__(self, incoming, num_filters, filter_size, stride=(1, 1),
                 crop=0, untie_biases=False,
                 W=initmethod(), b=lasagne.init.Constant(0.),
                 nonlinearity=lasagne.nonlinearities.rectify, flip_filters=False,
                 **kwargs):
        super(DeconvLayer, self).__init__(
                incoming, num_filters, filter_size, stride, crop, untie_biases,
                W, b, nonlinearity, flip_filters, n=2, **kwargs)
        # rename self.pad (set by the base class) to self.crop
        self.crop = self.pad
        del self.pad 
Author: ajbrock | Project: Neural-Photo-Editor | Lines: 13 | Source file: layers.py

Example 13: get_output_for

# Required module import: import lasagne [as alias]
# Or alternatively: from lasagne import nonlinearities [as alias]
def get_output_for(self, input, **kwargs):
        if input.ndim > 2:
            input = input.flatten(2)
        
        activation = T.dot(input, self.W*self.weights_mask)            
        
        if self.b is not None:
            activation = activation + self.b.dimshuffle('x', 0)
        return self.nonlinearity(activation)        

# Conditioning Masked Layer 
# Currently not used.       
# class CML(MaskedLayer):

    # def __init__(self, incoming, num_units, mask_generator,use_cond_mask=False,U=lasagne.init.GlorotUniform(),W=lasagne.init.GlorotUniform(),
                 # b=init.Constant(0.), nonlinearity=lasagne.nonlinearities.rectify, **kwargs):
        # super(CML, self).__init__(incoming, num_units, mask_generator,W,
                 # b, nonlinearity,**kwargs)
        
        # self.use_cond_mask=use_cond_mask
        # if use_cond_mask:            
            # self.U = self.add_param(spec = U,
                                    # shape = (num_inputs, num_units),
                                    # name='U',
                                    # trainable=True,
                                    # regularizable=False)theano.shared(value=self.weights_initialization((self.n_in, self.n_out)), name=self.name+'U', borrow=True)
            # self.add_param(self.U,name = 
    # def get_output_for(self,input,**kwargs):
       # lin = self.lin_output = T.dot(input, self.W * self.weights_mask) + self.b  
       # if self.use_cond_mask:
           # lin = lin+T.dot(T.ones_like(input), self.U * self.weights_mask)
       # return lin if self._activation is None else self._activation(lin) 


       
# Made layer, adopted from M.Germain 
Author: ajbrock | Project: Neural-Photo-Editor | Lines: 38 | Source file: layers.py

Example 14: build_net

# Required module import: import lasagne [as alias]
# Or alternatively: from lasagne import nonlinearities [as alias]
def build_net(in_shape, out_size, model):
    # input variables
    input_var = (tt.tensor3('input', dtype='float32')
                 if len(in_shape) > 1 else
                 tt.matrix('input', dtype='float32'))
    target_var = tt.matrix('target_output', dtype='float32')

    # stack more layers
    network = lnn.layers.InputLayer(
        name='input', shape=(None,) + in_shape, input_var=input_var)

    if 'conv' in model and model['conv']:
        # reshape to 1 "color" channel
        network = lnn.layers.reshape(
            network, shape=(-1, 1) + in_shape, name='reshape')

        for c in sorted(model['conv'].keys()):
            network = blocks.conv(network, **model['conv'][c])

    # no more output layer if gap is already there!
    if 'gap' in model and model['gap']:
        network = blocks.gap(network, out_size=out_size,
                             out_nonlinearity=model['out_nonlinearity'],
                             **model['gap'])
    else:
        if 'dense' in model and model['dense']:
            network = blocks.dense(network, **model['dense'])

        # output layer
        out_nl = getattr(lnn.nonlinearities, model['out_nonlinearity'])
        network = lnn.layers.DenseLayer(
            network, name='output', num_units=out_size,
            nonlinearity=out_nl)

    return network, input_var, target_var 
Author: fdlm | Project: chordrec | Lines: 37 | Source file: dnn.py

Example 15: recurrent

# Required module import: import lasagne [as alias]
# Or alternatively: from lasagne import nonlinearities [as alias]
def recurrent(network, mask_in, num_rec_units, num_layers, dropout,
              bidirectional, nonlinearity):

    if nonlinearity != 'LSTM':
        nl = getattr(lnn.nonlinearities, nonlinearity)

        def add_layer(prev_layer, **kwargs):
            return lnn.layers.RecurrentLayer(
                prev_layer, num_units=num_rec_units, mask_input=mask_in,
                nonlinearity=nl,
                W_in_to_hid=lnn.init.GlorotUniform(),
                W_hid_to_hid=lnn.init.Orthogonal(gain=np.sqrt(2) / 2),
                **kwargs)

    else:
        def add_layer(prev_layer, **kwargs):
            return lnn.layers.LSTMLayer(
                prev_layer, num_units=num_rec_units, mask_input=mask_in,
                **kwargs
            )

    fwd = network
    for i in range(num_layers):
        fwd = add_layer(fwd, name='rec_fwd_{}'.format(i))
        if dropout > 0.:
            fwd = lnn.layers.DropoutLayer(fwd, p=dropout)

    if not bidirectional:
        # nothing to merge; return the unidirectional recurrent stack
        return fwd

    bck = network
    for i in range(num_layers):
        bck = add_layer(bck, name='rec_bck_{}'.format(i), backwards=True)
        if dropout > 0:
            bck = lnn.layers.DropoutLayer(bck, p=dropout)

    # combine the forward and backward recurrent layers...
    network = lnn.layers.ConcatLayer([fwd, bck], name='fwd + bck', axis=-1)
    return network 
Author: fdlm | Project: chordrec | Lines: 41 | Source file: blocks.py
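
A hypothetical call of this block follows (it assumes the recurrent function above and its module-level imports, lnn and np, are in scope; the input shape, unit counts and dropout rate are assumptions). When nonlinearity is any name other than 'LSTM' it is resolved from lnn.nonlinearities, exactly as in the dense and gap blocks above; for 'LSTM' the gates use lasagne's built-in defaults instead.

import lasagne as lnn

net = lnn.layers.InputLayer(shape=(None, 100, 120), name='input')
mask = lnn.layers.InputLayer(shape=(None, 100), name='mask')

# Vanilla RNN stack: 'rectify' is looked up via getattr(lnn.nonlinearities, 'rectify').
rnn_net = recurrent(net, mask, num_rec_units=128, num_layers=2, dropout=0.3,
                    bidirectional=True, nonlinearity='rectify')

# LSTM stack: the string 'LSTM' switches to LSTMLayer with its default gate nonlinearities.
lstm_net = recurrent(net, mask, num_rec_units=128, num_layers=2, dropout=0.3,
                     bidirectional=True, nonlinearity='LSTM')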


Note: The lasagne.nonlinearities examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors, and any distribution or use should follow the corresponding project's license. Please do not reproduce without permission.