

Python layers.get_all_layers Method Code Examples

This article collects typical usage examples of the Python method lasagne.layers.get_all_layers. If you are unsure how layers.get_all_layers is used in practice, or are looking for concrete examples of the method, the curated code samples below may help. You can also explore further usage examples from the lasagne.layers module.


The following presents 15 code examples of layers.get_all_layers, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
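Before turning to the examples, the following minimal sketch (not taken from any of the projects below; the network is made up purely for illustration) shows the basic behavior of lasagne.layers.get_all_layers: given one or more output layers, it returns every layer the outputs depend on, as a list in topological order ending with the layer(s) you passed in.

import lasagne
from lasagne.layers import InputLayer, DenseLayer, get_all_layers

# A tiny two-layer network, purely for illustration
l_in = InputLayer((None, 100), name='input')
l_hidden = DenseLayer(l_in, num_units=50, name='hidden')
l_out = DenseLayer(l_hidden, num_units=10, name='output')

# Returns [l_in, l_hidden, l_out]: every layer l_out depends on, input first
for layer in get_all_layers(l_out):
    print(layer.name, lasagne.layers.get_output_shape(layer))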

Example 1: example2

# Required module: from lasagne import layers [as alias]
# Or alternatively: from lasagne.layers import get_all_layers [as alias]
def example2():
    """ Two branches"""
    # Input
    l_in = lasagne.layers.InputLayer((100, 1, 20, 20))
    # Branch one
    l_conv1 = lasagne.layers.Conv2DLayer(l_in, num_filters=32, filter_size=(5, 5))
    l_pool1 = lasagne.layers.MaxPool2DLayer(l_conv1, pool_size=(2, 2))
    l_dense1 = lasagne.layers.DenseLayer(l_pool1, num_units=20)
    # Branch two
    l_conv2 = lasagne.layers.Conv2DLayer(l_in, num_filters=32, filter_size=(5, 5))
    l_pool2 = lasagne.layers.MaxPool2DLayer(l_conv2, pool_size=(2, 2))
    l_dense2 = lasagne.layers.DenseLayer(l_pool2, num_units=20)
    # Merge
    l_concat = lasagne.layers.ConcatLayer((l_dense1, l_dense2))
    # Output
    l_out = lasagne.layers.DenseLayer(l_concat, num_units=10)
    layers = get_all_layers(l_out)
    print(get_network_str(layers, get_network=False, incomings=True, outgoings=True))
    return None 
Author: Lasagne, Project: Recipes, Lines: 21, Source: network_repr.py

Example 2: loadModel

# Required module: from lasagne import layers [as alias]
# Or alternatively: from lasagne.layers import get_all_layers [as alias]
def loadModel(filename):
    print "IMPORTING MODEL PARAMS...",
    net_filename = MODEL_PATH + filename

    with open(net_filename, 'rb') as f:
        data = pickle.load(f)

    #for training, we only want to load the model params
    net = data['net']
    params = l.get_all_param_values(net)
    if LOAD_OUTPUT_LAYER:
        l.set_all_param_values(NET, params)
    else:
        l.set_all_param_values(l.get_all_layers(NET)[:-1], params[:-2])    

    print "DONE!" 
Author: kahst, Project: AcousticEventDetection, Lines: 18, Source: AED_train.py
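Note the slicing in the else branch above: it assumes the output layer is a standard DenseLayer with exactly two parameters (W and b), so dropping the last layer from get_all_layers corresponds to dropping the last two arrays from the parameter list. A minimal sketch of that correspondence, under the same assumption (the helper name is hypothetical):

# Sketch of the layer/parameter correspondence assumed above (output layer = DenseLayer):
#   l.get_all_layers(NET)       -> [input, ..., hidden, output]
#   l.get_all_param_values(NET) -> [..., hidden_W, hidden_b, output_W, output_b]
# so skipping the output layer means skipping the last two parameter arrays:
import lasagne.layers as l

def load_params_skipping_output_layer(net, params):
    # Hypothetical helper, not part of the original project
    l.set_all_param_values(l.get_all_layers(net)[:-1], params[:-2])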

Example 3: loadPretrained

# Required module: from lasagne import layers [as alias]
# Or alternatively: from lasagne.layers import get_all_layers [as alias]
def loadPretrained(net):

    if cfg.MODEL_NAME:

        # Load saved model
        n, c = io.loadModel(cfg.MODEL_NAME)

        # Set params
        params = l.get_all_param_values(n)
        if cfg.LOAD_OUTPUT_LAYER:
            l.set_all_param_values(net, params)
        else:
            l.set_all_param_values(l.get_all_layers(net)[:-1], params[:-2])

    return net

#################### LOSS FUNCTION ###################### 
Author: kahst, Project: BirdCLEF-Baseline, Lines: 19, Source: lasagne_net.py

Example 4: train_function

# Required module: from lasagne import layers [as alias]
# Or alternatively: from lasagne.layers import get_all_layers [as alias]
def train_function(net):

    # We use dynamic learning rates which change after some epochs
    lr_dynamic = T.scalar(name='learning_rate')

    # Theano variable for the class targets
    targets = T.matrix('targets', dtype=theano.config.floatX)

    # Get the network output
    prediction = l.get_output(net)
    
    # The Theano train function takes images and class targets as input
    log.i("COMPILING TRAIN FUNCTION...", new_line=False)
    start = time.time()
    loss = loss_function(net, prediction, targets)
    updates = net_updates(net, loss, lr_dynamic)
    train_net = theano.function([l.get_all_layers(net)[0].input_var, targets, lr_dynamic], loss, updates=updates, allow_input_downcast=True)
    log.i(("DONE! (", int(time.time() - start), "s )"))

    return train_net

################# PREDICTION FUNCTION #################### 
Author: kahst, Project: BirdCLEF-Baseline, Lines: 24, Source: lasagne_net.py

Example 5: test_function

# Required module: from lasagne import layers [as alias]
# Or alternatively: from lasagne.layers import get_all_layers [as alias]
def test_function(net, hasTargets=True, layer_index=-1):    

    # We need the prediction function to calculate the validation accuracy
    # this way we can test the net during/after training
    # We need a version with targets and one without
    prediction = l.get_output(l.get_all_layers(net)[layer_index], deterministic=True)

    log.i("COMPILING TEST FUNCTION...", new_line=False)
    start = time.time()
    if hasTargets:
        # Theano variable for the class targets
        targets = T.matrix('targets', dtype=theano.config.floatX)
        
        loss = loss_function(net, prediction, targets)
        accuracy = accuracy_function(net, prediction, targets)
        
        test_net = theano.function([l.get_all_layers(net)[0].input_var, targets], [prediction, loss, accuracy], allow_input_downcast=True)

    else:
        test_net = theano.function([l.get_all_layers(net)[0].input_var], prediction, allow_input_downcast=True)
        
    log.i(("DONE! (", int(time.time() - start), "s )"))

    return test_net 
Author: kahst, Project: BirdCLEF-Baseline, Lines: 26, Source: lasagne_net.py
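For completeness, a hedged usage sketch of the two variants returned above; X_batch and y_batch are hypothetical placeholders for your own validation data:

# Hypothetical usage of the compiled test functions
test_with_targets = test_function(net, hasTargets=True)
prediction, loss, accuracy = test_with_targets(X_batch, y_batch)

test_without_targets = test_function(net, hasTargets=False)
prediction = test_without_targets(X_batch)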

Example 6: get_objective

# Required module: from lasagne import layers [as alias]
# Or alternatively: from lasagne.layers import get_all_layers [as alias]
def get_objective(l1=0.0, l2=0.0005):
    class RegularizedObjective(Objective):

        def get_loss(self, input=None, target=None, aggregation=None,
                     deterministic=False, **kwargs):

            l1_layer = get_all_layers(self.input_layer)[1]

            loss = super(RegularizedObjective, self).get_loss(
                input=input, target=target, aggregation=aggregation,
                deterministic=deterministic, **kwargs)
            if not deterministic:
                return loss \
                    + l1 * lasagne.regularization.regularize_layer_params(
                        l1_layer, lasagne.regularization.l1) \
                    + l2 * lasagne.regularization.regularize_network_params(
                        self.input_layer, lasagne.regularization.l2)
            else:
                return loss
    return RegularizedObjective 
Author: sveitser, Project: kaggle_diabetic, Lines: 22, Source: nn.py
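The example above relies on nolearn's Objective class. If you are working with plain Lasagne instead, the same penalties can be added directly to a loss expression; a minimal sketch, assuming net is the output layer and loss an unregularized Theano loss expression (the helper name is made up):

# Sketch: L1 on the first layer after the input, L2 on the whole network (plain Lasagne)
from lasagne.layers import get_all_layers
from lasagne.regularization import (regularize_layer_params,
                                    regularize_network_params, l1, l2)

def add_regularization(net, loss, l1_weight=0.0, l2_weight=0.0005):
    l1_layer = get_all_layers(net)[1]  # first layer after the InputLayer
    loss = loss + l1_weight * regularize_layer_params(l1_layer, l1)
    loss = loss + l2_weight * regularize_network_params(net, l2)
    return loss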

Example 7: _build

# Required module: from lasagne import layers [as alias]
# Or alternatively: from lasagne.layers import get_all_layers [as alias]
def _build(self, forget_bias=5.0, grad_clip=10.0):
        """Build architecture
        """
        network = InputLayer(shape=(None, self.seq_length, self.input_size),
                             name='input')
        self.input_var = network.input_var

        # Hidden layers
        tanh = lasagne.nonlinearities.tanh
        gate, constant = lasagne.layers.Gate, lasagne.init.Constant
        for _ in range(self.depth):
            network = LSTMLayer(network, self.width, nonlinearity=tanh,
                                grad_clipping=grad_clip,
                                forgetgate=gate(b=constant(forget_bias)))

        # Retain last-output state
        network = SliceLayer(network, -1, 1)

        # Output layer
        sigmoid = lasagne.nonlinearities.sigmoid
        loc_layer = DenseLayer(network, self.num_outputs * 2)
        conf_layer = DenseLayer(network, self.num_outputs,
                                nonlinearity=sigmoid)

        # Grab all layers into DAPs instance
        self.network = get_all_layers([loc_layer, conf_layer])

        # Get theano expression for outputs of DAPs model
        self.loc_var, self.conf_var = get_output([loc_layer, conf_layer],
                                                 deterministic=True) 
Author: escorciav, Project: daps, Lines: 32, Source: sequence_encoder.py

Example 8: getPredictionFuntion

# Required module: from lasagne import layers [as alias]
# Or alternatively: from lasagne.layers import get_all_layers [as alias]
def getPredictionFuntion(net):
    net_output = l.get_output(net, deterministic=True)

    print "COMPILING THEANO TEST FUNCTION...",
    start = time.time()
    test_net = theano.function([l.get_all_layers(NET)[0].input_var], net_output, allow_input_downcast=True)
    print "DONE! (", int(time.time() - start), "s )"

    return test_net

################# PREDICTION POOLING #################### 
Author: kahst, Project: AcousticEventDetection, Lines: 13, Source: AED_eval.py

Example 9: classificationBranch

# Required module: from lasagne import layers [as alias]
# Or alternatively: from lasagne.layers import get_all_layers [as alias]
def classificationBranch(net, kernel_size):

    # Post Convolution
    branch = l.batch_norm(l.Conv2DLayer(net,
                        num_filters=int(FILTERS[-1] * RESNET_K),
                        filter_size=kernel_size,
                        nonlinearity=nl.rectify))

    #log.p(("\t\tPOST  CONV SHAPE:", l.get_output_shape(branch), "LAYER:", len(l.get_all_layers(branch)) - 1))

    # Dropout Layer
    branch = l.DropoutLayer(branch)
    
    # Dense Convolution
    branch = l.batch_norm(l.Conv2DLayer(branch,
                        num_filters=int(FILTERS[-1] * RESNET_K * 2),
                        filter_size=1,
                        nonlinearity=nl.rectify))

    #log.p(("\t\tDENSE CONV SHAPE:", l.get_output_shape(branch), "LAYER:", len(l.get_all_layers(branch)) - 1))
    
    # Dropout Layer
    branch = l.DropoutLayer(branch)
    
    # Class Convolution
    branch = l.Conv2DLayer(branch,
                        num_filters=len(cfg.CLASSES),
                        filter_size=1,
                        nonlinearity=None)
    return branch 
Author: kahst, Project: BirdNET, Lines: 32, Source: model.py

Example 10: test_function

# Required module: from lasagne import layers [as alias]
# Or alternatively: from lasagne.layers import get_all_layers [as alias]
def test_function(net, layer_index=-1):

    log.p('COMPILING THEANO TEST FUNCTION...', new_line=False)    

    prediction = l.get_output(l.get_all_layers(net)[layer_index], deterministic=True)    
    test_function = theano.function([l.get_all_layers(net)[0].input_var], prediction, allow_input_downcast=True)        

    log.p('DONE!')

    return test_function 
Author: kahst, Project: BirdNET, Lines: 12, Source: model.py

Example 11: get_equivalent_input_padding

# Required module: from lasagne import layers [as alias]
# Or alternatively: from lasagne.layers import get_all_layers [as alias]
def get_equivalent_input_padding(layer, layers_args=[]):
    """Compute the equivalent padding in the input layer

    A function to compute the equivalent padding of a sequence of
    convolutional and pooling layers. It memorizes the padding
    of all the Layers up to the first InputLayer.
    It then computes what would be the equivalent padding in the Layer
    immediately before the chain of Layers that is being taken into account.
    """
    # Initialize the DynamicPadding layers
    lasagne.layers.get_output(layer)
    # Loop through conv and pool to collect data
    all_layers = get_all_layers(layer)
    # while(not isinstance(layer, (InputLayer))):
    for layer in all_layers:
        # Note: stride is numerical, but pad *could* be symbolic
        try:
            pad, stride = (layer.pad, layer.stride)
            if isinstance(pad, int):
                pad = pad, pad
            if isinstance(stride, int):
                stride = stride, stride
            layers_args.append((pad, stride))
        except AttributeError:
            pass

    # Loop backward to compute the equivalent padding in the input
    # layer
    tot_pad = T.zeros(2)
    pad_factor = T.ones(2)
    while layers_args:
        pad, stride = layers_args.pop()
        tot_pad += pad * pad_factor
        pad_factor *= stride

    return tot_pad 
Author: fvisin, Project: reseg, Lines: 38, Source: padded.py
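To make the backward accumulation concrete, here is a trace of the loop above with made-up numbers and plain Python ints instead of Theano tensors; assume two layers were collected, one with pad 1 / stride 1 closer to the input and a deeper one with pad 2 / stride 2:

# Hypothetical trace of the accumulation loop (plain ints instead of Theano tensors)
layers_args = [((1, 1), (1, 1)),   # layer closer to the input: pad 1, stride 1
               ((2, 2), (2, 2))]   # deeper layer: pad 2, stride 2

tot_pad = [0, 0]
pad_factor = [1, 1]
while layers_args:
    pad, stride = layers_args.pop()  # deepest remaining layer first
    tot_pad = [t + p * f for t, p, f in zip(tot_pad, pad, pad_factor)]
    pad_factor = [f * s for f, s in zip(pad_factor, stride)]

print(tot_pad)  # [4, 4]: 2*1 from the deeper layer, plus 1*2 from the first layer once the deeper stride is folded in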

Example 12: loadParams

# Required module: from lasagne import layers [as alias]
# Or alternatively: from lasagne.layers import get_all_layers [as alias]
def loadParams(epoch, filename=None):
    print "IMPORTING MODEL PARAMS...",
    net_filename = MODEL_PATH + filename
    with open(net_filename, 'rb') as f:
        params = pickle.load(f)
    if LOAD_OUTPUT_LAYER:
        l.set_all_param_values(NET, params)
    else:
        l.set_all_param_values(l.get_all_layers(NET)[:-1], params[:-2])
    print "DONE!"

################  PREDICTION SAVE/LOAD  ################## 
Author: kahst, Project: BirdCLEF2017, Lines: 14, Source: birdCLEF_evaluate.py

Example 13: getPredictionFuntion

# Required module: from lasagne import layers [as alias]
# Or alternatively: from lasagne.layers import get_all_layers [as alias]
def getPredictionFuntion(net):
    net_output = l.get_output(net, deterministic=True)

    print "COMPILING THEANO TEST FUNCTION...",
    start = time.time()
    test_net = theano.function([l.get_all_layers(net)[0].input_var], net_output, allow_input_downcast=True)
    print "DONE! (", int(time.time() - start), "s )"

    return test_net

################# PREDICTION POOLING #################### 
Author: kahst, Project: BirdCLEF2017, Lines: 13, Source: birdCLEF_evaluate.py

Example 14: getPredictionFuntion

# Required module: from lasagne import layers [as alias]
# Or alternatively: from lasagne.layers import get_all_layers [as alias]
def getPredictionFuntion(net):
    net_output = l.get_output(net, deterministic=True)

    print "COMPILING THEANO TEST FUNCTION...",
    start = time.time()
    test_net = theano.function([l.get_all_layers(NET)[0].input_var], net_output, allow_input_downcast=True)
    print "DONE! (", int(time.time() - start), "s )"

    return test_net 
Author: kahst, Project: BirdCLEF2017, Lines: 11, Source: birdCLEF_test.py

Example 15: loadParams

# Required module: from lasagne import layers [as alias]
# Or alternatively: from lasagne.layers import get_all_layers [as alias]
def loadParams(epoch, filename=None):
    print "IMPORTING MODEL PARAMS...",
    if filename == None:
        net_filename = MODEL_PATH + "birdCLEF_" + RUN_NAME + "_model_params_epoch_" + str(epoch) + ".pkl"
    else:
        net_filename = MODEL_PATH + filename
    with open(net_filename, 'rb') as f:
        params = pickle.load(f)
    if LOAD_OUTPUT_LAYER:
        l.set_all_param_values(NET, params)
    else:
        l.set_all_param_values(l.get_all_layers(NET)[:-1], params[:-2])
    print "DONE!" 
Author: kahst, Project: BirdCLEF2017, Lines: 15, Source: birdCLEF_train.py


Note: The lasagne.layers.get_all_layers examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. Please consult the corresponding project licenses before using or redistributing the code, and do not reproduce this article without permission.