

Python NeuralNet.initialize Method Code Examples

This article collects typical usage examples of the nolearn.lasagne.NeuralNet.initialize method in Python. If you are unsure what NeuralNet.initialize does or how to use it, the curated examples below may help. You can also explore further usage examples of the containing class, nolearn.lasagne.NeuralNet.


The following presents 15 code examples of NeuralNet.initialize, ordered by popularity by default.
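
Before the examples, here is a minimal sketch of the pattern they all share: construct a NeuralNet, then call initialize() to build the named layers and compile the Theano iteration functions, without training anything. Every hyperparameter below is an illustrative assumption rather than something taken from a specific example.

# Minimal sketch (assumed hyperparameters) of the NeuralNet.initialize() pattern.
from lasagne.layers import InputLayer, DenseLayer
from lasagne.nonlinearities import softmax
from lasagne.updates import nesterov_momentum
from nolearn.lasagne import NeuralNet

net = NeuralNet(
    layers=[
        (InputLayer, {'name': 'input', 'shape': (None, 100)}),
        (DenseLayer, {'name': 'output', 'num_units': 10, 'nonlinearity': softmax}),
    ],
    update=nesterov_momentum,
    update_learning_rate=0.01,
    max_epochs=10,
)
net.initialize()                   # builds net.layers_ and compiles the iteration functions
print(sorted(net.layers_.keys()))  # ['input', 'output']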

Example 1: _create_nnet

# Required import: from nolearn.lasagne import NeuralNet
# Or: from nolearn.lasagne.NeuralNet import initialize
    def _create_nnet(self, input_dims, output_dims, learning_rate, num_hidden_units=15, batch_size=32, max_train_epochs=1,
                     hidden_nonlinearity=nonlinearities.rectify, output_nonlinearity=None, update_method=updates.sgd):
        """
        A subclass may override this if a different sort
        of network is desired.
        """
        nnlayers = [('input', layers.InputLayer), ('hidden', layers.DenseLayer), ('output', layers.DenseLayer)]
        nnet = NeuralNet(layers=nnlayers,

                           # layer parameters:
                           input_shape=(None, input_dims),
                           hidden_num_units=num_hidden_units,
                           hidden_nonlinearity=hidden_nonlinearity,
                           output_nonlinearity=output_nonlinearity,
                           output_num_units=output_dims,

                           # optimization method:
                           update=update_method,
                           update_learning_rate=learning_rate,

                           regression=True,  # flag to indicate we're dealing with regression problem
                           max_epochs=max_train_epochs,
                           batch_iterator_train=BatchIterator(batch_size=batch_size),
                           train_split=nolearn.lasagne.TrainSplit(eval_size=0),
                           verbose=0,
                         )
        nnet.initialize()
        return nnet
Author: rihardsk, Project: predictive-rl, Lines: 30, Source: cacla_agent_nolearn.py

Example 2: test_okay

# Required import: from nolearn.lasagne import NeuralNet
# Or: from nolearn.lasagne.NeuralNet import initialize
    def test_okay(self, NeuralNet):
        net = NeuralNet(
            layers=[('input', Mock), ('mylayer', Mock)],
            input_shape=(10, 10),
            mylayer_hey='hey',
            update_foo=1,
            update_bar=2,
            )
        net._create_iter_funcs = lambda *args: (1, 2, 3)
        net.initialize()
Author: dnouri, Project: nolearn, Lines: 12, Source: test_base.py

Example 3: test_unused

# Required import: from nolearn.lasagne import NeuralNet
# Or: from nolearn.lasagne.NeuralNet import initialize
    def test_unused(self, NeuralNet):
        net = NeuralNet(
            layers=[('input', Mock), ('mylayer', Mock)],
            input_shape=(10, 10),
            mylayer_hey='hey',
            yourlayer_ho='ho',
            update_foo=1,
            update_bar=2,
            )
        net._create_iter_funcs = lambda *args: (1, 2, 3)

        with pytest.raises(ValueError) as err:
            net.initialize()
        assert str(err.value) == 'Unused kwarg: yourlayer_ho'
Author: dnouri, Project: nolearn, Lines: 16, Source: test_base.py
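
Taken together, Examples 2 and 3 pin down how initialize() handles keyword arguments: prefixed kwargs such as mylayer_hey are routed to the layer or update function they name, and anything left unmatched raises a ValueError. The same behavior can be seen without mocks; the following is a hedged sketch against a real (tiny) network, where the deliberately misspelled prefix is my own addition:

# Hedged sketch: initialize() rejects kwargs whose prefix matches no layer.
from lasagne.layers import InputLayer, DenseLayer
from nolearn.lasagne import NeuralNet

net = NeuralNet(
    layers=[('input', InputLayer), ('output', DenseLayer)],
    input_shape=(None, 4),
    output_num_units=2,
    outptu_num_units=3,   # deliberately misspelled prefix -- matches no layer
    update_learning_rate=0.01,
)
try:
    net.initialize()
except ValueError as e:
    print(e)  # Unused kwarg: outptu_num_units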

Example 4: test_layers_included

# Required import: from nolearn.lasagne import NeuralNet
# Or: from nolearn.lasagne.NeuralNet import initialize
    def test_layers_included(self, NeuralNet):
        def objective(layers_, target, **kwargs):
            out_a_layer = layers_['output_a']
            out_b_layer = layers_['output_b']

            # Get the outputs
            out_a, out_b = get_output([out_a_layer, out_b_layer])

            # Get the targets
            gt_a = T.cast(target[:, 0], 'int32')
            gt_b = target[:, 1].reshape((-1, 1))

            # Calculate the multi task loss
            cls_loss = aggregate(categorical_crossentropy(out_a, gt_a))
            reg_loss = aggregate(categorical_crossentropy(out_b, gt_b))
            loss = cls_loss + reg_loss
            return loss

        # test that both branches of the multi output network are included,
        # and also that a single layer isn't included multiple times.
        l = InputLayer(shape=(None, 1, 28, 28), name="input")
        l = Conv2DLayer(l, name='conv1', filter_size=(5, 5), num_filters=8)
        l = Conv2DLayer(l, name='conv2', filter_size=(5, 5), num_filters=8)

        la = DenseLayer(l, name='hidden_a', num_units=128)
        la = DenseLayer(la, name='output_a', nonlinearity=softmax,
                        num_units=10)

        lb = DenseLayer(l, name='hidden_b', num_units=128)
        lb = DenseLayer(lb, name='output_b', nonlinearity=sigmoid, num_units=1)

        net = NeuralNet(layers=[la, lb],
                        update_learning_rate=0.5,
                        y_tensor_type=None,
                        regression=True,
                        objective=objective)
        net.initialize()

        expected_names = sorted(["input", "conv1", "conv2",
                                 "hidden_a", "output_a",
                                 "hidden_b", "output_b"])
        network_names = sorted(list(net.layers_.keys()))

        assert (expected_names == network_names)
Author: leomauro, Project: nolearn, Lines: 46, Source: test_base.py

Example 5: net_color_non_square

# Required import: from nolearn.lasagne import NeuralNet
# Or: from nolearn.lasagne.NeuralNet import initialize
def net_color_non_square(NeuralNet):
    l = InputLayer(shape=(None, 3, 20, 28))
    l = Conv2DLayer(l, name='conv1', filter_size=(5, 5), num_filters=1)
    l = MaxPool2DLayer(l, name='pool1', pool_size=(2, 2))
    l = Conv2DLayer(l, name='conv2', filter_size=(5, 5), num_filters=8)
    l = MaxPool2DLayer(l, name='pool2', pool_size=(2, 2))
    l = DenseLayer(l, name='hidden1', num_units=128)
    l = DenseLayer(l, name='output', nonlinearity=softmax, num_units=10)

    net = NeuralNet(
        layers=l,

        update=nesterov_momentum,
        update_learning_rate=0.01,
        update_momentum=0.9,

        max_epochs=1,
        )
    net.initialize()
    return net
Author: AlfioEmanueleFresta, Project: nolearn, Lines: 22, Source: conftest.py

Example 6: create_nn

# Required import: from nolearn.lasagne import NeuralNet
# Or: from nolearn.lasagne.NeuralNet import initialize
def create_nn():
    '''
    Create a neural net with one (or more) layers to fit the featurized data.
    A single softmax layer is equivalent to doing logistic regression on the featurized data.
    Result: 53% accuracy.
    Adding a fully connected hidden layer boosts accuracy to 67%.
    '''
    nn = NeuralNet(
        layers = [
            (InputLayer, {
                        'name':'input',
                        'shape':(None,4096)
                         }),
            # (DropoutLayer, {
            #             'name':'drop6',
            #             'p':.5
            #             }),
            (DenseLayer, {
                        'name':'fc7',
                        'num_units':4096,
                        }),
            (DenseLayer, {
                        'name':'output',
                        'num_units':3,
                        'nonlinearity':softmax,
                        })
                        ],
        update=nesterov_momentum,
        update_learning_rate=0.01,
        update_momentum=0.9,
    #         regression=True,  # flag to indicate we're dealing with regression problem
        max_epochs=1000,  # we want to train this many epochs
        verbose=1,
        train_split=TrainSplit(eval_size=0.25),

        )

    nn.initialize()

    return nn
Author: k-lev, Project: City_Tagger, Lines: 42, Source: nn_softmax.py

Example 7: extract_encoder

# Required import: from nolearn.lasagne import NeuralNet
# Or: from nolearn.lasagne.NeuralNet import initialize
def extract_encoder(dbn):
    dbn_layers = dbn.get_all_layers()
    encoder = NeuralNet(
        layers=[
            (InputLayer, {'name': 'input', 'shape': dbn_layers[0].shape}),
            (DenseLayer, {'name': 'l1', 'num_units': dbn_layers[1].num_units, 'nonlinearity': sigmoid,
                          'W': dbn_layers[1].W, 'b': dbn_layers[1].b}),
            (DenseLayer, {'name': 'l2', 'num_units': dbn_layers[2].num_units, 'nonlinearity': sigmoid,
                          'W': dbn_layers[2].W, 'b': dbn_layers[2].b}),
            (DenseLayer, {'name': 'l3', 'num_units': dbn_layers[3].num_units, 'nonlinearity': sigmoid,
                          'W': dbn_layers[3].W, 'b': dbn_layers[3].b}),
            (DenseLayer, {'name': 'l4', 'num_units': dbn_layers[4].num_units, 'nonlinearity': linear,
                          'W': dbn_layers[4].W, 'b': dbn_layers[4].b}),
        ],
        update=adadelta,
        update_learning_rate=0.01,
        objective_l2=0.005,
        verbose=1,
        regression=True
    )
    encoder.initialize()
    return encoder
Author: behtak, Project: ip-avsr, Lines: 24, Source: unimodal_nodelta_with_val.py
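
A hedged usage sketch for extract_encoder: dbn is assumed to be a trained nolearn NeuralNet autoencoder whose layers line up with the slices above, and the 1200-dimensional input / 50-dimensional bottleneck mirror the sibling Example 8 below.

import numpy as np

encoder = extract_encoder(dbn)                  # dbn: trained autoencoder (assumed)
X = np.random.rand(16, 1200).astype('float32')  # assumed 1200-dim feature vectors
codes = encoder.predict(X)                      # bottleneck features, e.g. shape (16, 50)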

Example 8: load_encoder

# Required import: from nolearn.lasagne import NeuralNet
# Or: from nolearn.lasagne.NeuralNet import initialize
def load_encoder(path):
    """
        load a pretrained dbn from path
        :param path: path to the .mat dbn
        :return: pretrained unrolled encoder
        """
    # create the network using weights from pretrain_nn.mat
    nn = sio.loadmat(path)
    w1 = nn['w1']
    w2 = nn['w2']
    w3 = nn['w3']
    w4 = nn['w4']
    b1 = nn['b1'][0]
    b2 = nn['b2'][0]
    b3 = nn['b3'][0]
    b4 = nn['b4'][0]

    encoder = NeuralNet(
        layers=[
            (InputLayer, {'name': 'input', 'shape': (None, 1200)}),
            (DenseLayer, {'name': 'l1', 'num_units': 2000, 'nonlinearity': sigmoid,
                          'W': w1, 'b': b1}),
            (DenseLayer, {'name': 'l2', 'num_units': 1000, 'nonlinearity': sigmoid,
                          'W': w2, 'b': b2}),
            (DenseLayer, {'name': 'l3', 'num_units': 500, 'nonlinearity': sigmoid,
                          'W': w3, 'b': b3}),
            (DenseLayer, {'name': 'l4', 'num_units': 50, 'nonlinearity': linear,
                          'W': w4, 'b': b4}),
        ],
        update=nesterov_momentum,
        update_learning_rate=0.001,
        update_momentum=0.5,
        objective_l2=0.005,
        verbose=1,
        regression=True
    )
    encoder.initialize()
    return encoder
Author: behtak, Project: ip-avsr, Lines: 40, Source: dbn.py
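
Usage presumably looks like the following; the file name comes from the pretrain_nn.mat mentioned in the comment above, and the shapes follow from the layer definitions:

import numpy as np

encoder = load_encoder('pretrain_nn.mat')       # path per the comment in load_encoder
X = np.random.rand(8, 1200).astype('float32')   # the InputLayer expects 1200 features
codes = encoder.predict(X)                      # shape (8, 50): the linear bottleneck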

Example 9: net_with_nonlinearity_layer

# Required import: from nolearn.lasagne import NeuralNet
# Or: from nolearn.lasagne.NeuralNet import initialize
def net_with_nonlinearity_layer(NeuralNet):
    l = InputLayer(shape=(None, 1, 28, 28))
    l = Conv2DLayer(l, name='conv1', filter_size=(5, 5), num_filters=8)
    l = MaxPool2DLayer(l, name='pool1', pool_size=(2, 2))
    l = Conv2DLayer(l, name='conv2', filter_size=(5, 5), num_filters=8)
    l = MaxPool2DLayer(l, name='pool2', pool_size=(2, 2))
    l = DenseLayer(l, name='hidden1', num_units=128)
    l = DenseLayer(l, name='output', nonlinearity=softmax, num_units=10)
    l = NonlinearityLayer(l)

    net = NeuralNet(
        layers=l,

        update=nesterov_momentum,
        update_learning_rate=0.01,
        update_momentum=0.9,

        max_epochs=5,
        on_epoch_finished=[_OnEpochFinished()],
        verbose=99,
        )
    net.initialize()
    return net
Author: AlfioEmanueleFresta, Project: nolearn, Lines: 25, Source: conftest.py

Example 10: make_memnn

# Required import: from nolearn.lasagne import NeuralNet
# Or: from nolearn.lasagne.NeuralNet import initialize

#......... part of the code omitted here .........

    layers.extend([(MemoryLayer, {'name': l_mem_names[0],
                                  'incomings': ('l_in_cont', 'l_in_cont_pe', 'l_emb_f_q'),
                                  'vocab_size': vocab_size, 'emb_size': emb_size,
                                  'A': tr_variables['WA'], 'C': tr_variables['WC'],
                                  'AT': tr_variables['WTA'], 'CT': tr_variables['WTC'], 'nonlin': nonlin})])
    for i in range(1, num_hops):
        if i%2:
            WC, WA = tr_variables['WA'], tr_variables['WC']
            WTC, WTA = tr_variables['WTA'], tr_variables['WTC']
        else:
            WA, WC = tr_variables['WA'], tr_variables['WC']
            WTA, WTC = tr_variables['WTA'], tr_variables['WTC']
        layers.extend([(MemoryLayer, {'name': l_mem_names[i],
                                      'incomings': ('l_in_cont', 'l_in_cont_pe', l_mem_names[i-1]),
                                      'vocab_size': vocab_size, 'emb_size': emb_size,
                                      'A': WA, 'C': WC, 'AT': WTA, 'CT': WTC, 'nonlin': nonlin})])
#answers-----------------------------------------------------------------------
    l_emb_f_a_names = ['l_emb_f_a{}'.format(i) for i in range(answ_n)]
    for i in range(answ_n):
        layers.extend([(EncodingFullLayer, {'name': l_emb_f_a_names[i], 'incomings': (l_a_names[i], l_a_pe_names[i]),
                                            'vocab_size': vocab_size, 'emb_size': emb_size,
                                            'W': tr_variables['WAnsw'], 'WT': None})])
#------------------------------------------------------------concatenate layers
    layers.extend([(LL.ConcatLayer, {'name': 'l_qma_concat',
                                     'incomings': l_mem_names + l_emb_f_a_names})])
#--------------------------------------------------------------------RNN layers
    layers.extend([(RNN, {'name': 'l_qa_rnn_f', 'incoming': 'l_qma_concat',
#                          'mask_input': 'l_qamask_concat',
                          'num_units': rnn_size,
                          'backwards': False, 'only_return_final': False,
                          'grad_clipping': grad_clip})])
    layers.extend([(RNN, {'name': 'l_qa_rnn_b', 'incoming': 'l_qma_concat',
#                          'mask_input': 'l_qamask_concat',
                          'num_units': rnn_size,
                          'backwards': True, 'only_return_final': False,
                          'grad_clipping': grad_clip})])

    layers.extend([(LL.SliceLayer, {'name': 'l_qa_rnn_f_sl', 'incoming': 'l_qa_rnn_f',
                                    'indices': slice(-answ_n, None), 'axis': 1})])
    layers.extend([(LL.SliceLayer, {'name': 'l_qa_rnn_b_sl', 'incoming': 'l_qa_rnn_b',
                                    'indices': slice(-answ_n, None), 'axis': 1})])

    layers.extend([(LL.ElemwiseMergeLayer, {'name': 'l_qa_rnn_conc',
                                            'incomings': ('l_qa_rnn_f_sl', 'l_qa_rnn_b_sl'),
                                            'merge_function': T.add})])
#-----------------------------------------------------------------pooling layer
#    layers.extend([(LL.DimshuffleLayer, {'name': 'l_qa_rnn_conc_',
#                                         'incoming': 'l_qa_rnn_conc', 'pattern': (0, 'x', 1)})])
    layers.extend([(LL.Pool1DLayer, {'name': 'l_qa_pool',
                                     'incoming': 'l_qa_rnn_conc',
                                     'pool_size': pool_size, 'mode': 'max'})])
#------------------------------------------------------------------dense layers
    l_dence_names = ['l_dence_{}'.format(i) for i, _ in enumerate(dence_l)]
    if dropout:
        layers.extend([(LL.DropoutLayer, {'name': 'l_dence_do', 'p': dropout})])
    for i, d in enumerate(dence_l):
        if i < len(dence_l) - 1:
            nonlin = LN.tanh
        else:
            nonlin = LN.softmax
        layers.extend([(LL.DenseLayer, {'name': l_dence_names[i], 'num_units': d,
                                        'nonlinearity': nonlin})])
        if i < len(dence_l) - 1 and dropout:
            layers.extend([(LL.DropoutLayer, {'name': l_dence_names[i] + 'do', 'p': dropout})])

    if isinstance(valid_indices, np.ndarray) or isinstance(valid_indices, list):
        train_split=TrainSplit_indices(valid_indices=valid_indices)
    else:
        train_split=TrainSplit(eval_size=valid_indices, stratify=False)

    if permute_answ or permute_cont:
        batch_iterator_train = PermIterator(batch_size, permute_answ, permute_cont)
    else:
        batch_iterator_train = BatchIterator(batch_size=batch_size)

    def loss(x, t):
        return LO.aggregate(LO.categorical_crossentropy(T.clip(x, 1e-6, 1. - 1e-6), t))
#        return LO.aggregate(LO.squared_error(T.clip(x, 1e-6, 1. - 1e-6), t))

    nnet = NeuralNet(
            y_tensor_type=T.ivector,
            layers=layers,
            update=updates,
            update_learning_rate=lr,
#            update_epsilon=1e-7,
            objective_loss_function=loss,
            regression=False,
            verbose=2,
            batch_iterator_train=batch_iterator_train,
            batch_iterator_test=BatchIterator(batch_size=batch_size/2),
#            batch_iterator_train=BatchIterator(batch_size=batch_size),
#            batch_iterator_test=BatchIterator(batch_size=batch_size),            
            #train_split=TrainSplit(eval_size=eval_size)
            train_split=train_split,
            on_batch_finished=[zero_memnn]
        )
    nnet.initialize()
    PrintLayerInfo()(nnet)
    return nnet
Author: nturusin, Project: allenchallenge, Lines: 104, Source: AAI_lasagne_MemNN_7.py
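
One detail worth isolating from this example is the custom objective: predictions are clipped away from exact 0 and 1 before the categorical crossentropy, so log(0) can never blow up the loss. A standalone sketch of the same idea, with toy values of my own choosing:

# Clipped categorical crossentropy, isolated from the example above.
import numpy as np
import theano
import theano.tensor as T
import lasagne.objectives as LO

x = T.matrix('x')   # predicted class probabilities
t = T.ivector('t')  # integer class targets
loss = LO.aggregate(LO.categorical_crossentropy(T.clip(x, 1e-6, 1. - 1e-6), t))
f = theano.function([x, t], loss)

probs = np.array([[1.0, 0.0], [0.2, 0.8]], dtype=theano.config.floatX)
print(f(probs, np.array([1, 1], dtype='int32')))  # finite despite the exact zero probability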

Example 11: create_pretrained_vgg_nn_nolearn

# Required import: from nolearn.lasagne import NeuralNet
# Or: from nolearn.lasagne.NeuralNet import initialize

#......... part of the code omitted here .........
                        'filter_size':(7,7),
                        'stride':2,
                        'flip_filters':False
                        }),
            (NormLayer, {
                        'name':'norm1',
                        'alpha':.0001
                        }),
            (PoolLayer, {
                        'name':'pool1',
                        'pool_size':(3,3),
                        'stride':3,
                        'ignore_border':False
                        }),
            (ConvLayer, {
                        'name':'conv2',
                        'num_filters':256,
                        'filter_size':(5,5),
                        'flip_filters':False
    #                     'pad':2,
    #                     'stride':1
                       }),
            (PoolLayer, {
                        'name':'pool2',
                        'pool_size':(2,2),
                        'stride':2,
                        'ignore_border':False
                        }),
            (ConvLayer, {
                        'name':'conv3',
                        'num_filters':512,
                        'filter_size':(3,3),
                        'pad':1,
    #                     'stride':1
                        'flip_filters':False
                       }),
            (ConvLayer, {
                        'name':'conv4',
                        'num_filters':512,
                        'filter_size':(3,3),
                        'pad':1,
    #                     'stride':1
                        'flip_filters':False
                        }),
            (ConvLayer, {
                        'name':'conv5',
                        'num_filters':512,
                        'filter_size':(3,3),
                        'pad':1,
    #                     'stride':1
                        'flip_filters':False
                         }),
            (PoolLayer, {
                        'name':'pool5',
                        'pool_size':(3,3),
                        'stride':3,
                        'ignore_border':False
                        }),
            (DenseLayer,{
                        'name':'fc6',
                        'num_units':4096
                       }),
            (DropoutLayer, {
                        'name':'drop6',
                        'p':.5
                        }),
            (DenseLayer, {
                        'name':'fc7',
                        'num_units':4096
                        }),
        ],



    #        # optimization method:
        update=nesterov_momentum,
        update_learning_rate=0.01,
        update_momentum=0.9,

    #  Do not need these unless trainng the net.
    #     regression=True,  # flag to indicate we're dealing with regression problem
    #     max_epochs=400,  # we want to train this many epochs
    #     verbose=1,
    )

    # load pretrained weights
    vgg_nn.initialize()
    vgg_nn.load_params_from('./vgg_nolearn_saved_wts_biases.pkl')

    # load mean image
    model = pickle.load(open('./vgg_cnn_s.pkl'))
    mean_image = model['mean image']

    # pickle the model and the mean image
    with open("/data/mean_image.pkl", 'w') as f:
        pickle.dump(mean_image, f)
    with open("/data/full_vgg.pkl", 'w') as f:
        pickle.dump(vgg_nn, f)

    return vgg_nn, mean_image
Author: k-lev, Project: City_Tagger, Lines: 104, Source: vgg_nn_featurizer.py
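
A hedged sketch of using the returned featurizer; the 3x224x224 input geometry is an assumption about the truncated InputLayer above, as is mean_image having the same shape:

import numpy as np

vgg_nn, mean_image = create_pretrained_vgg_nn_nolearn()
img = np.random.rand(3, 224, 224).astype('float32')   # stand-in for a real image
# predict_proba returns the raw network output (the 4096 fc7 activations);
# plain predict() would argmax over them, since the regression flag is left off.
features = vgg_nn.predict_proba((img - mean_image).reshape(-1, 3, 224, 224))
print(features.shape)  # (1, 4096)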

Example 12: zscore

# Required import: from nolearn.lasagne import NeuralNet
# Or: from nolearn.lasagne.NeuralNet import initialize
# The prediction set might be too small to calculate a meaningful mean and standard deviation.
X_train_z = zscore(X_train, train_mean, train_sdev) #scipy.stats.mstats.zscore(X_train)
X_validate_z = zscore(X_validate, train_mean, train_sdev)  #scipy.stats.mstats.zscore(X_validate)

# These can be used to check my zscore calculation against scipy's zscore
#print(X_train_z)
#print(scipy.stats.mstats.zscore(X_train))

# Provide our own validation set
def my_split(self, X, y, eval_size):
    return X_train_z,X_validate_z,y_train,y_validate

net0.train_test_split = types.MethodType(my_split, net0)

# Initialize the network (the training call, fit(), is commented out below)
net0.initialize()
d = extract_weights(net0)
print("D:" + str(len(d)))

#net0.fit(X_train_z,y_train)

# Predict the validation set
pred_y = net0.predict(X_validate_z)

# Display predictions and count the number of incorrect predictions.
species_names = ['setosa','versicolour','virginica']

count = 0
wrong = 0
for element in zip(X_validate,y_validate,pred_y):
    print("Input: sepal length: {}, sepal width: {}, petal length: {}, petal width: {}; Expected: {}; Actual: {}".format(
Author: azmodii, Project: aifh, Lines: 33, Source: example_iris_anneal.py

Example 13: AdjustVariable

# Required import: from nolearn.lasagne import NeuralNet
# Or: from nolearn.lasagne.NeuralNet import initialize
        #objective_loss_function=binary_crossentropy,
        objective_loss_function=multilabel_objective,
        custom_score=("validation score", lambda x, y: 1 - np.mean(np.abs(x - y))),
        max_epochs=1200,
        #on_epoch_finished=[
        #    AdjustVariable('update_learning_rate', start=0.00001, stop=0.000001)
        #    #AdjustVariable('update_momentum', start=0.9, stop=0.999)
        #],
        batch_iterator_train=BatchIterator(batch_size=250),
        #batch_iterator_train = FlipBatchIterator(batch_size=25),
        verbose=2,
        )
    print "Training NN..."
    print datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d %H:%M:%S')
    X_offset = np.mean(X_train, axis = 0)
    nnet.initialize()
    layer_info = PrintLayerInfo()
    layer_info(nnet)
    nnet.fit(X_train-X_offset,y_train)

    print "Using trained model to predict"
    print datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d %H:%M:%S')
    y_predictions = nnet.predict(X_test-X_offset)

    print datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d %H:%M:%S')
    score = 0
    for i,j in zip(y_test,y_predictions):
        temp = []
        for a in j:
            if a == max(j):
                temp.append(1.)
Author: eraly, Project: wiki_art, Lines: 33, Source: preproc_conv_nn_classifier_01.py

Example 14: main

# Required import: from nolearn.lasagne import NeuralNet
# Or: from nolearn.lasagne.NeuralNet import initialize

#......... part of the code omitted here .........
    ax = sns.heatmap(pd.DataFrame(cons_mat[:,idx], columns=labels + ['survival']),
                            robust=True, yticklabels=False)
    plt.xticks(rotation=90)
    ax.tick_params(axis='both', which='major', labelsize=20)
    plt.tight_layout()
    fig_path = os.path.join(OUTDIR, 'clmap_consensus.eps')
    plt.savefig(fig_path, format='eps')
    plt.close()
       
    # create an ensemble of neural networks
    ncell_cons = 3000
    ncell_voter = 3000
    layers_voter = [
                    (layers.InputLayer, {'name': 'input', 'shape': (None, nmark, ncell_voter)}),
                    (layers.Conv1DLayer, {'name': 'conv', 
                                        'num_filters': nfilter, 'filter_size': 1}),
                    (layers.Pool1DLayer, {'name': 'meanPool', 'pool_size' : ncell_voter,
                                        'mode': 'average_exc_pad'}),
                    (layers.DenseLayer, {'name': 'output',
                                        'num_units': 1,
                                        'nonlinearity': T.tanh})]
             
    # predict on the test cohort
    small_data_list_v = [x[:ncell_cons].T.reshape(1,nmark,ncell_cons) for x in validation_list]
    data_v = np.vstack(small_data_list_v)
    stime, censor = y_valid[:,0], y_valid[:,1]
    
    # committee of the best nfold/2 models
    voter_risk_pred = list()
    for ifold in np.argsort(valid_accuracy):
        voter = NeuralNet(layers = layers_voter,                
                                    update = nesterov_momentum,
                                    update_learning_rate = 0.001,
                                    regression=True,
                                    max_epochs=5,
                                    verbose=0)
        voter.load_params_from(committee[ifold])
        voter.initialize()
        # rank the risk predictions
        voter_risk_pred.append(ss.rankdata(- np.squeeze(voter.predict(data_v))))
    all_voters = np.vstack(voter_risk_pred)
                
    # compute mean rank per individual
    risk_p = np.mean(all_voters, axis=0)
    g1 = np.squeeze(risk_p > np.median(risk_p))
    voters_pval_v = logrank_pval(stime, censor, g1)
    fig_v = os.path.join(OUTDIR, 'cellCnn_cox_test.eps')
    plot_KM(stime, censor, g1, voters_pval_v, fig_v)

    # filter-activating cells
    data_t = np.vstack(small_data_list_v)
    data_stack = np.vstack([x for x in np.swapaxes(data_t, 2, 1)])
                
    # finally define a network from the consensus filters
    nfilter_cons = cons_mat.shape[0]
    ncell_cons = 3000
    layers_cons = [
                    (layers.InputLayer, {'name': 'input', 'shape': (None, nmark, ncell_cons)}),
                    (layers.Conv1DLayer, {'name': 'conv', 
                                        'b': init.Constant(cons_mat[:,-2]),
                                        'W': cons_mat[:,:-2].reshape(nfilter_cons, nmark, 1),
                                        'num_filters': nfilter_cons, 'filter_size': 1}),
                    (layers.Pool1DLayer, {'name': 'meanPool', 'pool_size' : ncell_cons,
                                        'mode': 'average_exc_pad'}),
                    (layers.DenseLayer, {'name': 'output',
                                        'num_units': 1,
                                        'W': np.sign(cons_mat[:,-1:]),
                                        'b': init.Constant(0.),
                                        'nonlinearity': T.tanh})]
            
    net_cons = NeuralNet(layers = layers_cons,                
                            update = nesterov_momentum,
                            update_learning_rate = 0.001,
                            regression=True,
                            max_epochs=5,
                            verbose=0)
    net_cons.initialize()

    # get the representation after mean pooling
    xs = T.tensor3('xs').astype(theano.config.floatX)
    act_conv = theano.function([xs], lh.get_output(net_cons.layers_['conv'], xs)) 
    
    # and apply to the test data
    act_tot = act_conv(data_t)
    act_tot = np.swapaxes(act_tot, 2, 1)
    act_stack = np.vstack([x for x in act_tot])
    idx = range(7) + [8,9]
                
    for i_map in range(nfilter_cons):
        val = act_stack[:, i_map]
        descending_order = np.argsort(val)[::-1]
        val_cumsum = np.cumsum(val[descending_order])
        data_sorted = data_stack[descending_order]
        thres = 0.75 * val_cumsum[-1]
        res_data = data_sorted[val_cumsum < thres] 
        fig_path = os.path.join(OUTDIR, 'filter_'+str(i_map)+'_active.eps')       
        plot_marker_distribution([res_data[:,idx], data_stack[:,idx]],
                                            ['filter '+str(i_map), 'all'],
                                            [labels[l] for l in idx],
                                            (3,3), fig_path, 24)
Author: LazyXuan, Project: CellCnn, Lines: 104, Source: HIV_cohort.py
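
The committee above averages per-model rank orderings rather than raw scores, which makes the ensemble insensitive to scale differences between models. A self-contained sketch of just that rank-averaging step, with toy numbers of my own:

import numpy as np
import scipy.stats as ss

# Two models scoring five individuals; note the very different output scales.
scores = np.array([[0.9, 0.1, 0.5, 0.7, 0.3],
                   [90., 10., 40., 80., 20.]])

# Rank each model's predictions (ss.rankdata(-row): rank 1 = highest risk),
# then average ranks across models and split at the median, as in the example.
all_voters = np.vstack([ss.rankdata(-row) for row in scores])
risk_p = np.mean(all_voters, axis=0)
g1 = np.squeeze(risk_p > np.median(risk_p))   # high-risk group indicator
print(risk_p, g1)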

Example 15: enumerate

# Required import: from nolearn.lasagne import NeuralNet
# Or: from nolearn.lasagne.NeuralNet import initialize
        conv2d8_filter_size=(1,1),
        conv2d8_nonlinearity=lasagne.nonlinearities.rectify,
        conv2d8_W=W[7],

        #output_nonlinearity=lasagne.nonlinearities.softmax,#,  # output layer uses identity function
        #output_num_units=1000,  # 1000 target values
        #output_W = W[7],

        # optimization method params
        update=nesterov_momentum,
        update_learning_rate=0.01,
        update_momentum=0.9,
        max_epochs=10,
        verbose=1,
        regression=True
    )
    for i, w in enumerate(W):
        print i, w.shape

    net1.initialize()
    import cv2
    from training_images import simpleProcessImage
    img = cv2.imread("/home/simon/python/sklearn-theano/sklearn_theano/datasets/images/cat_and_dog.jpg")

    crop = simpleProcessImage(img)
    cv2.imshow("X", crop)
    res = net1.predict(crop.transpose(2,0,1).reshape(-1,3,231,231))
    print res

    cv2.waitKey()
Author: mosssimo, Project: glass, Lines: 32, Source: lafeat.py


Note: The nolearn.lasagne.NeuralNet.initialize examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets come from open-source projects contributed by many developers; copyright remains with the original authors. Please refer to each project's License before redistributing or using the code. Do not reproduce without permission.