

Python NeuralNet.get_all_layers Method Code Examples

This article collects typical usage examples of the Python method nolearn.lasagne.NeuralNet.get_all_layers. If you have been wondering what exactly NeuralNet.get_all_layers does, how to call it, or where to find working examples, the curated snippets below should help. You can also explore further usage of nolearn.lasagne.NeuralNet, the class this method belongs to.


Four code examples of the NeuralNet.get_all_layers method are shown below, ordered by popularity.
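As a quick orientation before the examples: get_all_layers returns the list of underlying lasagne Layer instances that back the NeuralNet, input layer first. Here is a minimal, hypothetical sketch (assuming the 0.x-era nolearn API these examples use; the two-layer net exists only for illustration):

from lasagne import layers
from nolearn.lasagne import NeuralNet

# a minimal, hypothetical net just to illustrate get_all_layers
net = NeuralNet(
    layers=[
        ('input', layers.InputLayer),
        ('output', layers.DenseLayer),
    ],
    input_shape=(None, 100),
    output_num_units=10,
    update_learning_rate=0.01,
    update_momentum=0.9,
    max_epochs=1,
)
net.initialize()                    # builds the lasagne layers without training
all_layers = net.get_all_layers()   # list of lasagne Layer instances, input first
print [l.name for l in all_layers]  # -> ['input', 'output']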

Example 1: train

# Required import: from nolearn.lasagne import NeuralNet [as alias]
# Or: from nolearn.lasagne.NeuralNet import get_all_layers [as alias]
def train():

    # load the pickled MNIST dataset
    dataset_path = "D://_Dataset//MNIST//mnist.pkl"
    with open(dataset_path, 'rb') as f:
        train_set, valid_set, test_set = pickle.load(f)

    img_dim = 28

    net = NeuralNet(
        layers=[
            ('input', layers.InputLayer),
            ('conv1', Conv2DLayer),
            ('pool1', MaxPool2DLayer),
            ('dropout1', layers.DropoutLayer),
            ('conv2', Conv2DLayer),
            ('pool2', MaxPool2DLayer),
            ('dropout2', layers.DropoutLayer),
            ('conv3', Conv2DLayer),
            ('pool3', MaxPool2DLayer),
            ('dropout3', layers.DropoutLayer),
            ('hidden4', layers.DenseLayer),
            ('dropout4', layers.DropoutLayer),
            ('hidden5', layers.DenseLayer),
            ('output', layers.DenseLayer),
        ],
        input_shape=(None, 1, img_dim, img_dim),
        conv1_num_filters=32, conv1_filter_size=(3, 3), pool1_pool_size=(2, 2),
        dropout1_p=0.1,
        conv2_num_filters=64, conv2_filter_size=(2, 2), pool2_pool_size=(2, 2),
        dropout2_p=0.2,
        conv3_num_filters=128, conv3_filter_size=(2, 2), pool3_pool_size=(2, 2),
        dropout3_p=0.3,
        hidden4_num_units=1000,
        dropout4_p=0.5,
        hidden5_num_units=1000,
        output_num_units=1,
        output_nonlinearity=None,
        update_learning_rate=theano.shared(float32(0.03)),
        update_momentum=theano.shared(float32(0.9)),
        regression=True,
        batch_iterator_train=BatchIterator(batch_size=128),
        on_epoch_finished=[
            AdjustVariable('update_learning_rate', start=0.03, stop=0.0001),
            AdjustVariable('update_momentum', start=0.9, stop=0.999),
            EarlyStopping(patience=200),
        ],
        max_epochs=3000,
        verbose=1,
    )

    # inspect the list of lasagne layers backing the net
    net.get_all_layers()
Developer: noureldien; Project: TrafficSignRecognition; Lines: 58; Source: lasagne_experiment.py
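The snippet above relies on float32, AdjustVariable and EarlyStopping, which are defined elsewhere in the source project. A common definition, following the pattern popularized by Daniel Nouri's nolearn tutorial (a sketch, not necessarily identical to the project's version):

import numpy as np

def float32(k):
    return np.cast['float32'](k)

class AdjustVariable(object):
    """Linearly anneals a theano shared variable (e.g. the learning
    rate) from `start` to `stop` over the course of training."""
    def __init__(self, name, start=0.03, stop=0.001):
        self.name = name
        self.start, self.stop = start, stop
        self.ls = None

    def __call__(self, nn, train_history):
        if self.ls is None:
            self.ls = np.linspace(self.start, self.stop, nn.max_epochs)
        epoch = train_history[-1]['epoch']
        getattr(nn, self.name).set_value(float32(self.ls[epoch - 1]))

class EarlyStopping(object):
    """Stops training when the validation loss has not improved for
    `patience` epochs, restoring the best weights seen so far."""
    def __init__(self, patience=100):
        self.patience = patience
        self.best_valid = np.inf
        self.best_valid_epoch = 0
        self.best_weights = None

    def __call__(self, nn, train_history):
        current_valid = train_history[-1]['valid_loss']
        current_epoch = train_history[-1]['epoch']
        if current_valid < self.best_valid:
            self.best_valid = current_valid
            self.best_valid_epoch = current_epoch
            self.best_weights = nn.get_all_params_values()
        elif self.best_valid_epoch + self.patience < current_epoch:
            nn.load_params_from(self.best_weights)
            raise StopIteration()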

Example 2: get_random_images

# Required import: from nolearn.lasagne import NeuralNet [as alias]
# Or: from nolearn.lasagne.NeuralNet import get_all_layers [as alias]
    # (excerpt: original_image, X_pred and index are defined earlier in the source)
    new_size = (original_image.size[0] * 2, original_image.size[1])
    new_im = Image.new('L', new_size)
    new_im.paste(original_image, (0, 0))
    rec_image = Image.fromarray(get_picture_array(X_pred, index))
    new_im.paste(rec_image, (original_image.size[0], 0))
    new_im.save('data/test.png', format="PNG")

get_random_images()
IPImage('data/test.png')



## we find the encode layer from our ae, and use it to define an encoding function

encode_layer_index = map(lambda pair : pair[0], ae.layers).index('encode_layer')
encode_layer = ae.get_all_layers()[encode_layer_index]

def get_output_from_nn(last_layer, X):
    indices = np.arange(128, X.shape[0], 128)
    sys.stdout.flush()

    # not splitting into batches can cause a memory error
    X_batches = np.split(X, indices)
    out = []
    for count, X_batch in enumerate(X_batches):
        #out.append(last_layer.get_output_for (X_batch).eval())
        out.append(lasagne.layers.get_output(last_layer, X_batch).eval())
        sys.stdout.flush()
    return np.vstack(out)

Developer: benmoran; Project: convolutional_autoencoder; Lines: 31; Source: mnist_conv_autoencode.py
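Example 2 also calls get_picture_array, which the excerpt does not show. A plausible reconstruction for 28x28 MNIST data (hypothetical; the project's own helper may differ):

import numpy as np

def get_picture_array(X, index):
    # pull one flattened example out of the batch and rescale it
    # to a displayable 8-bit grayscale image
    array = X[index].reshape(28, 28)
    array = np.clip(array, a_min=0, a_max=255)
    return array.astype(np.uint8)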

Example 3: main

# Required import: from nolearn.lasagne import NeuralNet [as alias]
# Or: from nolearn.lasagne.NeuralNet import get_all_layers [as alias]
def main():
    # load data set
    fname = 'mnist/mnist.pkl.gz'
    if not os.path.isfile(fname):
        testfile = urllib.URLopener()
        testfile.retrieve("http://deeplearning.net/data/mnist/mnist.pkl.gz", fname)
    f = gzip.open(fname, 'rb')
    train_set, valid_set, test_set = cPickle.load(f)
    f.close()
    X, y = train_set
    X = np.rint(X * 256).astype(np.int).reshape((-1, 1, 28, 28))  # convert to (0,255) int range (we'll do our own scaling)
    mu, sigma = np.mean(X.flatten()), np.std(X.flatten())

    X_train = X.astype(np.float64)
    X_train = (X_train - mu) / sigma
    X_train = X_train.astype(np.float32)

    # we need our target to be 1 dimensional
    X_out = X_train.reshape((X_train.shape[0], -1))

    conv_filters = 32
    deconv_filters = 32
    filter_size = 7
    epochs = 20
    encode_size = 40
    layerParam = [
        (layers.InputLayer, {'name': 'input_layer', 'shape': (None, 1, 28, 28)}),
        (layers.Conv2DLayer, {'name': 'conv', 'num_filters': conv_filters, 
            'filter_size': (filter_size, filter_size), 'nonlinearity': None}),
        (layers.MaxPool2DLayer, {'name': 'pool', 'pool_size': (2, 2)}),
        (layers.ReshapeLayer, {'name': 'flatten', 'shape': (([0], -1))}),
        (layers.DenseLayer, {'name': 'encode_layer', 'num_units': encode_size}),
        (layers.DenseLayer, {'name': 'hidden',
            'num_units': deconv_filters * (28 + filter_size - 1) ** 2 / 4}),
        (layers.ReshapeLayer, {'name': 'unflatten',
            'shape': ([0], deconv_filters, (28 + filter_size - 1) / 2, (28 + filter_size - 1) / 2)}),
        (Unpool2DLayer, {'name': 'unpool', 'ds': (2, 2)}),
        (layers.Conv2DLayer, {'name': 'deconv', 'num_filters': 1,
            'filter_size': (filter_size, filter_size), 'nonlinearity': None}),
        (layers.ReshapeLayer, {'name': 'output_layer', 'shape': (([0], -1))})
    ]

    ae = NeuralNet(
        layers=layerParam,
        update_learning_rate=0.01,
        update_momentum=0.975,
        batch_iterator_train=FlipBatchIterator(batch_size=128),
        regression=True,
        max_epochs=epochs,
        verbose=1,
    )
    ae.fit(X_train, X_out)
    print '---------------train end'
    print
    ###  expect training / val error of about 0.087 with these parameters
    ###  if your GPU not fast enough, reduce the number of filters in the conv/deconv step

    # work around pickle's default recursion limit
    sys.setrecursionlimit(10000)
    pickle.dump(ae, open('mnist/conv_ae.pkl', 'wb'))
    # ae = pickle.load(open('mnist/conv_ae.pkl', 'rb'))
    ae.save_params_to('mnist/conv_ae.np')


    X_train_pred = ae.predict(X_train).reshape(-1, 28, 28) * sigma + mu
    X_pred = np.rint(X_train_pred).astype(int)
    X_pred = np.clip(X_pred, a_min=0, a_max=255)
    X_pred = X_pred.astype('uint8')
    print X_pred.shape , X.shape


    ###  show random inputs / outputs side by side

    for i in range(0, 10):
        get_random_images(X, X_pred, i)

    return  # NOTE: the code below is unreachable in the source as published

    ## we find the encode layer from our ae, and use it to define an encoding function

    encode_layer_index = map(lambda pair : pair[0], ae.layers).index('encode_layer')
    print '----------encode_layer_index:', encode_layer_index
    encode_layer = ae.get_all_layers()[encode_layer_index]

    def get_output_from_nn(last_layer, X):
        indices = np.arange(128, X.shape[0], 128)
        sys.stdout.flush()

        # not splitting into batches can cause a memory error
        X_batches = np.split(X, indices)
        out = []
        for count, X_batch in enumerate(X_batches):
            out.append(layers.get_output(last_layer, X_batch).eval())
            sys.stdout.flush()
        return np.vstack(out)

    def encode_input(X):
        return get_output_from_nn(encode_layer, X)
    X_encoded = encode_input(X_train)

#......... part of the code omitted here .........
Developer: yinchuandong; Project: DBN_clustering; Lines: 103; Source: mnist_conv_autoencode.py
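Unpool2DLayer and FlipBatchIterator in example 3 are custom classes from the source project, not part of stock lasagne/nolearn. A minimal unpooling layer in the same spirit (a sketch that upsamples by simple repetition; the project's original implementation may differ):

from lasagne import layers

class Unpool2DLayer(layers.Layer):
    """Rough inverse of a max-pool over the last two axes of a 4D
    tensor: each value is repeated ds[0] x ds[1] times."""
    def __init__(self, incoming, ds, **kwargs):
        super(Unpool2DLayer, self).__init__(incoming, **kwargs)
        self.ds = ds

    def get_output_shape_for(self, input_shape):
        return (input_shape[0], input_shape[1],
                input_shape[2] * self.ds[0],
                input_shape[3] * self.ds[1])

    def get_output_for(self, input, **kwargs):
        # repeat along both spatial axes (theano tensors support .repeat)
        return input.repeat(self.ds[0], axis=2).repeat(self.ds[1], axis=3)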

Example 4: createSAE

# Required import: from nolearn.lasagne import NeuralNet [as alias]
# Or: from nolearn.lasagne.NeuralNet import get_all_layers [as alias]

#......... part of the code omitted here .........
            ('input', layers.InputLayer),
            ('hidden', layers.DenseLayer),
            ('output_layer', layers.DenseLayer),
        ],

            input_shape=(None, 10000),
            hidden_num_units=3000,
            output_layer_num_units=10000,

            update_learning_rate=learning_rate,
            update_momentum=update_momentum,
            update=nesterov_momentum,
            train_split=TrainSplit(eval_size=train_valid_split),
            batch_iterator_train=BatchIterator(batch_size=batch_size),
            # batch_iterator_train=FlipBatchIterator(batch_size=batch_size),
            regression=True,
            max_epochs=epochs,
            verbose=1,
            hiddenLayer_to_output=-2)

        trian_last_hiddenLayer = trian_last_hiddenLayer.astype(np.float32)

        cnn2.fit(trian_last_hiddenLayer, trian_last_hiddenLayer)
        trian_last_hiddenLayer = cnn2.output_hiddenLayer(trian_last_hiddenLayer)
        test_last_hiddenLayer = cnn2.output_hiddenLayer(test_last_hiddenLayer)

        cnn3 = NeuralNet(layers=[
            ('input', layers.InputLayer),
            ('hidden', layers.DenseLayer),
            ('output_layer', layers.DenseLayer),
        ],

            input_shape=(None, 3000),
            hidden_num_units=1000,
            output_layer_num_units=3000,

            update_learning_rate=learning_rate,
            update_momentum=update_momentum,
            update=nesterov_momentum,
            train_split=TrainSplit(eval_size=train_valid_split),
            batch_iterator_train=BatchIterator(batch_size=batch_size),
            # batch_iterator_train=FlipBatchIterator(batch_size=batch_size),
            regression=True,
            max_epochs=epochs,
            verbose=1,
            hiddenLayer_to_output=-2)

        trian_last_hiddenLayer = trian_last_hiddenLayer.astype(np.float32)
        cnn3.fit(trian_last_hiddenLayer, trian_last_hiddenLayer)
        trian_last_hiddenLayer = cnn3.output_hiddenLayer(trian_last_hiddenLayer)
        test_last_hiddenLayer = cnn3.output_hiddenLayer(test_last_hiddenLayer)

        cnn4 = NeuralNet(layers=[
            ('input', layers.InputLayer),
            ('hidden', layers.DenseLayer),
            ('output_layer', layers.DenseLayer),
        ],

            input_shape=(None, 1000),
            hidden_num_units=300,
            output_layer_num_units=1000,

            update_learning_rate=learning_rate,
            update_momentum=update_momentum,
            update=nesterov_momentum,
            train_split=TrainSplit(eval_size=train_valid_split),
            batch_iterator_train=BatchIterator(batch_size=batch_size),
            # batch_iterator_train=FlipBatchIterator(batch_size=batch_size),
            regression=True,
            max_epochs=epochs,
            verbose=1,
            hiddenLayer_to_output=-2)

        trian_last_hiddenLayer = trian_last_hiddenLayer.astype(np.float32)
        cnn4.fit(trian_last_hiddenLayer, trian_last_hiddenLayer)
        trian_last_hiddenLayer = cnn4.output_hiddenLayer(trian_last_hiddenLayer)
        test_last_hiddenLayer = cnn4.output_hiddenLayer(test_last_hiddenLayer)


        input_layer = cnn1.get_all_layers()[0]
        hidden1_layer = cnn1.get_all_layers()[1]
        hidden1_layer.input_layer = input_layer
        hidden2_layer = cnn2.get_all_layers()[1]
        hidden2_layer.input_layer = hidden1_layer
        hidden3_layer = cnn3.get_all_layers()[1]
        hidden3_layer.input_layer = hidden2_layer
        final_layer = cnn4.get_all_layers()[1]
        final_layer.input_layer = hidden3_layer

        #         out_train = final_layer.get_output(x_train).eval()
        #         out_test = final_layer.get_output(test_x).eval()

        f = gzip.open(folder_path + "output.pkl.gz",'wb')
        cPickle.dump((trian_last_hiddenLayer, test_last_hiddenLayer), f, protocol=2)
        f.close()
        #         f = gzip.open("pickled_images/tmp.pkl.gz", 'rb')
        #         trian_last_hiddenLayer, test_last_hiddenLayer = cPickle.load(f)
        #         f.close()

        return cnn1
Developer: idocoh; Project: ISH_Lasagne; Lines: 104; Source: articleCat_DAE_4.py
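Note that output_hiddenLayer and the hiddenLayer_to_output keyword in example 4 are extensions defined in this project's customized NeuralNet, not stock nolearn. Conceptually, get_all_layers is what makes such a method easy to write; a hypothetical sketch of the idea:

import lasagne

def output_hiddenLayer(net, X, layer_index=-2):
    """Hypothetical: forward-propagate X up to the given layer (by
    default the one just before the output) and return its activations."""
    layer = net.get_all_layers()[layer_index]
    return lasagne.layers.get_output(layer, X, deterministic=True).eval()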


Note: the nolearn.lasagne.NeuralNet.get_all_layers examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors, and distribution or use should follow the corresponding project's license. Please do not reproduce without permission.