

Python NeuralNet.save_params_to Method Code Examples

This article collects typical usage examples of the Python method nolearn.lasagne.NeuralNet.save_params_to. If you are wondering what NeuralNet.save_params_to does, how to call it, or how it is used in practice, the curated code examples below should help. You can also explore the other usage examples of nolearn.lasagne.NeuralNet, the class this method belongs to.


The following presents 15 code examples of the NeuralNet.save_params_to method, ordered by popularity.
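
All of the examples share one pattern: call save_params_to(filename) after training, then call load_params_from(filename) on a net with the same layer structure to restore the weights; save_params_to stores only the parameter values, not the architecture. A minimal sketch of that round trip (the toy data, layer sizes, and file name here are illustrative, not taken from any example below):

import numpy as np
from lasagne import layers
from lasagne.updates import nesterov_momentum
from nolearn.lasagne import NeuralNet

# toy regression data, just to have something to fit
X = np.random.rand(100, 8).astype(np.float32)
y = np.random.rand(100, 1).astype(np.float32)

net = NeuralNet(
    layers=[('input', layers.InputLayer),
            ('hidden', layers.DenseLayer),
            ('output', layers.DenseLayer)],
    input_shape=(None, 8),
    hidden_num_units=16,
    output_num_units=1,
    output_nonlinearity=None,
    update=nesterov_momentum,
    update_learning_rate=0.01,
    update_momentum=0.9,
    regression=True,
    max_epochs=5,
    verbose=0,
)
net.fit(X, y)
net.save_params_to('toy_params.pkl')    # weights only, not the architecture

# restoring requires a net built with the same layers:
net.load_params_from('toy_params.pkl')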

Example 1: gridsearch_alpha

# Required import: from nolearn.lasagne import NeuralNet [as alias]
# Or: from nolearn.lasagne.NeuralNet import save_params_to [as alias]
    def gridsearch_alpha(self,learning_rate,index,params=None):
        hidden_unit = ((index+1)*2)/3  # computed but unused here; the hidden layer below is fixed at 15 units
        self.l_in = ls.layers.InputLayer(shape=(None,n_input),input_var=None)
        self.l_hidden = ls.layers.DenseLayer(self.l_in,num_units=15,nonlinearity=ls.nonlinearities.rectify)
        self.network = l_out = ls.layers.DenseLayer(self.l_hidden,num_units=1)
        list_results = np.zeros(learning_rate.shape[0], dtype=np.float64)  # one score per learning rate
        for i, item in enumerate(learning_rate):
            #Init Neural net
            net1 = NeuralNet(
                layers=self.network,
                # optimization method:
                update=nesterov_momentum,
                update_learning_rate=item,
                update_momentum=0.9,
                regression=True,  # flag to indicate we're dealing with regression problem
                max_epochs=800,  # we want to train this many epochs
#                 verbose=1,
                eval_size = 0.4
            )
            net1.fit(self.X_training,self.y_training)
            self.pred = net1.predict(self.n_sample2)
            name_file = "Params/saveNeuralNetwork_%s_%s.tdn" %(item,index)
            net1.save_params_to(name_file)
            score_nn = net1.score(self.n_sample2,self.n_test2)
            list_results[i] = score_nn
            print "index=%f,item=%f,score=%f"%(index,item,score_nn)
        return list_results
Author: NhuanTDBK, Project: TrafficPrediction, Lines: 29, Source: nnGridSearch.py

Example 2: network

# Required import: from nolearn.lasagne import NeuralNet [as alias]
# Or: from nolearn.lasagne.NeuralNet import save_params_to [as alias]
class network(object):
    def __init__(self,X_train, Y_train):
        #self.__hidden=0

        self.__hidden=int(math.ceil((2*(X_train.shape[1]+ 1))/3))
        self.net= NeuralNet(
            layers=[
                ('input', layers.InputLayer),
                ('hidden', layers.DenseLayer),
                ('output', layers.DenseLayer)
            ],
            input_shape=( None, X_train.shape[1] ),
            hidden_num_units=self.__hidden,
            #hidden_nonlinearity=nonlinearities.tanh,
            output_nonlinearity=None,
            batch_iterator_train=BatchIterator(batch_size=256),
            output_num_units=1,

            on_epoch_finished=[EarlyStopping(patience=50)],
            update=momentum,
            update_learning_rate=theano.shared(np.float32(0.03)),
            update_momentum=theano.shared(np.float32(0.8)),
            regression=True,
            max_epochs=1000,
            verbose=1,
        )

        self.net.fit(X_train,Y_train)

    def predict(self,X):
        return self.net.predict(X)

    def showMetrics(self):
        train_loss = np.array([i["train_loss"] for i in self.net.train_history_])
        valid_loss = np.array([i["valid_loss"] for i in self.net.train_history_])
        pyplot.plot(train_loss, linewidth=3, label="training")
        pyplot.plot(valid_loss, linewidth=3, label="validation")
        pyplot.grid()
        pyplot.legend()
        pyplot.xlabel("epoch")
        pyplot.ylabel("loss")
        # pyplot.ylim(1e-3, 1e-2)
        pyplot.yscale("log")
        pyplot.show()

    def saveNet(self,fname):
        self.net.save_params_to(fname)

    def loadNet(self,fname):
        self.net.load_params_from(fname)
Author: hiteshpaul, Project: Salesforecasting, Lines: 52, Source: net.py
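
A hypothetical driver for the wrapper above might look like this (X_train, Y_train, and the file name are illustrative, and the EarlyStopping helper referenced in __init__ is assumed to be defined elsewhere in the project):

import numpy as np

X_train = np.random.rand(512, 10).astype(np.float32)
Y_train = np.random.rand(512, 1).astype(np.float32)

model = network(X_train, Y_train)   # training happens in __init__
model.showMetrics()                 # plot train/valid loss curves
model.saveNet('sales_net.params')   # persist weights via save_params_to
model.loadNet('sales_net.params')   # restore them into the same architecture
preds = model.predict(X_train[:5])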

Example 3: main

# Required import: from nolearn.lasagne import NeuralNet [as alias]
# Or: from nolearn.lasagne.NeuralNet import save_params_to [as alias]
def main():
  pickle_file = '/mnt/Data/uniformsample_04_1k_mirror_rot_128x128_norm.cpickle'
  labels_csvfile = '/mnt/Data/trainLabels.csv'

  train_data, train_labels, test_data, test_labels = make_train_and_test_sets(pickle_file, labels_csvfile)

  train_data = train_data.reshape(-1, 3, IMAGE_SIZE, IMAGE_SIZE)
  train_data = train_data.astype('float32')
  test_data = test_data.reshape(-1, 3, IMAGE_SIZE, IMAGE_SIZE)
  test_data = test_data.astype('float32')

  numFeatures = train_data[1].size
  numTrainExamples = train_data.shape[0]
  print 'Features = %d' %(numFeatures)
  print 'Train set = %d' %(numTrainExamples)

  print "training data shape: ", train_data.shape
  print "training labels shape: ", train_labels.shape

  layers0 = [
             (InputLayer, {'shape': (None, train_data.shape[1], train_data.shape[2], train_data.shape[3])}),
           
             (Conv2DLayer, {'num_filters': 32, 'filter_size': 3}),
             (Conv2DLayer, {'num_filters': 32, 'filter_size': 3}),
             (Conv2DLayer, {'num_filters': 32, 'filter_size': 3}),
             (MaxPool2DLayer, {'pool_size': 2}),
           
             (Conv2DLayer, {'num_filters': 64, 'filter_size': 3}),
             (Conv2DLayer, {'num_filters': 64, 'filter_size': 3}),
             (MaxPool2DLayer, {'pool_size': 2}),
           
             (Conv2DLayer, {'num_filters': 128, 'filter_size': 3}),
             (Conv2DLayer, {'num_filters': 128, 'filter_size': 3}),
             (MaxPool2DLayer, {'pool_size': 2}),
           
             (DenseLayer, {'num_units': 600}),
             (DropoutLayer, {}),
             (DenseLayer, {'num_units': 600}),
           
             (DenseLayer, {'num_units': 2, 'nonlinearity': softmax}),
             ]

  def regularization_objective(layers, lambda1=0., lambda2=0., *args, **kwargs):
    ''' from nolearn MNIST CNN tutorial'''
    # default loss
    losses = objective(layers, *args, **kwargs)
    # get the layers' weights, but only those that should be regularized
    # (i.e. not the biases)
    weights = get_all_params(layers[-1], regularizable=True)
    # sum of absolute weights for L1
    sum_abs_weights = sum([abs(w).sum() for w in weights])
    # sum of squared weights for L2
    sum_squared_weights = sum([(w ** 2).sum() for w in weights])
    # add weights to regular loss
    losses += lambda1 * sum_abs_weights + lambda2 * sum_squared_weights
    return losses

  clf = NeuralNet(
                  layers=layers0,
                  max_epochs=5,
                 
                  # optimization method
                  update=nesterov_momentum,
                  update_momentum=0.9,
                  update_learning_rate=0.0002,
                 
                  objective=regularization_objective,
                  objective_lambda2=0.0025,
                 
                  train_split=TrainSplit(eval_size=0.1),
                  verbose=1,
                
                  )

  # load parameters from pickle file to continue training from previous epochs or smaller network
  #clf.load_params_from('params1.pickle')
  #clf.initialize()

  for i in range(100):
    print '******************************  ',i,'  ******************************'

    clf.fit(train_data, train_labels)

    clf.save_params_to('params2.pickle')

    preds = clf.predict(test_data)
    #print sum(preds)
    print "Test data accuracy: ", 1.0*sum(preds==test_labels)/test_labels.shape[0]
Author: nathanieljblack, Project: W251_Project, Lines: 90, Source: nolearncnn_04.py
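
Two details of this example are worth noting. First, nolearn strips the objective_ prefix from constructor keywords and forwards the rest to the objective, so objective_lambda2=0.0025 above reaches regularization_objective as lambda2=0.0025. Second, the commented-out load_params_from lines hint at the warm-start pattern that pairs with the save_params_to call in the loop. A sketch of resuming from the checkpoint written above (assuming the same layers0 definition is in scope):

# warm-start sketch: rebuild the same architecture, then restore the weights
# that the training loop above wrote to params2.pickle
clf2 = NeuralNet(
    layers=layers0,
    update=nesterov_momentum,
    update_momentum=0.9,
    update_learning_rate=0.0002,
    objective=regularization_objective,
    objective_lambda2=0.0025,
    train_split=TrainSplit(eval_size=0.1),
    verbose=1,
)
clf2.initialize()                        # build the network and its parameters
clf2.load_params_from('params2.pickle')  # weights carry over layer by layer
clf2.fit(train_data, train_labels)       # continue training from the checkpoint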

Example 4: create_stack

# Required import: from nolearn.lasagne import NeuralNet [as alias]
# Or: from nolearn.lasagne.NeuralNet import save_params_to [as alias]
normal_stack = create_stack(N)

print("Made stack!")

for k in range(0, 1000):
    saved_accuracy = 10011.0  # sentinel: larger than any plausible validation loss
    data = np.array(normal_stack + random.sample(coords, N))
    val = np.append(np.zeros(N), np.ones(N))
    data, val = shuffle(data, val)
    for i in range(0, int(EPOCHS)):
        nn.fit(data, val)
        cur_accuracy = nn.train_history_[-1]['valid_loss']
        if cur_accuracy - 0.004 > saved_accuracy:
            print("Test Loss Jump! Loading previous network!")
            with suppress_stdout():
                nn.load_params_from("cachedgooglenn2.params")
        else:
            nn.save_params_to('cachedgooglenn2.params')
            saved_accuracy = cur_accuracy
        nn.update_learning_rate *= DECAY

    normal_stack = update_stack(normal_stack, int(K*N), nn)

    print("Data Report: K={3:.2f}, Prob Before={0}, Prob After={1}, Overlap={2}".format(proba_before, proba_after, overlap, K))
    K += KGROWTH
    EPOCHS *= EGROWTH
    for r in range(len(nn.train_history_)):
        nn.train_history_[r]['train_loss'] = 10011.0

nn.save_params_to('googlenn2.params')
Author: nikcheerla, Project: TCGA-Mitosis, Lines: 32, Source: googlenet.py
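
The loop above implements a save/rollback checkpoint scheme: keep training, overwrite the checkpoint while validation loss stays acceptable, and reload the last good checkpoint when it jumps. A condensed, hedged sketch of that logic (net, X, y, path, and the tolerance are placeholders, and each net.fit call is assumed to run a small fixed number of epochs):

def fit_with_rollback(net, X, y, rounds, path='checkpoint.params', tol=0.004):
    """Train in rounds; roll back to the last saved checkpoint whenever
    validation loss worsens by more than `tol`."""
    best_loss = float('inf')
    for _ in range(rounds):
        net.fit(X, y)  # nolearn appends each round to train_history_
        loss = net.train_history_[-1]['valid_loss']
        if loss - tol > best_loss:
            net.load_params_from(path)  # discard the bad update
        else:
            net.save_params_to(path)    # keep this checkpoint
            best_loss = loss
    return net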

Example 5: print(

# Required import: from nolearn.lasagne import NeuralNet [as alias]
# Or: from nolearn.lasagne.NeuralNet import save_params_to [as alias]
X = X_compressed[()]
y = y_compressed[()]
print('Loaded Data')

# Model Specifications
net = phf.build_GoogLeNet(img_width, img_height)
values = pickle.load(open('\models\\blvc_googlenet.pkl', 'rb'))['param values'][:-2]
lasagne.layers.set_all_param_values(net['pool5/7x7_s1'], values)

# Shift image array to BGR for pretrained caffe models
X = X[:, ::-1, :, :]


net0 = NeuralNet(
    net['softmax'],
    max_epochs=300,
    update=adam,
    update_learning_rate=.00001, #start with a really low learning rate
    #objective_l2=0.0001, 
    
    batch_iterator_train = BatchIterator(batch_size=32),
    batch_iterator_test = BatchIterator(batch_size=32),

    train_split=TrainSplit(eval_size=0.2),
    verbose=3,
)


net0.fit(X, y)
net0.save_params_to('ModelWeights')
Author: SyRenity, Project: movie-posters, Lines: 32, Source: Poster_train.py

Example 6: open(

# Required import: from nolearn.lasagne import NeuralNet [as alias]
# Or: from nolearn.lasagne.NeuralNet import save_params_to [as alias]
    verbose=1,
    )
ae.fit(X_train, X_out)
print
###  expect training / val error of about 0.087 with these parameters
###  if your GPU is not fast enough, reduce the number of filters in the conv/deconv step



import pickle
import sys
sys.setrecursionlimit(10000)

pickle.dump(ae, open('mnist/conv_ae.pkl','w'))
#ae = pickle.load(open('mnist/conv_ae.pkl','r'))
ae.save_params_to('mnist/conv_ae.np')



X_train_pred = ae.predict(X_train).reshape(-1, 28, 28) * sigma + mu
X_pred = np.rint(X_train_pred).astype(int)
X_pred = np.clip(X_pred, a_min = 0, a_max = 255)
X_pred = X_pred.astype('uint8')
print X_pred.shape , X.shape



###  show random inputs / outputs side by side

def get_picture_array(X, index):
    array = X[index].reshape(28,28)
Author: benmoran, Project: convolutional_autoencoder, Lines: 33, Source: mnist_conv_autoencode.py

Example 7: main

# Required import: from nolearn.lasagne import NeuralNet [as alias]
# Or: from nolearn.lasagne.NeuralNet import save_params_to [as alias]
def main():
    # load data set
    fname = 'mnist/mnist.pkl.gz'
    if not os.path.isfile(fname):
        testfile = urllib.URLopener()
        testfile.retrieve("http://deeplearning.net/data/mnist/mnist.pkl.gz", fname)
    f = gzip.open(fname, 'rb')
    train_set, valid_set, test_set = cPickle.load(f)
    f.close()
    X, y = train_set
    X = np.rint(X * 256).astype(np.int).reshape((-1, 1, 28, 28))  # convert to (0,255) int range (we'll do our own scaling)
    mu, sigma = np.mean(X.flatten()), np.std(X.flatten())

    X_train = X.astype(np.float64)
    X_train = (X_train - mu) / sigma
    X_train = X_train.astype(np.float32)

    # we need our target to be 1 dimensional
    X_out = X_train.reshape((X_train.shape[0], -1))

    conv_filters = 32
    deconv_filters = 32
    filter_size = 7
    epochs = 20
    encode_size = 40
    layerParam= [
        (layers.InputLayer, {'name': 'input_layer', 'shape': (None, 1, 28, 28)}),
        (layers.Conv2DLayer, {'name': 'conv', 'num_filters': conv_filters, 
            'filter_size': (filter_size, filter_size), 'nonlinearity': None}),
        (layers.MaxPool2DLayer, {'name': 'pool', 'pool_size': (2, 2)}),
        (layers.ReshapeLayer, {'name': 'flatten', 'shape': (([0], -1))}),
        (layers.DenseLayer, {'name': 'encode_layer', 'num_units': encode_size}),
        (layers.DenseLayer, {'name': 'hidden', 
            'num_units': deconv_filters * (28 + filter_size - 1) ** 2 / 4}),
        (layers.ReshapeLayer, {'name': 'unflatten', 
            'shape': (([0], deconv_filters, (28 + filter_size - 1) / 2, (28 + filter_size - 1) / 2 ))}),
        (Unpool2DLayer, {'name': 'unpool', 'ds': (2, 2)}),
        (layers.Conv2DLayer, {'name': 'deconv', 'num_filters': 1,
            'filter_size': (filter_size, filter_size), 'nonlinearity': None}),
        (layers.ReshapeLayer, {'name': 'output_layer', 'shape': (([0], -1))})
    ]

    ae = NeuralNet(
        layers=layerParam,
        update_learning_rate = 0.01,
        update_momentum = 0.975,
        batch_iterator_train=FlipBatchIterator(batch_size=128),
        regression=True,
        max_epochs= epochs,
        verbose=1,
        )
    ae.fit(X_train, X_out)
    print '---------------train end'
    print
    ###  expect training / val error of about 0.087 with these parameters
    ###  if your GPU is not fast enough, reduce the number of filters in the conv/deconv step

    # handle the default limitation of pickle
    sys.setrecursionlimit(10000)
    pickle.dump(ae, open('mnist/conv_ae.pkl','w'))
    # ae = pickle.load(open('mnist/conv_ae.pkl','r'))
    ae.save_params_to('mnist/conv_ae.np')


    X_train_pred = ae.predict(X_train).reshape(-1, 28, 28) * sigma + mu
    X_pred = np.rint(X_train_pred).astype(int)
    X_pred = np.clip(X_pred, a_min = 0, a_max = 255)
    X_pred = X_pred.astype('uint8')
    print X_pred.shape , X.shape


    ###  show random inputs / outputs side by side

    for i in range(0, 10):
        get_random_images(X, X_pred, i)

    return

    ## we find the encode layer from our ae, and use it to define an encoding function

    encode_layer_index = [param[1]['name'] for param in ae.layers].index('encode_layer')  # layers are (class, dict) pairs here
    print '----------encode_layer_index:', encode_layer_index
    encode_layer = ae.get_all_layers()[encode_layer_index]

    def get_output_from_nn(last_layer, X):
        indices = np.arange(128, X.shape[0], 128)
        sys.stdout.flush()

        # not splitting into batches can cause a memory error
        X_batches = np.split(X, indices)
        out = []
        for count, X_batch in enumerate(X_batches):
            out.append(layers.get_output(last_layer, X_batch).eval())
            sys.stdout.flush()
        return np.vstack(out)

    def encode_input(X):
        return get_output_from_nn(encode_layer, X)
    X_encoded = encode_input(X_train)

#......... remainder of this code omitted .........
Author: yinchuandong, Project: DBN_clustering, Lines: 103, Source: mnist_conv_autoencode.py

Example 8: NeuralNet

# Required import: from nolearn.lasagne import NeuralNet [as alias]
# Or: from nolearn.lasagne.NeuralNet import save_params_to [as alias]
                 'flip_filters':False,
                 'W':layer_w_b['conv5'][0],
                 'b':layer_w_b['conv5'][1]}),
    (PoolLayer, {'name':'pool5', 'pool_size': 3, 'stride':3, 'ignore_border':False}), 
    (DenseLayer, {'name':'fc6',
                  'num_units': 4096, 
                  'W': layer_w_b['fc6'][0],
                  'b': layer_w_b['fc6'][1] }),
    (DropoutLayer, {'name': 'drop6', 'p': 0.5 }),
    (DenseLayer, {'name':'fc7',
                  'num_units': 4096, 
                  'W': layer_w_b['fc7'][0],
                  'b': layer_w_b['fc7'][1] })
]

net0 = NeuralNet(
    layers=layers0,
    update=nesterov_momentum,
    update_learning_rate=0.01,
    update_momentum=0.9,
  #  regression=True,  # flag to indicate we're dealing with regression problem
  #  max_epochs=400,  # we want to train this many epochs
    verbose=1,
)

#initialize nolearn net
net0.initialize()

#save weights and biases to the file for future use
net0.save_params_to('nolearn_with_w_b.pkl')
Author: Naunett, Project: cars_project, Lines: 32, Source: nolearn_load_weights.py

Example 9:

# Required import: from nolearn.lasagne import NeuralNet [as alias]
# Or: from nolearn.lasagne.NeuralNet import save_params_to [as alias]
        ('hidden', layers.DenseLayer),
        ('output', layers.DenseLayer),
        ],
    input_shape = (None, 1, 20, 20),
    conv_num_filters = 32, conv_filter_size = (3, 3), 
    pool_pool_size = (2, 2),
    hidden_num_units = 50,
    output_num_units = 2, output_nonlinearity = softmax,

    update_learning_rate=0.01,
    update_momentum = 0.9,

    regression = False,
    max_epochs = 60,
    verbose = 1,
    )

net.fit(train_x, train_y)
net.save_params_to(CNN_Weights)

train_loss = np.array([i["train_loss"] for i in net.train_history_])
valid_loss = np.array([i["valid_loss"] for i in net.train_history_])
pyplot.plot(train_loss, linewidth=3, label="train")
pyplot.plot(valid_loss, linewidth=3, label="valid")
pyplot.grid()
pyplot.legend()
pyplot.xlabel("epoch")
pyplot.ylabel("loss")
pyplot.ylim(1e-3, 1.5e-1)
pyplot.yscale("log")
pyplot.show()
Author: SPaterakis, Project: Face_Detection, Lines: 33, Source: CNN_training.py

Example 10: main

# Required import: from nolearn.lasagne import NeuralNet [as alias]
# Or: from nolearn.lasagne.NeuralNet import save_params_to [as alias]
def main():
    # load data set
    fname = 'imagenet.pkl'
    train_set = pickle.load(open(fname, 'r'))
    X = train_set[0:1000]
    # X = X.astype(np.int).reshape((-1, 3, 256, 256))  # convert to (0,255) int range (we'll do our own scaling)
    # sigma = np.std(X.flatten())
    # mu = np.mean(X.flatten())

    # print np.shape(X[0])
    # print mu
    # print sigma
    # return
    # # <codecell>
    # X_train = X.astype(np.float64)
    # X_train = (X_train - mu) / sigma
    # X_train = X_train.astype(np.float32)
    X_train = X.astype(np.float32)

    # we need our target to be 1 dimensional
    X_out = X_train.reshape((X_train.shape[0], -1))

    # <codecell>
    conv_filters = 32
    deconv_filters = 32
    filter_sizes = 7
    epochs = 20
    encode_size = 40
    ae = NeuralNet(
        layers=[
            ('input', layers.InputLayer),
            ('conv', layers.Conv2DLayer),
            ('pool', layers.MaxPool2DLayer),
            ('flatten', layers.ReshapeLayer),  # output_dense
            ('encode_layer', layers.DenseLayer),
            ('hidden', layers.DenseLayer),  # output_dense
            ('unflatten', layers.ReshapeLayer),
            ('unpool', Unpool2DLayer),
            ('deconv', layers.Conv2DLayer),
            ('output_layer', layers.ReshapeLayer),
            ],
        input_shape=(None, 3, 256, 256),
        conv_num_filters=conv_filters, conv_filter_size = (filter_sizes, filter_sizes),
        conv_nonlinearity=None,
        pool_pool_size=(2, 2),
        flatten_shape=(([0], -1)), # not sure if necessary?
        encode_layer_num_units = encode_size,
        hidden_num_units= deconv_filters * (256 + filter_sizes - 1) ** 2 / 4,
        unflatten_shape=(([0], deconv_filters, (256 + filter_sizes - 1) / 2, (256 + filter_sizes - 1) / 2 )),
        unpool_ds=(2, 2),
        deconv_num_filters=1, deconv_filter_size = (filter_sizes, filter_sizes),
        deconv_nonlinearity=None,
        output_layer_shape = (([0], -1)),
        update_learning_rate = 0.01,
        update_momentum = 0.975,
        batch_iterator_train=FlipBatchIterator(batch_size=128),
        regression=True,
        max_epochs= epochs,
        verbose=1,
        )
    ae.fit(X_train, X_out)
    print '---------------train end'
    print
    ###  expect training / val error of about 0.087 with these parameters
    ###  if your GPU is not fast enough, reduce the number of filters in the conv/deconv step

    # <codecell>


    pickle.dump(ae, open('mnist/my_conv_ae.pkl','w'))
    # ae = pickle.load(open('mnist/my_conv_ae.pkl','r'))
    ae.save_params_to('mnist/my_conv_ae.np')

    # <codecell>

    X_train_pred = ae.predict(X_train).reshape(-1, 256, 256)  # no sigma/mu rescaling: the normalization above is commented out
    X_pred = np.rint(X_train_pred).astype(int)
    X_pred = np.clip(X_pred, a_min = 0, a_max = 255)
    X_pred = X_pred.astype('uint8')
    print X_pred.shape , X.shape

    # <codecell>

    ###  show random inputs / outputs side by side

    for i in range(0, 10):
        get_random_images(X, X_pred, i)

    return
Author: yinchuandong, Project: DBN_clustering, Lines: 94, Source: mnist_conv_test.py

Example 11: confusion_matrix

# Required import: from nolearn.lasagne import NeuralNet [as alias]
# Or: from nolearn.lasagne.NeuralNet import save_params_to [as alias]
    accuracy = correct/float(len(y_test))
    print 'Correctly predicted: %f\n\n'%accuracy

    #confusion matrix to visualize the predictions
    cm = confusion_matrix(preds,y_test)
    plt.matshow(cm)
    plt.title('Confusion matrix')
    plt.colorbar()

    #plt.clim(0) to have the lower range of 0 (else darkblue will be minimum value)
    #plt.clim(0)
    plt.ylabel('Predicted label')
    plt.xlabel('True label')

    #save the weights
    net1.save_params_to('../data/saved_model')

    #visualize the weights and the confusion matrix
    #visualize.plot_conv_weights(net1.layers_['conv2d2'])
    plt.show()

    #show the "shifted" distribution of the specific feature
    cancer = []
    noncancer = []
    for point,result in zip(X_test,y_test):
        if result == 1:
            cancer.append(point[0][0][3])
            #plt.imshow(point[0], interpolation='nearest')
            #plt.title('Cancer')
        else:
            noncancer.append(point[0][0][3])
Author: mcaldera, Project: Machine_Learning, Lines: 33, Source: Final_network.py

Example 12: open(

# Required import: from nolearn.lasagne import NeuralNet [as alias]
# Or: from nolearn.lasagne.NeuralNet import save_params_to [as alias]
        output_num_units=len(z),  # one output unit per target class
        #dropout1_p=0.1,
        #dropout2_p=0.1,
        # optimization method:
        update=nesterov_momentum,
        update_learning_rate=0.001,
        update_momentum=0.3,

        regression=False,  # classification, not regression
        max_epochs=1000,  # we want to train this many epochs
        verbose=1,
        )

    net1.fit(X, y)

    with open('net1.pickle', 'wb') as f:
        pickle.dump(net1, f, -1)

    net1.save_params_to("net1_params.pkl")

    train_loss = np.array([i["train_loss"] for i in net1.train_history_])
    valid_loss = np.array([i["valid_loss"] for i in net1.train_history_])
    pyplot.plot(train_loss, linewidth=3, label="train")
    pyplot.plot(valid_loss, linewidth=3, label="valid")
    pyplot.grid()
    pyplot.legend()
    pyplot.xlabel("epoch")
    pyplot.ylabel("loss")
    #pyplot.ylim(1e-3, 1e-2)
    pyplot.yscale("log")
    pyplot.show()
Author: mosssimo, Project: glass, Lines: 33, Source: train.py
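
A design note on the two persistence calls above: pickling the whole NeuralNet (net1.pickle) captures the full object, including train_history_, but such pickles tend to be fragile across nolearn/Lasagne versions and can require recursion-limit workarounds (see Examples 6 and 7). save_params_to writes only the parameter arrays, which makes it the more portable choice for long-term storage.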

Example 13: str(

# Required import: from nolearn.lasagne import NeuralNet [as alias]
# Or: from nolearn.lasagne.NeuralNet import save_params_to [as alias]

"""Loading data and training Lasagne network using nolearn"""

print trainImg2.shape

print "Ratio: " + str(1.0 - float(sum(trainVal2)) / float(len(trainVal2)))

best_accuracy = 0.0
print "Training Classifier: 80/20 split"
for i in [1, 2, 3, 4, 6, 8, 10, 40, 100, 250]:
    saved_accuracy = 0.0
    print "Size: " + str(i*2000)
    for epoch in range(0, 25):
        nn = nn.fit(trainImg2[0:2000*i], trainVal2[0:2000*i])
        cur_accuracy = nn.train_history_[-1]['valid_accuracy']
        best_accuracy = max(cur_accuracy, best_accuracy)
        #print "Current Accuracy: " + str(cur_accuracy)
        #print "Saved Accuracy: " + str(saved_accuracy)
        if cur_accuracy + 0.04 < saved_accuracy or cur_accuracy + 0.12 < best_accuracy:
            print "Accuracy Drop! Loading previous network!"
            nn.load_params_from("cachednn.params")
        else:
            nn.save_params_to('cachednn.params')
            saved_accuracy = cur_accuracy

nn.save_params_to('nn_stage2.params')

#pickle.dump(nn, open( "nn_stage2.pkl", "wb" ))
Author: nikcheerla, Project: TCGA-Mitosis, Lines: 31, Source: train_net_stage2.py

Example 14: createCSAE

# Required import: from nolearn.lasagne import NeuralNet [as alias]
# Or: from nolearn.lasagne.NeuralNet import save_params_to [as alias]

#......... part of this code omitted .........
            # conv41_border_mode="same",
            conv41_pad="same",
            conv42_num_filters=layers_size[3], conv42_filter_size=filter_4, conv42_nonlinearity=activation,
            # conv42_border_mode="same",
            conv42_pad="same",

            unpool2_ds=(2, 2),

            conv5_num_filters=layers_size[4], conv5_filter_size=filter_5, conv5_nonlinearity=activation,
            # conv5_border_mode="same",
            conv5_pad="same",
            conv51_num_filters=layers_size[4], conv51_filter_size=filter_5, conv51_nonlinearity=activation,
            # conv51_border_mode="same",
            conv51_pad="same",
            conv52_num_filters=layers_size[4], conv52_filter_size=filter_5, conv52_nonlinearity=activation,
            # conv52_border_mode="same",
            conv52_pad="same",

            conv6_num_filters=1, conv6_filter_size=filter_6, conv6_nonlinearity=last_layer_activation,
            # conv6_border_mode="same",
            conv6_pad="same",

            output_layer_shape=(([0], -1)),

            update_learning_rate=learning_rate,
            update_momentum=update_momentum,
            update=nesterov_momentum,
            train_split=TrainSplit(eval_size=train_valid_split),
            batch_iterator_train=FlipBatchIterator(batch_size=batch_size) if flip_batch else BatchIterator(batch_size=batch_size),
            regression=True,
            max_epochs=epochs,
            verbose=1,
            hiddenLayer_to_output=-11)

        cnn.fit(X_train, X_out)

        try:
            pickle.dump(cnn, open(folder_path + CONV_AE_PKL, 'w'))
            # cnn = pickle.load(open(folder_path + CONV_AE_PKL,'r'))
            # cnn.save_weights_to(folder_path + CONV_AE_NP)
            cnn.save_params_to(folder_path + CONV_AE_PARAMS_PKL)
        except Exception:
            print ("Could not pickle cnn")

        X_pred = cnn.predict(X_train).reshape(-1, input_height, input_width)  # * sigma + mu
        # # X_pred = np.rint(X_pred).astype(int)
        # # X_pred = np.clip(X_pred, a_min=0, a_max=255)
        # # X_pred = X_pred.astype('uint8')
        #
        # try:
        #     trian_last_hiddenLayer = cnn.output_hiddenLayer(X_train)
        #     # test_last_hiddenLayer = cnn.output_hiddenLayer(test_x)
        #     pickle.dump(trian_last_hiddenLayer, open(folder_path + 'encode.pkl', 'w'))
        # except:
        #     print "Could not save encoded images"

        print ("Saving some images....")
        for i in range(10):
            index = np.random.randint(X_train.shape[0])
            print (index)

            def get_picture_array(X, index):
                array = np.rint(X[index] * 256).astype(np.int).reshape(input_height, input_width)
                array = np.clip(array, a_min=0, a_max=255)
                return array.repeat(4, axis=0).repeat(4, axis=1).astype(np.uint8())

            original_image = Image.fromarray(get_picture_array(X_out, index))
            # original_image.save(folder_path + 'original' + str(index) + '.png', format="PNG")
            #
            # array = np.rint(trian_last_hiddenLayer[index] * 256).astype(np.int).reshape(input_height/2, input_width/2)
            # array = np.clip(array, a_min=0, a_max=255)
            # encode_image = Image.fromarray(array.repeat(4, axis=0).repeat(4, axis=1).astype(np.uint8()))
            # encode_image.save(folder_path + 'encode' + str(index) + '.png', format="PNG")

            new_size = (original_image.size[0] * 3, original_image.size[1])
            new_im = Image.new('L', new_size)
            new_im.paste(original_image, (0, 0))
            pred_image = Image.fromarray(get_picture_array(X_pred, index))
            # pred_image.save(folder_path + 'pred' + str(index) + '.png', format="PNG")
            new_im.paste(pred_image, (original_image.size[0], 0))

            noise_image = Image.fromarray(get_picture_array(X_train, index))
            new_im.paste(noise_image, (original_image.size[0]*2, 0))
            new_im.save(folder_path+'origin_prediction_noise-'+str(index)+'.png', format="PNG")

            # diff = ImageChops.difference(original_image, pred_image)
            # diff = diff.convert('L')
            # diff.save(folder_path + 'diff' + str(index) + '.png', format="PNG")

            # plt.imshow(new_im)
            # new_size = (original_image.size[0] * 2, original_image.size[1])
            # new_im = Image.new('L', new_size)
            # new_im.paste(original_image, (0, 0))
            # pred_image = Image.fromarray(get_picture_array(X_train, index))
            # # pred_image.save(folder_path + 'noisyInput' + str(index) + '.png', format="PNG")
            # new_im.paste(pred_image, (original_image.size[0], 0))
            # new_im.save(folder_path+'origin_VS_noise-'+str(index)+'.png', format="PNG")
            # plt.imshow(new_im)

        return cnn
Author: idocoh, Project: ISH_Lasagne, Lines: 104, Source: articleCat_DAE.py

Example 15:

# Required import: from nolearn.lasagne import NeuralNet [as alias]
# Or: from nolearn.lasagne.NeuralNet import save_params_to [as alias]
    # conv2d4_num_filters = 16,
    # conv2d4_filter_size = (2,2),
    # conv2d4_nonlinearity = lasagne.nonlinearities.rectify,

    # maxpool4_pool_size = (2,2),

    dropout1_p=0.5,

    # dropout2_p = 0.5,

    dense_num_units=16,
    dense_nonlinearity=lasagne.nonlinearities.rectify,

    # dense2_num_units = 16,
    # dense2_nonlinearity = lasagne.nonlinearities.rectify,

    output_nonlinearity=lasagne.nonlinearities.softmax,
    output_num_units=2,

    update=nesterov_momentum,
    update_learning_rate=0.003,
    update_momentum=0.9,
    max_epochs=1000,
    verbose=1,
)

nn = net1.fit(X_train, y_train)  # Train CNN

net1.save_params_to("/Users/Pedro/PycharmProjects/bidhu/docs/train.txt")  # Save CNN parameters
Author: pedfx, Project: BIDHU, Lines: 31, Source: train.py


Note: the nolearn.lasagne.NeuralNet.save_params_to examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub/MSDocs. The code snippets are drawn from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors. Consult the corresponding project's license before redistributing or using the code. Do not reproduce without permission.