

Python NeuralNet.load_params_from Method Code Examples

This article collects typical usage examples of the Python method nolearn.lasagne.NeuralNet.load_params_from. If you have been wondering what NeuralNet.load_params_from does, how to call it, or what working examples look like, the curated code examples below should help. You can also explore further usage examples of the containing class, nolearn.lasagne.NeuralNet.


Fifteen code examples of the NeuralNet.load_params_from method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
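Before diving into the examples, here is a minimal save/load round trip for orientation. This is an illustrative sketch, not taken from any of the projects below; the layer sizes, file name, and random data are assumptions:

import numpy as np
from lasagne import layers
from nolearn.lasagne import NeuralNet

net = NeuralNet(
    layers=[
        ('input', layers.InputLayer),
        ('hidden', layers.DenseLayer),
        ('output', layers.DenseLayer),
    ],
    input_shape=(None, 10),        # hypothetical: 10 input features
    hidden_num_units=20,
    output_num_units=1,
    output_nonlinearity=None,
    regression=True,
    update_learning_rate=0.01,
    max_epochs=5,
    verbose=0,
)

X = np.random.rand(200, 10).astype(np.float32)
y = np.random.rand(200, 1).astype(np.float32)
net.fit(X, y)

net.save_params_to('weights.pkl')    # persist the learned parameters
net.load_params_from('weights.pkl')  # restore them into an identically shaped net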

Example 1: __call__

# Required import: from nolearn.lasagne import NeuralNet [as alias]
# Or: from nolearn.lasagne.NeuralNet import load_params_from [as alias]
	def __call__(self, nn, train_history):
	    current_valid = train_history[-1]['valid_loss']
	    current_epoch = train_history[-1]['epoch']
	    if current_valid < self.best_valid:
	        self.best_valid = current_valid
	        self.best_valid_epoch = current_epoch
	        self.best_weights = nn.get_all_params_values()
	    elif self.best_valid_epoch + self.patience < current_epoch:
	        print("Early stopping.")
	        print("Best valid loss was {:.6f} at epoch {}.".format(
	            self.best_valid, self.best_valid_epoch))
	        nn.load_params_from(self.best_weights)
	        raise StopIteration()
Author: thewayofknowing, Project: Kaggle, Lines: 15, Source: script.py
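The excerpt shows only __call__; the attributes it relies on come from a constructor the page omits. A plausible reconstruction, following the widely copied kfkd-tutorial pattern this snippet derives from:

import numpy as np

class EarlyStopping(object):
    def __init__(self, patience=100):
        self.patience = patience          # epochs to wait after the best result
        self.best_valid = np.inf          # best validation loss seen so far
        self.best_valid_epoch = 0         # epoch at which it was seen
        self.best_weights = None          # snapshot of the best parameters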

Example 2: network

# Required import: from nolearn.lasagne import NeuralNet [as alias]
# Or: from nolearn.lasagne.NeuralNet import load_params_from [as alias]
class network(object):
    def __init__(self, X_train, Y_train):
        # self.__hidden = 0

        # heuristic: hidden units = ceil(2 * (n_features + 1) / 3)
        self.__hidden = int(math.ceil((2 * (X_train.shape[1] + 1)) / 3))
        self.net = NeuralNet(
            layers=[
                ('input', layers.InputLayer),
                ('hidden', layers.DenseLayer),
                ('output', layers.DenseLayer)
            ],
            input_shape=(None, X_train.shape[1]),
            hidden_num_units=self.__hidden,
            #hidden_nonlinearity=nonlinearities.tanh,
            output_nonlinearity=None,
            batch_iterator_train=BatchIterator(batch_size=256),
            output_num_units=1,

            on_epoch_finished=[EarlyStopping(patience=50)],
            update=momentum,
            update_learning_rate=theano.shared(np.float32(0.03)),
            update_momentum=theano.shared(np.float32(0.8)),
            regression=True,
            max_epochs=1000,
            verbose=1,
        )

        self.net.fit(X_train, Y_train)

    def predict(self, X):
        return self.net.predict(X)

    def showMetrics(self):
        train_loss = np.array([i["train_loss"] for i in self.net.train_history_])
        valid_loss = np.array([i["valid_loss"] for i in self.net.train_history_])
        pyplot.plot(train_loss, linewidth=3, label="training")
        pyplot.plot(valid_loss, linewidth=3, label="validation")
        pyplot.grid()
        pyplot.legend()
        pyplot.xlabel("epoch")
        pyplot.ylabel("loss")
        # pyplot.ylim(1e-3, 1e-2)
        pyplot.yscale("log")
        pyplot.show()

    def saveNet(self,fname):
        self.net.save_params_to(fname)

    def loadNet(self,fname):
        self.net.load_params_from(fname)
Author: hiteshpaul, Project: Salesforecasting, Lines: 52, Source: net.py
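A hypothetical usage of this wrapper (names and file name assumed, not from the source project):

model = network(X_train, Y_train)      # the constructor trains immediately
model.saveNet('sales_net.params')      # persist weights to disk
model.loadNet('sales_net.params')      # restore them later
predictions = model.predict(X_test)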

Example 3: loadNet2

# Required import: from nolearn.lasagne import NeuralNet [as alias]
# Or: from nolearn.lasagne.NeuralNet import load_params_from [as alias]
def loadNet2(netName):
    net = NeuralNet(
        layers=[
            ('input', layers.InputLayer),
            ('conv1', layers.Conv2DLayer),
            ('pool1', layers.MaxPool2DLayer),
            ('dropout1', layers.DropoutLayer),  # !
            ('conv2', layers.Conv2DLayer),
            ('pool2', layers.MaxPool2DLayer),
            ('dropout2', layers.DropoutLayer),  # !
            ('conv3', layers.Conv2DLayer),
            ('pool3', layers.MaxPool2DLayer),
            ('dropout3', layers.DropoutLayer),  # !
            ('hidden4', layers.DenseLayer),
            ('dropout4', layers.DropoutLayer),  # !
            ('hidden5', layers.DenseLayer),
            ('output', layers.DenseLayer),
        ],
        input_shape=(None, 1, 96, 96),
        conv1_num_filters=32, conv1_filter_size=(3, 3), pool1_pool_size=(2, 2),
        dropout1_p=0.1,  # !
        conv2_num_filters=64, conv2_filter_size=(2, 2), pool2_pool_size=(2, 2),
        dropout2_p=0.2,  # !
        conv3_num_filters=128, conv3_filter_size=(2, 2), pool3_pool_size=(2, 2),
        dropout3_p=0.3,  # !
        hidden4_num_units=1000,  # !
        dropout4_p=0.5,
        hidden5_num_units=1000,  # !
        output_num_units=30, output_nonlinearity=None,

        update_learning_rate=theano.shared(float32(0.03)),
        update_momentum=theano.shared(float32(0.9)),

        regression=True,
        batch_iterator_train=FlipBatchIterator(batch_size=128),
        on_epoch_finished=[
            AdjustVariable('update_learning_rate', start=0.03, stop=0.0001),
            AdjustVariable('update_momentum', start=0.9, stop=0.999),
            EarlyStopping(patience=200),
            backupCNN,
        ],
        max_epochs=10000,
        verbose=1,
    )

    net.load_params_from(netName)

    return net
Author: kanak87, Project: oldboy_rep, Lines: 50, Source: nn2.py
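Note that load_params_from restores parameter values only: loadNet2 first rebuilds the exact architecture and then loads the saved arrays into it, which is why the layer definitions here must match those used when the parameters were saved.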

Example 4: CNN

# Required import: from nolearn.lasagne import NeuralNet [as alias]
# Or: from nolearn.lasagne.NeuralNet import load_params_from [as alias]
class CNN(object):
	__metaclass__ = Singleton
	channels = 3
	image_size = [64,64]
	layers = [ 
		# layer dealing with the input data
		(InputLayer, {'shape': (None, channels, image_size[0], image_size[1])}),
		# first stage of our convolutional layers 
		(Conv2DLayer, {'num_filters': 32, 'filter_size': 9}),
		(Conv2DLayer, {'num_filters': 32, 'filter_size': 5}),
		(MaxPool2DLayer, {'pool_size': 2}),
		# second stage of our convolutional layers
		(Conv2DLayer, {'num_filters': 32, 'filter_size': 5}),
		(Conv2DLayer, {'num_filters': 32, 'filter_size': 3}),
		(MaxPool2DLayer, {'pool_size': 2}),
		# two dense layers with dropout
		(DenseLayer, {'num_units': 256}),
		(DropoutLayer, {}),
		(DenseLayer, {'num_units': 256}),
		# the output layer
		(DenseLayer, {'num_units': 2, 'nonlinearity': softmax}),
	]
	def __init__(self):
		logger = logging.getLogger(__name__)
		logger.info("Initializing neural net...")
		self.net = NeuralNet(layers=self.layers, update_learning_rate=0.0002 )
		self.net.load_params_from("conv_params")
		logger.info("Finished loading parameters")
	
	def resize(self, infile):
		try:
			im = Image.open(infile)
			resized_im = np.array(ImageOps.fit(im, (self.image_size[0], self.image_size[1]), Image.ANTIALIAS), dtype=np.uint8)
			rgb = np.array([resized_im[:,:,0], resized_im[:,:,1], resized_im[:,:,2]])
			return rgb.reshape(1,self.channels,self.image_size[0],self.image_size[1])

		except IOError:
			return "cannot create thumbnail for '%s'" % infile

	def predict(self, X):
		porn = self.net.predict(X)[0] == 1
		return "true" if porn else "false"
Author: cid2105, Project: dirty-image-classifier, Lines: 44, Source: models.py

Example 5: load_finetuned_dbn

# Required import: from nolearn.lasagne import NeuralNet [as alias]
# Or: from nolearn.lasagne.NeuralNet import load_params_from [as alias]
def load_finetuned_dbn(path):
    """
    Load a fine tuned Deep Belief Net from file
    :param path: path to deep belief net parameters
    :return: deep belief net
    """
    dbn = NeuralNet(
        layers=[
            ('input', las.layers.InputLayer),
            ('l1', las.layers.DenseLayer),
            ('l2', las.layers.DenseLayer),
            ('l3', las.layers.DenseLayer),
            ('l4', las.layers.DenseLayer),
            ('l5', las.layers.DenseLayer),
            ('l6', las.layers.DenseLayer),
            ('l7', las.layers.DenseLayer),
            ('output', las.layers.DenseLayer)
        ],
        input_shape=(None, 1200),
        l1_num_units=2000, l1_nonlinearity=sigmoid,
        l2_num_units=1000, l2_nonlinearity=sigmoid,
        l3_num_units=500, l3_nonlinearity=sigmoid,
        l4_num_units=50, l4_nonlinearity=linear,
        l5_num_units=500, l5_nonlinearity=sigmoid,
        l6_num_units=1000, l6_nonlinearity=sigmoid,
        l7_num_units=2000, l7_nonlinearity=sigmoid,
        output_num_units=1200, output_nonlinearity=linear,
        update=nesterov_momentum,
        update_learning_rate=0.001,
        update_momentum=0.5,
        objective_l2=0.005,
        verbose=1,
        regression=True
    )
    with open(path, 'rb') as f:
        pretrained_nn = pickle.load(f)
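    # Note: load_params_from also accepts the file path (or a parameter dict)
    # directly; the pickle.load above only verifies that the file is readable.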
    if pretrained_nn is not None:
        dbn.load_params_from(path)
    return dbn
Author: behtak, Project: ip-avsr, Lines: 41, Source: unimodal_nodelta.py

Example 6: main

# Required import: from nolearn.lasagne import NeuralNet [as alias]
# Or: from nolearn.lasagne.NeuralNet import load_params_from [as alias]
def main():
    data = load_av_letters('data/allData_mouthROIs.mat')

    # create the necessary variable mappings
    data_matrix = data['dataMatrix']
    data_matrix_len = data_matrix.shape[0]
    targets_vec = data['targetsVec']
    vid_len_vec = data['videoLengthVec']
    iter_vec = data['iterVec']

    indexes = create_split_index(data_matrix_len, vid_len_vec, iter_vec)

    # split the data
    train_data = data_matrix[indexes == True]
    train_targets = targets_vec[indexes == True]
    test_data = data_matrix[indexes == False]
    test_targets = targets_vec[indexes == False]

    idx = [i for i, elem in enumerate(test_targets) if elem == 20]

    print(train_data.shape)
    print(test_data.shape)
    print(sum([train_data.shape[0], test_data.shape[0]]))

    # resize the input data to 40 x 30
    train_data_resized = resize_images(train_data).astype(np.float32)

    # normalize the inputs [0 - 1]
    train_data_resized = normalize_input(train_data_resized, centralize=True)

    test_data_resized = resize_images(test_data).astype(np.float32)
    test_data_resized = normalize_input(test_data_resized, centralize=True)

    dic = {}
    dic['trainDataResized'] = train_data_resized
    dic['testDataResized'] = test_data_resized

    """second experiment: overcomplete sigmoid encoder/decoder, squared loss"""
    encode_size = 2500
    sigma = 0.5

    # to get tied weights in the encoder/decoder, create this shared weightMatrix
    # 1200 x 2500
    w1, layer1 = build_encoder_layers(1200, 2500, sigma)

    ae1 = NeuralNet(
        layers=layer1,
        max_epochs=50,
        objective_loss_function=squared_error,
        update=adadelta,
        regression=True,
        verbose=1
    )

    load = True
    save = False
    if load:
        print('[LOAD] layer 1...')
        ae1.load_params_from('layer1.dat')
    else:
        print('[TRAIN] layer 1...')
        ae1.fit(train_data_resized, train_data_resized)

    # save params
    if save:
        print('[SAVE] layer 1...')
        ae1.save_params_to('layer1.dat')

    train_encoded1 = ae1.get_output('encoder', train_data_resized)  # 12293 x 2500

    w2, layer2 = build_encoder_layers(2500, 1250)
    ae2 = NeuralNet(
        layers=layer2,
        max_epochs=50,
        objective_loss_function=squared_error,
        update=adadelta,
        regression=True,
        verbose=1
    )

    load2 = True
    if load2:
        print('[LOAD] layer 2...')
        ae2.load_params_from('layer2.dat')
    else:
        print('[TRAIN] layer 2...')
        ae2.fit(train_encoded1, train_encoded1)

    save2 = False
    if save2:
        print('[SAVE] layer 2...')
        ae2.save_params_to('layer2.dat')

    train_encoded2 = ae2.get_output('encoder', train_encoded1)  # 12293 x 1250

    w3, layer3 = build_encoder_layers(1250, 600)
    ae3 = NeuralNet(
        layers=layer3,
        max_epochs=100,
        objective_loss_function=squared_error,
#......... the rest of this code is omitted .........
Author: behtak, Project: ip-avsr, Lines: 103, Source: sde_autoencoder.py
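The load/save flags in this example implement a common checkpoint idiom. Distilled into a helper (a sketch, assuming net is an autoencoder trained to reconstruct its input):

import os

def fit_or_load(net, X, fname):
    # Reuse saved weights when a checkpoint exists; otherwise train and persist.
    if os.path.exists(fname):
        net.load_params_from(fname)
    else:
        net.fit(X, X)              # autoencoder target is the input itself
        net.save_params_to(fname)
    return net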

Example 7: load_data

# Required import: from nolearn.lasagne import NeuralNet [as alias]
# Or: from nolearn.lasagne.NeuralNet import load_params_from [as alias]
    objective_loss_function=objectives.categorical_crossentropy,

    update=updates.adam,

    batch_iterator_train=train_iterator,
    batch_iterator_test=test_iterator,

    on_epoch_finished=[
        save_weights,
        save_training_history,
        plot_training_history
    ],

    verbose=10,
    max_epochs=250,
)


if __name__ == '__main__':
    # X_train, X_test are image file names
    # They will be read in the iterator
    X_train, X_test, y_train, y_test = load_data(test_size=0.25, random_state=42)

    net.fit(X_train, y_train)

    # Load the best weights from pickled model
    net.load_params_from('./examples/cifar10/model_weights.pkl')

    score = net.score(X_test, y_test)
    print 'Final score %.4f' % score
Author: Lomascolo, Project: nolearn_utils, Lines: 32, Source: train.py

Example 8: rodar

# Required import: from nolearn.lasagne import NeuralNet [as alias]
# Or: from nolearn.lasagne.NeuralNet import load_params_from [as alias]
	def rodar(self, numerodeimagens):

			np.set_printoptions(threshold=np.nan)
			sourcepath = str(os.getcwd())


			# Allocates space for each new image you want to classify, each line is an image
			X_test = np.zeros((numerodeimagens, 19200), dtype=np.int)

			strfiles =''
			i = 0

			# read the images
			for files in glob.glob(os.path.join(sourcepath, '*.jpeg')):
				X_test[i] = np.asarray(Image.open(files)).reshape(-1)[0:19200]
				strfiles += files + '<br>'
				i += 1



			# Reshape the images to help the CNN execution
			X_test = X_test.reshape((-1, 3, 80, 80))

			# Define the CNN, must be the same CNN saved into your model generated running CNN.py
			net1 = NeuralNet(
				layers=[('input', layers.InputLayer),
						('conv2d1', layers.Conv2DLayer),
						('maxpool1', layers.MaxPool2DLayer),
						('conv2d2', layers.Conv2DLayer),
						('maxpool2', layers.MaxPool2DLayer),
						('conv2d3', layers.Conv2DLayer),
						('maxpool3', layers.MaxPool2DLayer),
						# ('conv2d4', layers.Conv2DLayer),
						# ('maxpool4', layers.MaxPool2DLayer),
						('dropout1', layers.DropoutLayer),
						# ('dropout2', layers.DropoutLayer),
						('dense', layers.DenseLayer),
						# ('dense2', layers.DenseLayer),
						('output', layers.DenseLayer),
						],

				input_shape=(None, 3, 80, 80),

				conv2d1_num_filters=16,
				conv2d1_filter_size=(3, 3),
				conv2d1_nonlinearity=lasagne.nonlinearities.rectify,
				conv2d1_W=lasagne.init.GlorotUniform(),

				maxpool1_pool_size=(2, 2),

				conv2d2_num_filters=16,
				conv2d2_filter_size=(3, 3),
				conv2d2_nonlinearity=lasagne.nonlinearities.rectify,

				maxpool2_pool_size=(2, 2),

				conv2d3_num_filters=16,
				conv2d3_filter_size=(3, 3),
				conv2d3_nonlinearity=lasagne.nonlinearities.rectify,

				maxpool3_pool_size=(2, 2),

				# conv2d4_num_filters = 16,
				# conv2d4_filter_size = (2,2),
				# conv2d4_nonlinearity = lasagne.nonlinearities.rectify,

				# maxpool4_pool_size = (2,2),

				dropout1_p=0.5,

				# dropout2_p = 0.5,

				dense_num_units=16,
				dense_nonlinearity=lasagne.nonlinearities.rectify,

				# dense2_num_units = 16,
				# dense2_nonlinearity = lasagne.nonlinearities.rectify,

				output_nonlinearity=lasagne.nonlinearities.softmax,
				output_num_units=2,

				update=nesterov_momentum,
				update_learning_rate=0.001,
				update_momentum=0.9,
				max_epochs=1000,
				verbose=1,
			)

			net1.load_params_from(os.path.join(sourcepath, "#0#0#0#.txt"))  # Read model



			preds = net1.predict(X_test)  # make predictions


			strfiles = strfiles.replace(str(os.getcwd()), "").replace(".jpeg", '').replace("/", '')


			strfiles = strfiles.split("<br>")

#......... the rest of this code is omitted .........
Author: pedfx, Project: BIDHU, Lines: 103, Source: test.py

Example 9: a

# Required import: from nolearn.lasagne import NeuralNet [as alias]
# Or: from nolearn.lasagne.NeuralNet import load_params_from [as alias]
        ],
    input_shape = (None, 1, 20, 20),
    conv_num_filters = 32, conv_filter_size = (3, 3),
    pool_pool_size = (2, 2),
    hidden_num_units = 50,
    output_num_units = 2, output_nonlinearity = softmax,

    update_learning_rate = 0.01,
    update_momentum = 0.9,

    regression = False,
    max_epochs = 50,
    verbose = 1,
    )

net.load_params_from(CNN_Weights)

##############################################################################
# Scan the entire image with a (w x h) window
##############################################################################

for root, dirs, files in os.walk(SourceDir):
    for name in files:

        ext = ['.jpg', '.jpeg', '.gif', '.png']
        if name.endswith(tuple(ext)):

            path = os.path.join(root, name)
            orig_image = Image.open(path).convert('RGBA')
            image = orig_image.convert('L')  # Convert to grayscale
            image = ImageOps.equalize(image)  # Histogram equalization
Author: SPaterakis, Project: Face_Detection, Lines: 33, Source: face_detection.py

Example 10: main

# Required import: from nolearn.lasagne import NeuralNet [as alias]
# Or: from nolearn.lasagne.NeuralNet import load_params_from [as alias]
def main():
    c = color_codes()
    patch_size = (15, 15, 15)
    dir_name = '/home/sergivalverde/w/CNN/images/CH16'
    patients = [f for f in sorted(os.listdir(dir_name)) if os.path.isdir(os.path.join(dir_name, f))]
    names = np.stack([name for name in [
        [os.path.join(dir_name, patient, 'FLAIR_preprocessed.nii.gz') for patient in patients],
        [os.path.join(dir_name, patient, 'DP_preprocessed.nii.gz') for patient in patients],
        [os.path.join(dir_name, patient, 'T2_preprocessed.nii.gz') for patient in patients],
        [os.path.join(dir_name, patient, 'T1_preprocessed.nii.gz') for patient in patients]
    ] if name is not None], axis=1)
    seed = np.random.randint(np.iinfo(np.int32).max)
    ''' Here we create an initial net to find conflictive voxels '''
    print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['g'] + '<Running iteration ' + c['b'] + '1>' + c['nc'])
    net_name = '/home/sergivalverde/w/CNN/code/CNN1/miccai_challenge2016/deep-challenge2016.init.'
    net = NeuralNet(
        layers=[
            (InputLayer, dict(name='in', shape=(None, 4, 15, 15, 15))),
            (Conv3DDNNLayer, dict(name='conv1_1', num_filters=32, filter_size=(5, 5, 5), pad='same')),
            (Pool3DDNNLayer, dict(name='avgpool_1', pool_size=2, stride=2, mode='average_inc_pad')),
            (Conv3DDNNLayer, dict(name='conv2_1', num_filters=64, filter_size=(5, 5, 5), pad='same')),
            (Pool3DDNNLayer, dict(name='avgpool_2', pool_size=2, stride=2, mode='average_inc_pad')),
            (DropoutLayer, dict(name='l2drop', p=0.5)),
            (DenseLayer, dict(name='l1', num_units=256)),
            (DenseLayer, dict(name='out', num_units=2, nonlinearity=nonlinearities.softmax)),
        ],
        objective_loss_function=objectives.categorical_crossentropy,
        update=updates.adam,
        update_learning_rate=0.0001,
        on_epoch_finished=[
            SaveWeights(net_name + 'model_weights.pkl', only_best=True, pickle=False),
            SaveTrainingHistory(net_name + 'model_history.pkl'),
            PlotTrainingHistory(net_name + 'training_history.png'),
            EarlyStopping(patience=10)
        ],
        verbose=10,
        max_epochs=50,
        train_split=TrainSplit(eval_size=0.25),
        custom_scores=[('dsc', lambda p, t: 2 * np.sum(p * t[:, 1]) / np.sum((p + t[:, 1])))],
    )

    try:
        net.load_params_from(net_name + 'model_weights.pkl')
    except IOError:
        print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' +
              c['g'] + 'Loading the data for ' + c['b'] + 'iteration 1' + c['nc'])
        # Create the data
        (x, y, _) = load_patches(
            dir_name=dir_name,
            use_flair=True,
            use_pd=True,
            use_t2=True,
            use_t1=True,
            use_gado=False,
            flair_name='FLAIR_preprocessed.nii.gz',
            pd_name='DP_preprocessed.nii.gz',
            t2_name='T2_preprocessed.nii.gz',
            t1_name='T1_preprocessed.nii.gz',
            gado_name=None,
            mask_name='Consensus.nii.gz',
            size=patch_size
        )

        print('-- Permuting the data')
        np.random.seed(seed)
        x_train = np.random.permutation(np.concatenate(x).astype(dtype=np.float32))
        print('-- Permuting the labels')
        np.random.seed(seed)
        y_train = np.random.permutation(np.concatenate(y).astype(dtype=np.int32))
        y_train = y_train[:, y_train.shape[1] / 2 + 1, y_train.shape[2] / 2 + 1, y_train.shape[3] / 2 + 1]
        print('-- Training vector shape = (' + ','.join([str(length) for length in x_train.shape]) + ')')
        print('-- Training labels shape = (' + ','.join([str(length) for length in y_train.shape]) + ')')

        print c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['g'] +\
            'Training (' + c['b'] + 'initial' + c['nc'] + c['g'] + ')' + c['nc']
        # We try to get the last weights to keep improving the net over and over
        net.fit(x_train, y_train)

    ''' Here we get the seeds '''
    print c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['g'] + '<Looking for seeds>' + c['nc']
    for patient in names:
        output_name = os.path.join('/'.join(patient[0].rsplit('/')[:-1]), 'test.iter1.nii.gz')
        try:
            load_nii(output_name)
            print c['c'] + '[' + strftime("%H:%M:%S") + '] ' \
                + c['g'] + '-- Patient ' + patient[0].rsplit('/')[-2] + ' already done' + c['nc']
        except IOError:
            print c['c'] + '[' + strftime("%H:%M:%S") + '] '\
                  + c['g'] + '-- Testing with patient ' + c['b'] + patient[0].rsplit('/')[-2] + c['nc']
            image_nii = load_nii(patient[0])
            image = np.zeros_like(image_nii.get_data())
            for batch, centers in load_patch_batch(patient, 100000, patch_size):
                y_pred = net.predict_proba(batch)
                [x, y, z] = np.stack(centers, axis=1)
                image[x, y, z] = y_pred[:, 1]

            print c['g'] + '-- Saving image ' + c['b'] + output_name + c['nc']
            image_nii.get_data()[:] = image
            image_nii.to_filename(output_name)

#......... the rest of this code is omitted .........
Author: marianocabezas, Project: miccai_challenge2016, Lines: 103, Source: train_challenge.py

Example 11: rodar

# Required import: from nolearn.lasagne import NeuralNet [as alias]
# Or: from nolearn.lasagne.NeuralNet import load_params_from [as alias]
	def rodar(self):

			np.set_printoptions(threshold=np.nan)
			sourcepath = Classificar.sourcepath
			numerodeimagens = Classificar.numerodeimagens

			X_test = np.zeros((numerodeimagens, 19200),
							  dtype=np.int)  # Allocates space for each new image you want to classify, each line is an image

			for i in range(1, numerodeimagens + 1):  # read images galaxy1.jpg .. galaxyN.jpg
				X_test[i - 1] = np.asarray(Image.open(sourcepath + "galaxy" + str(i) + ".jpg")).reshape(
					-1)[0:19200]

			# Reshape the images to help the CNN execution
			X_test = X_test.reshape((-1, 3, 80, 80))

			# Define the CNN, must be the same CNN that is saved into your model that you generated running CNN.py
			net1 = NeuralNet(
				layers=[('input', layers.InputLayer),
						('conv2d1', layers.Conv2DLayer),
						('maxpool1', layers.MaxPool2DLayer),
						('conv2d2', layers.Conv2DLayer),
						('maxpool2', layers.MaxPool2DLayer),
						('conv2d3', layers.Conv2DLayer),
						('maxpool3', layers.MaxPool2DLayer),
						# ('conv2d4', layers.Conv2DLayer),
						# ('maxpool4', layers.MaxPool2DLayer),
						('dropout1', layers.DropoutLayer),
						# ('dropout2', layers.DropoutLayer),
						('dense', layers.DenseLayer),
						# ('dense2', layers.DenseLayer),
						('output', layers.DenseLayer),
						],

				input_shape=(None, 3, 80, 80),

				conv2d1_num_filters=16,
				conv2d1_filter_size=(3, 3),
				conv2d1_nonlinearity=lasagne.nonlinearities.rectify,
				conv2d1_W=lasagne.init.GlorotUniform(),

				maxpool1_pool_size=(2, 2),

				conv2d2_num_filters=16,
				conv2d2_filter_size=(3, 3),
				conv2d2_nonlinearity=lasagne.nonlinearities.rectify,

				maxpool2_pool_size=(2, 2),

				conv2d3_num_filters=16,
				conv2d3_filter_size=(3, 3),
				conv2d3_nonlinearity=lasagne.nonlinearities.rectify,

				maxpool3_pool_size=(2, 2),

				# conv2d4_num_filters = 16,
				# conv2d4_filter_size = (2,2),
				# conv2d4_nonlinearity = lasagne.nonlinearities.rectify,

				# maxpool4_pool_size = (2,2),

				dropout1_p=0.5,

				# dropout2_p = 0.5,

				dense_num_units=16,
				dense_nonlinearity=lasagne.nonlinearities.rectify,

				# dense2_num_units = 16,
				# dense2_nonlinearity = lasagne.nonlinearities.rectify,

				output_nonlinearity=lasagne.nonlinearities.softmax,
				output_num_units=2,

				update=nesterov_momentum,
				update_learning_rate=0.001,
				update_momentum=0.9,
				max_epochs=1000,
				verbose=1,
			)

			net1.load_params_from("/Users/Pedro/PycharmProjects/BIDHU/docs/train.txt")  # Read model

			preds = net1.predict(X_test)  # make predictions


			strpreds = str(preds)
			strpreds = strpreds.replace(" ", "\n")

			strpreds = strpreds.replace("1", "yes")
			strpreds = strpreds.replace("0", "no")
			xstrpreds = (strpreds.splitlines())
			for i in range(len(xstrpreds)):
				xstrpreds[i] = str(i + 1) + "-" + xstrpreds[i]
			strpreds = str(xstrpreds)
			strpreds = strpreds.replace(" ", "\n")
			strpreds = strpreds.replace("[", "")
			strpreds = strpreds.replace("]", "")
			strpreds = strpreds.replace("'", "")
			strpreds = strpreds.replace(",", "")
#......... the rest of this code is omitted .........
Author: pedfx, Project: BIDHU, Lines: 103, Source: test.py

Example 12: load2d

# Required import: from nolearn.lasagne import NeuralNet [as alias]
# Or: from nolearn.lasagne.NeuralNet import load_params_from [as alias]

import sys

sys.setrecursionlimit(10000)

X = None
y = None

if os.path.exists('X.pickle') and os.path.exists('y.pickle'):
    X = pickle.load(open('X.pickle', 'rb'))
    y = pickle.load(open('y.pickle', 'rb'))
else:
    X, y = load2d()
    with open('X.pickle', 'wb') as f:
        pickle.dump(X, f, -1)
    with open('y.pickle', 'wb') as f:
        pickle.dump(y, f, -1)

if os.path.exists('net.pickle'):
    print 'training already finished'
elif os.path.exists('net_epoch_backup.pickle'):
    net.load_params_from('net_epoch_backup.pickle')
    net.fit(X, y)
else:
    net.fit(X, y)

if net is not None:
    with open('net.pickle', 'wb') as f:
        pickle.dump(net, f, -1)  # persist the trained net
Author: kanak87, Project: oldboy_rep, Lines: 31, Source: cnn.py

Example 13: fit_net2

# Required import: from nolearn.lasagne import NeuralNet [as alias]
# Or: from nolearn.lasagne.NeuralNet import load_params_from [as alias]
def fit_net2(fname='net.pickle', sfname='net2.pickle'):
	with open(fname, 'r') as f:
		net = pickle.load(f)
	l1=net.get_all_layers()

	net2 = NeuralNet(
		layers=[
			('input', layers.InputLayer),
			('conv1', Conv2DLayer),
			('pool1', MaxPool2DLayer),
			('dropout1', layers.DropoutLayer),
			('conv2', Conv2DLayer),
			('pool2', MaxPool2DLayer),
			('dropout2', layers.DropoutLayer),
			('conv3', Conv2DLayer),
			('pool3', MaxPool2DLayer),
			('dropout3', layers.DropoutLayer),
			('hidden4', FactoredLayer),
			('dropout4', layers.DropoutLayer),
			('hidden5', FactoredLayer),
			('output', layers.DenseLayer),
			],
		input_shape=(None, 1, 96, 96),
		conv1_num_filters=32, conv1_filter_size=(3, 3), pool1_pool_size=(2, 2),
		dropout1_p=0.1,
		conv2_num_filters=64, conv2_filter_size=(2, 2), pool2_pool_size=(2, 2),
		dropout2_p=0.2,
		conv3_num_filters=128, conv3_filter_size=(2, 2), pool3_pool_size=(2, 2),
		dropout3_p=0.3,
		hidden4_num_units=1000,
		hidden4_num_hidden=200,
		hidden4_W=l1[10].W.get_value(),
		hidden4_b=l1[10].b.get_value(),
		dropout4_p=0.5,
		hidden5_num_units=1000,
		hidden5_num_hidden=200,
		hidden5_W=l1[12].W.get_value(),
		hidden5_b=l1[12].b.get_value(),
		output_num_units=30, output_nonlinearity=None,

		update_learning_rate=theano.shared(float32(0.03)),
		update_momentum=theano.shared(float32(0.9)),

		regression=True,
		batch_iterator_train=FlipBatchIterator(batch_size=128),
		on_epoch_finished=[
			AdjustVariable('update_learning_rate', start=0.03, stop=0.0001),
			AdjustVariable('update_momentum', start=0.9, stop=0.999),
			EarlyStopping(patience=200),
			],
		max_epochs=1,
		verbose=1,
		)
	
	X, y = load2d()
	net2.fit(X, y)
	net2.load_params_from(net.get_all_params_values())
	#net2.fit(X, y)
	"""
	l2=net2.get_all_layers()
	print(l2)
	for i in xrange(len(l1)):
		if i!=10 and i!=12:
			all_param_values = lasagne.layers.get_all_param_values(l1[i])
			lasagne.layers.set_all_param_values(l2[i], all_param_values)
	"""
	with open(sfname, 'wb') as f:
		pickle.dump(net2, f, -1)
Author: Bobgy, Project: kfkd-tutorial, Lines: 70, Source: kfkd.py
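As this example shows, load_params_from is not limited to file names; it also accepts in-memory sources. A brief sketch of the accepted variants (per nolearn's API; variable names assumed):

net2.load_params_from('weights.pkl')                # a pickled parameter file
net2.load_params_from(net.get_all_params_values())  # a dict of parameter arrays
net2.load_params_from(net)                          # another NeuralNet instance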

Example 14: build_dbn

# Required import: from nolearn.lasagne import NeuralNet [as alias]
# Or: from nolearn.lasagne.NeuralNet import load_params_from [as alias]

#......... part of this code is omitted .........
		dropout2_p=0.5, hidden4_num_units=hidden_layer_size,
		dropout3_p=0.3, hidden5_num_units=hidden_layer_size,
		dropout4_p=0.2, output_num_units=N_EVENTS,
		output_nonlinearity=sigmoid,
		
		batch_iterator_train = BatchIterator(batch_size=1000),
		batch_iterator_test = BatchIterator(batch_size=1000),
		
		y_tensor_type=theano.tensor.matrix,
		update=nesterov_momentum,
		update_learning_rate=theano.shared(float(0.03)),
		update_momentum=theano.shared(float(0.9)),
		
		objective_loss_function=loss,
		regression=True,

		on_epoch_finished=[
			AdjustVariable('update_learning_rate', start=0.03,stop=0.0001),
			AdjustVariable('update_momentum', start=0.9, stop=0.999),
			EarlyStopping(patience=100),	
		],

		max_epochs=max_epochs,
		verbose=1,
		)
	
	# load trial dataset
	dic = pickle.load(open('datapickled/traildata.pickle', 'rb'))
	
	X = dic['X']
	y = dic['y']
	
	# process training data
	total_time_points = len(X) // NO_TIME_POINTS
	no_rows = total_time_points * NO_TIME_POINTS

	X = X[0:no_rows, :]
	
	X = X.transpose()
	X_Samples = np.split(X, total_time_points, axis=1)
	X = np.asarray(X_Samples)
	
	y = y[0:no_rows, :]
	y = y[::NO_TIME_POINTS, :]
	y = y.astype('float32')
	
	net.fit(X,y)
	
	tip = datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
	
	# Save the net
	with open('net/net'+tip+'.pickle', 'wb') as f:
		pickle.dump(net, f, -1)
	
	plot(net)

	# Load test data
	dic = pickle.load(open('datapickled/testdata2.pickle', 'rb'))
	X_test = dic['X_test']
	ids_tot = dic['ids_tot']
	test_dict = dic['test_dict']
	test_total = dic['test_total']

	####process test data####
	print("Creating prediction file ... ")
	
	X_test = X_test
	total_test_len = len(X_test)
	
	total_test_time_points = len(X_test) // NO_TIME_POINTS
	remainder_test_points = len(X_test) % NO_TIME_POINTS
	
	no_rows = total_test_time_points * NO_TIME_POINTS
	X_test = X_test[0:no_rows, :]

	X_test = X_test.transpose()
	X_test_Samples = np.split(X_test, total_test_time_points, axis=1)
	X_test = np.asarray(X_test_Samples)
	
	# Evaluate test data
	print("Testing subject 0....")
	params = net.get_all_params_values()
	learned_weights = net.load_params_from(params)
	probabilities = net.predict_proba(X_test)
	
	total_test_points = total_test_len // NO_TIME_POINTS
	remainder_data = total_test_len % NO_TIME_POINTS
	for i, p in enumerate(probabilities):
		if i != total_test_points:
			for j in range(NO_TIME_POINTS):
				pred_tot.append(p)
	
	# create prediction file
	print('Creating submission(prediction) file...')
	tip = datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
	submission_file = 'res/test_conv_net_push'+tip+'.csv'
	# create pandas object
	submission =  pd.DataFrame(index=ids_tot[:len(pred_tot)],columns=cols,data=pred_tot)
	# write file
	submission.to_csv(submission_file, index_label='id', float_format='%.6f')
Author: LadyEos, Project: EegCovNet, Lines: 104, Source: convnet.py

Example 15: main

# Required import: from nolearn.lasagne import NeuralNet [as alias]
# Or: from nolearn.lasagne.NeuralNet import load_params_from [as alias]
def main():

    parser = argparse.ArgumentParser(description='Test different nets with 3D data.')
    parser.add_argument('-f', '--folder', dest='dir_name', default='/home/sergivalverde/w/CNN/images/CH16')
    parser.add_argument('--flair', action='store', dest='flair', default='FLAIR_preprocessed.nii.gz')
    parser.add_argument('--pd', action='store', dest='pd', default='DP_preprocessed.nii.gz')
    parser.add_argument('--t2', action='store', dest='t2', default='T2_preprocessed.nii.gz')
    parser.add_argument('--t1', action='store', dest='t1', default='T1_preprocessed.nii.gz')
    parser.add_argument('--mask', action='store', dest='mask', default='Consensus.nii.gz')
    parser.add_argument('--old', action='store_true', dest='old', default=False)
    options = vars(parser.parse_args())

    c = color_codes()
    patch_size = (15, 15, 15)
    batch_size = 100000
    # Create the data
    patients = [f for f in sorted(os.listdir(options['dir_name']))
                if os.path.isdir(os.path.join(options['dir_name'], f))]
    flair_names = [os.path.join(options['dir_name'], patient, options['flair']) for patient in patients]
    pd_names = [os.path.join(options['dir_name'], patient, options['pd']) for patient in patients]
    t2_names = [os.path.join(options['dir_name'], patient, options['t2']) for patient in patients]
    t1_names = [os.path.join(options['dir_name'], patient, options['t1']) for patient in patients]
    names = np.stack([name for name in [flair_names, pd_names, t2_names, t1_names]])
    seed = np.random.randint(np.iinfo(np.int32).max)

    print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' + 'Starting leave-one-out' + c['nc'])

    for i in range(0, 15):
        case = names[0, i].rsplit('/')[-2]
        path = '/'.join(names[0, i].rsplit('/')[:-1])
        print(c['c'] + '[' + strftime("%H:%M:%S") + ']  ' + c['nc'] + 'Patient ' + c['b'] + case + c['nc'])
        print(c['c'] + '[' + strftime("%H:%M:%S") + ']    ' + c['g'] +
              '<Running iteration ' + c['b'] + '1' + c['nc'] + c['g'] + '>' + c['nc'])
        net_name = os.path.join(path, 'deep-challenge2016.init.')
        net = NeuralNet(
            layers=[
                (InputLayer, dict(name='in', shape=(None, 4, 15, 15, 15))),
                (Conv3DDNNLayer, dict(name='conv1_1', num_filters=32, filter_size=(5, 5, 5), pad='same')),
                (Pool3DDNNLayer, dict(name='avgpool_1', pool_size=2, stride=2, mode='average_inc_pad')),
                (Conv3DDNNLayer, dict(name='conv2_1', num_filters=64, filter_size=(5, 5, 5), pad='same')),
                (Pool3DDNNLayer, dict(name='avgpool_2', pool_size=2, stride=2, mode='average_inc_pad')),
                (DropoutLayer, dict(name='l2drop', p=0.5)),
                (DenseLayer, dict(name='l1', num_units=256)),
                (DenseLayer, dict(name='out', num_units=2, nonlinearity=nonlinearities.softmax)),
            ],
            objective_loss_function=objectives.categorical_crossentropy,
            update=updates.adam,
            update_learning_rate=0.0001,
            on_epoch_finished=[
                SaveWeights(net_name + 'model_weights.pkl', only_best=True, pickle=False),
                EarlyStopping(patience=10)
            ],
            verbose=10,
            max_epochs=50,
            train_split=TrainSplit(eval_size=0.25),
            custom_scores=[('dsc', lambda p, t: 2 * np.sum(p * t[:, 1]) / np.sum((p + t[:, 1])))],
        )
        flair_name = os.path.join(path, options['flair'])
        pd_name = os.path.join(path, options['pd'])
        t2_name = os.path.join(path, options['t2'])
        t1_name = os.path.join(path, options['t1'])
        names_test = np.array([flair_name, pd_name, t2_name, t1_name])
        outputname1 = os.path.join(path, 'test' + str(i) + '.iter1.nii.gz')
        try:
            net.load_params_from(net_name + 'model_weights.pkl')
        except IOError:
            print(c['c'] + '[' + strftime("%H:%M:%S") + ']    ' +
                  c['g'] + 'Loading the data for ' + c['b'] + 'iteration 1' + c['nc'])
            names_lou = np.concatenate([names[:, :i], names[:, i + 1:]], axis=1)
            paths = ['/'.join(name.rsplit('/')[:-1]) for name in names_lou[0, :]]
            mask_names = [os.path.join(p_path, 'Consensus.nii.gz') for p_path in paths]

            x_train, y_train = load_iter1_data(
                names_lou=names_lou,
                mask_names=mask_names,
                patch_size=patch_size,
                seed=seed
            )

            print('                Training vector shape ='
                  ' (' + ','.join([str(length) for length in x_train.shape]) + ')')
            print('                Training labels shape ='
                  ' (' + ','.join([str(length) for length in y_train.shape]) + ')')

            print(c['c'] + '[' + strftime("%H:%M:%S") + ']    ' + c['g'] +
                  'Training (' + c['b'] + 'initial' + c['nc'] + c['g'] + ')' + c['nc'])
            # We try to get the last weights to keep improving the net over and over
            net.fit(x_train, y_train)

        try:
            image_nii = load_nii(outputname1)
            image1 = image_nii.get_data()
        except IOError:
            print(c['c'] + '[' + strftime("%H:%M:%S") + ']    ' + c['g'] +
                  '<Creating the probability map ' + c['b'] + '1' + c['nc'] + c['g'] + '>' + c['nc'])
            flair_name = os.path.join(path, options['flair'])
            image_nii = load_nii(flair_name)
            image1 = np.zeros_like(image_nii.get_data())
            print('              0% of data tested', end='\r')
            sys.stdout.flush()
#......... the rest of this code is omitted .........
Author: marianocabezas, Project: miccai_challenge2016, Lines: 103, Source: train_test_challenge.py


Note: The nolearn.lasagne.NeuralNet.load_params_from examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers, and the copyright of the source code belongs to the original authors. For redistribution and use, please refer to the corresponding project's license; do not reproduce without permission.