

Python SdA Class Code Examples

This article collects typical usage examples of the SdA class in Python. If you have been wondering what exactly the Python SdA class does, how to use it, or what real-world usage looks like, the curated class code examples below may help.


The following presents 9 code examples of the SdA class, sorted by popularity by default.
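Most of the examples below use the SdA (stacked denoising autoencoder) class from the Theano deep-learning tutorials, or a project-specific fork of it. As a quick orientation, here is a minimal construction-and-pretraining sketch assembled from the calls that actually appear in the examples; the data, sizes, and seed are placeholders, and constructor arguments can differ between forks.

import numpy
import theano

from SdA import SdA  # the Theano-tutorial stacked denoising autoencoder

# placeholder data: 1000 samples of 605 features, as a Theano shared variable
train_set_x = theano.shared(
    numpy.random.rand(1000, 605).astype(theano.config.floatX))

sda = SdA(
    numpy_rng=numpy.random.RandomState(89677),
    n_ins=605,                         # input dimensionality
    hidden_layers_sizes=[1000, 1000],  # one entry per hidden layer
    n_outs=2,                          # size of the final logistic layer
)

# one compiled pretraining function per hidden layer (tutorial API)
pretraining_fns = sda.pretraining_functions(train_set_x=train_set_x,
                                            batch_size=100)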

Example 1: buildFirstLayer

	def buildFirstLayer(self, loaded):

		numpy_rng = numpy.random.RandomState(89677)

		self.logger.debug('... building the model')
		# construct the stacked denoising autoencoder class
		self.sda = SdA(
			numpy_rng = numpy_rng,
			n_ins = len(self.fuzzyDict),
			hidden_layers_sizes = [len(self.fuzzyDict)*1]*2,
			n_outs = math.sqrt(len(self.fuzzyDict))
		)
Developer: davidhughhenrymack, Project: lab, Lines: 12, Source: tardy.py

Example 2: test_SdA

def test_SdA(finetune_lr=0.1, pretraining_epochs=1,
             pretrain_lr=0.001, training_epochs=1, 
             b_patch_filename = 'b_10_Training_patches_norm.npy', b_groundtruth_filename = 'b_Training_labels_norm.npy',
             b_valid_filename = 'b_10_Validation_patches_norm.npy', b_validtruth_filename = 'b_Validation_labels_norm.npy',
             u_patch_filename = 'u_10_Training_patches_norm.npy', u_groundtruth_filename = 'u_Training_labels_norm.npy',
             u_valid_filename = 'u_10_Validation_patches_norm.npy', u_validtruth_filename = 'u_Validation_labels_norm.npy',
             batch_size=100, n_ins = 605, n_outs = 2, hidden_layers_sizes = [1000,1000,1000],prefix = '11_11_3_G4_', corruption_levels=[0.2,0.2,0.2] ):
                 
    """
    Demonstrates how to train and test a stochastic denoising autoencoder.

    This is demonstrated on MNIST.

    :type learning_rate: float
    :param learning_rate: learning rate used in the finetune stage
    (factor for the stochastic gradient)

    :type pretraining_epochs: int
    :param pretraining_epochs: number of epoch to do pretraining

    :type pretrain_lr: float
    :param pretrain_lr: learning rate to be used during pre-training

    :type n_iter: int
    :param n_iter: maximal number of iterations to run the optimizer

    :type dataset: string
    :param dataset: path the the pickled dataset

    """
   
    print '###########################'
    print 'Pretraining epochs: ', pretraining_epochs
    print 'Finetuning epochs: ', training_epochs
    print '###########################'
    
    W = []
    b = []
    
    #########################################################
    #########################################################
   
    resumeTraining = False
    
    # TODO: needs more work.
    # Snippet to resume training if the program crashes halfway through.
    opts, arg = getopt.getopt(sys.argv[1:],"rp:")
    for opt, arg in opts:
        if opt == '-r':
            resumeTraining = True                               # make this true to resume training from saved model    
        elif opt == '-p':
            prefix = arg
            
    flagValue = 1    
    
    if(resumeTraining):
        
        flagFile = file(prefix+'flag.pkl','rb')
        
        try:
            flagValue = cPickle.load(flagFile)
        except:
            pass
        
        savedModel_preTraining = file(prefix+'pre_training.pkl','rb')
        genVariables_preTraining = cPickle.load(savedModel_preTraining)
        layer_number, epochs_done_preTraining, mean_cost, pretrain_lr = genVariables_preTraining
        epoch_flag = 1
        print 'Inside resumeTraining!!!!!!!!!!!!!!!!!!'
        no_of_layers = len(hidden_layers_sizes) + 1
        
        for i in xrange(no_of_layers):
            try:
                W.append(cPickle.load(savedModel_preTraining))
                b.append(cPickle.load(savedModel_preTraining))
            except:
                W.append(None)
                b.append(None)
                    
        if flagValue == 2:  # '==' rather than 'is': identity tests on ints are unreliable
            epochFlag_fineTuning = 1
            iterFlag = 1
            savedModel_fineTuning = file(prefix+'fine_tuning.pkl','rb')
            hidden_layers_sizes = cPickle.load(savedModel_fineTuning)
            genVariables_fineTuning = cPickle.load(savedModel_fineTuning)
            epochs_done_fineTuning, best_validation_loss, finetune_lr, patience, iters_done = genVariables_fineTuning
    
   
    else:
        
        layer_number, epochs_done, mean_cost, pretrain_lr = [0,0,0,pretrain_lr]
        epoch_flag = 0
        epochFlag_fineTuning = 0
        iterFlag = 0
        W = None
        b = None
                
    ##############################################################
    ##############################################################

#......... the rest of this example is omitted .........
Developer: subru1603, Project: DDP_SdA_Brain, Lines: 101, Source: 10_test_SdA.py
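The resume branch above unpickles a bookkeeping list followed by each layer's W and b from prefix + 'pre_training.pkl'. The matching save side is not shown in the excerpt; here is a minimal sketch of what it might look like, with the file layout inferred purely from the load order above (the [W, b, W, b, ...] ordering of sda.params is the Theano-tutorial convention and is an assumption here):

import cPickle

def save_pretraining_checkpoint(prefix, sda, layer_number, epoch,
                                mean_cost, pretrain_lr):
    # Write the bookkeeping list first, then each layer's W and b,
    # in exactly the order the resume code expects to read them back.
    with open(prefix + 'pre_training.pkl', 'wb') as f:
        cPickle.dump([layer_number, epoch, mean_cost, pretrain_lr], f, -1)
        params = iter(sda.params)  # assumed order: [W, b, W, b, ...]
        for W, b in zip(params, params):
            cPickle.dump(W.get_value(borrow=True), f, -1)
            cPickle.dump(b.get_value(borrow=True), f, -1)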

Example 3: test_SdA

def test_SdA():
    SdA.test_SdA(pretraining_epochs=1, training_epochs=1, batch_size=300)
Developer: MrChocolateMoose, Project: DeepHyperNEAT, Lines: 2, Source: test.py

Example 4:

filename = data_dir + "GM12878_200bp_Data_3Cl_l2normalized_TestSet.txt"
test_set_x_org = numpy.loadtxt(filename, delimiter='\t', dtype='float32')
filename = data_dir + "GM12878_200bp_Classes_3Cl_l2normalized_TestSet.txt"
test_set_y_org = numpy.loadtxt(filename, delimiter='\t', dtype=object)
prev, test_set_y_org = cl.change_class_labels(test_set_y_org)

filename = data_dir + "GM12878_Features_Unique.txt"
features = numpy.loadtxt(filename, delimiter='\t', dtype=object)

rng=numpy.random.RandomState(1000)

# train
classifier,training_time=SdA.train_model(train_set_x_org=train_set_x_org, train_set_y_org=train_set_y_org, 
                valid_set_x_org=valid_set_x_org, valid_set_y_org=valid_set_y_org, 
                pretrain_lr=0.1,finetune_lr=0.1, alpha=0.01, 
                lambda_reg=0.00005, alpha_reg=0.5, 
                n_hidden=[64,64,32], corruption_levels=[0.01,0.01,0.01],
                pretraining_epochs=5, training_epochs=1000,
                batch_size=200, rng=rng)
                        
# test
test_set_y_pred,test_set_y_pred_prob,test_time=SdA.test_model(classifier, test_set_x_org, batch_size=200)
print test_set_y_pred[0:20]
print test_set_y_pred_prob[0:20]
print test_time

# evaluate classification performance
perf,conf_mat=cl.perform(test_set_y_org,test_set_y_pred,numpy.unique(train_set_y_org))
print perf
print conf_mat
Developer: LazyXuan, Project: DECRES, Lines: 30, Source: main_SdA.py
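cl.perform is DECRES-specific. If you only need the overall accuracy and per-class recall implied by a confusion matrix, a plain numpy equivalent is easy to write (a generic sketch, not the DECRES implementation):

import numpy

def confusion_summary(conf_mat):
    # conf_mat[i, j] = number of samples with true class i predicted as class j
    conf_mat = numpy.asarray(conf_mat, dtype=float)
    accuracy = numpy.trace(conf_mat) / conf_mat.sum()
    per_class_recall = numpy.diag(conf_mat) / conf_mat.sum(axis=1)
    return accuracy, per_class_recall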

Example 5: test_SdA

def test_SdA():
    t0=time.time()
    SdA.test_SdA(pretraining_epochs = 2, training_epochs = 3, batch_size = 300)
    print >> sys.stderr, "test_SdA took %.3fs expected 971s in our buildbot"%(time.time()-t0)
Developer: pascanur, Project: DeepLearningTutorials, Lines: 4, Source: test.py

Example 6: test_SdA

def test_SdA(finetune_lr=0.1, pretraining_epochs=1,
             pretrain_lr=0.001, training_epochs=1, 
             b_patch_filename = 'b_Training_patches_norm.npy', b_groundtruth_filename = 'b_Training_labels_norm.npy',
             b_valid_filename = 'b_Validation_patches_norm.npy', b_validtruth_filename = 'b_Validation_labels_norm.npy',
             u_patch_filename = 'u_Training_patches_norm.npy', u_groundtruth_filename = 'u_Training_labels_norm.npy',
             u_valid_filename = 'u_Validation_patches_norm.npy', u_validtruth_filename = 'u_Validation_labels_norm.npy',
             batch_size=100, n_ins = 605, n_outs = 5, hidden_layers_sizes = [1000,1000,1000],prefix = '11_11_3_G4_', corruption_levels=[0.2,0.2,0.2], resumeTraining = False, StopAtPretraining = False):
                 
    """
    Demonstrates how to train and test a stochastic denoising autoencoder.

    This is demonstrated on MNIST.

    :type learning_rate: float
    :param learning_rate: learning rate used in the finetune stage
    (factor for the stochastic gradient)

    :type pretraining_epochs: int
    :param pretraining_epochs: number of epoch to do pretraining

    :type pretrain_lr: float
    :param pretrain_lr: learning rate to be used during pre-training

    :type n_iter: int
    :param n_iter: maximal number of iterations to run the optimizer

    :type dataset: string
    :param dataset: path the the pickled dataset

    """
   
    print '###########################'
    print 'Pretraining epochs: ', pretraining_epochs
    print 'Finetuning epochs: ', training_epochs
    print '###########################'
    
    W = []
    b = []
    
    #########################################################
    #########################################################
    
    # TODO: needs more work.
    # Snippet to resume training if the program crashes halfway through.
    opts, arg = getopt.getopt(sys.argv[1:],"rp:")
    for opt, arg in opts:
        if opt == '-r':
            resumeTraining = True                               # make this true to resume training from saved model    
        elif opt == '-p':
            prefix = arg
            
    flag = 0
    
    if(resumeTraining):
        
        flag = 1
        
        path = '/media/brain/1A34723D34721BC7/BRATS/codes/results/test_255_9x9x3/9x9x3pre_training.pkl'
                
        savedModel_preTraining = file(path,'rb')
        genVariables_preTraining = cPickle.load(savedModel_preTraining)
        layer_number, epochs_done_preTraining, mean_cost, pretrain_lr = genVariables_preTraining
        epoch_flag = 1
        print 'Inside resumeTraining!!!!!!!!!!!!!!!!!!'
        no_of_layers = len(hidden_layers_sizes) + 1
        
        for i in xrange(no_of_layers):
            W.append(cPickle.load(savedModel_preTraining))
            b.append(cPickle.load(savedModel_preTraining))    
   
              
    ##############################################################
    ##############################################################

    if flag == 0:
                
        datasets = load_data(b_patch_filename,b_groundtruth_filename,b_valid_filename,b_validtruth_filename)
    
        train_set_x, train_set_y = datasets[0]
        valid_set_x, valid_set_y = datasets[1]
        test_set_x, test_set_y = datasets[2]
        
    
        # compute number of minibatches for training, validation and testing
        n_train_batches = train_set_x.get_value(borrow=True).shape[0]
        n_train_batches /= batch_size
    
        # numpy random generator
        # start-snippet-3
        numpy_rng = numpy.random.RandomState(89677)
        print '... building the model'
        
    #    print 'W: ', W
    #    print 'b: ', b
        
        ################################################################
        ################CONSTRUCTION OF SdA CLASS#######################
        sda = SdA(
            numpy_rng=numpy_rng,
            n_ins=n_ins,
#......... the rest of this example is omitted .........
Developer: kvrd18, Project: DDP_SdA_Brain, Lines: 101, Source: test_SdA.py
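One detail worth noting in this example: n_train_batches /= batch_size relies on Python 2 integer division, which silently drops any partial final minibatch. An equivalent that states the intent explicitly (and behaves the same under Python 3) is:

n_samples = train_set_x.get_value(borrow=True).shape[0]
n_train_batches = n_samples // batch_size  # floor division: the partial batch is dropped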

Example 7: test_SdA

def test_SdA():
    SdA.test_SdA(pretraining_epochs = 2, training_epochs = 3)
Developer: sauravbiswasiupr, Project: image_transformations, Lines: 2, Source: test.py

Example 8:

            pretrain_lr=0.1
            finetune_lr=0.1
            alpha=0.1
            lambda_reg=0.00005
            alpha_reg=0.5
            n_hidden=[256,128,64]
            corruption_levels=[0.01,0.01,0.01]
            pretraining_epochs=5
            training_epochs=1000
            batch_size=100

            # train, and extract features from training set
            classifier,training_time=SdA.train_model(train_set_x_org=train_set_x_org, train_set_y_org=train_set_y_org, 
                                                     valid_set_x_org=valid_set_x_org, valid_set_y_org=valid_set_y_org, 
                                                     pretrain_lr=pretrain_lr,finetune_lr=finetune_lr, alpha=alpha, 
                                                     lambda_reg=lambda_reg, alpha_reg=alpha_reg, 
                                                     n_hidden=n_hidden, corruption_levels=corruption_levels,
                                                     pretraining_epochs=pretraining_epochs, training_epochs=training_epochs,
                                                     batch_size=batch_size, rng=rng)
            
            # test the classifier
            test_set_y_pred,test_set_y_pred_prob,test_time=SdA.test_model(classifier, test_set_x_org, batch_size=200)
                        
            # evaluate classification performance
            perf_i,conf_mat_i=cl.perform(test_set_y_org,test_set_y_pred,numpy.unique(train_set_y_org))
            print perf_i
            print conf_mat_i
            if i==0:
                perf=perf_i
                conf_mat=conf_mat_i
                training_times=training_time
Developer: LazyXuan, Project: DECRES, Lines: 31, Source: main_SdA_new.py
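The if i == 0 branch suggests an outer loop (e.g., over repeated train/test splits) that accumulates per-iteration results. A common way to summarize such a loop is to collect every perf_i and average at the end; a small sketch with made-up placeholder numbers:

import numpy

# hypothetical per-split outputs of cl.perform (three splits, three metrics)
perf_folds = [numpy.array([0.91, 0.88, 0.90]),
              numpy.array([0.89, 0.90, 0.92]),
              numpy.array([0.93, 0.87, 0.91])]

perf_mean = numpy.mean(perf_folds, axis=0)  # average of each metric
perf_std = numpy.std(perf_folds, axis=0)    # spread across splits
print perf_mean, perf_std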

Example 9: Tardy

class Tardy(object):
	
	def __init__(self):
		"""
			Analyse lateness data and make predictions
		"""

		self.Loaded = namedtuple('Loaded', 'columnMap data')
		# self.log = Log("Tardy", None, None)
		self.logger = logging.getLogger('Tardy')

	def loadData(self, filename):
		new_path = os.path.join(
			os.path.split(__file__)[0],
			"..", "..",
			"data",
			filename
		)

		extract = ["fragmentOrderProduct", "earlyByHours"]

		with open(new_path, 'rb') as csvfile:
			reader = csv.reader(csvfile)
			firstLine = reader.next()
			columnMap = dict(zip(iter(firstLine),itertools.count()))

			data = []
			for i in reader:
				row = []
				for j in extract:
					row.append(i[columnMap[j]])
				data.append(row)

		return self.Loaded (columnMap, data)

	def vectoriseData(self, loaded):

		f = FuzzyStringDict()

		# Identify ahead to make vectors same size
		for i in loaded.data:
			f.identify(i[0])

		# Transform
		self.data = [[f.toVector(i[0]),i[1]] for i in loaded.data]

		self.logger.debug("Loaded %d training items" % (len(self.data)))

		self.train_set_x = tensor.as_tensor_variable([i[0] for i in self.data], name='train_x')

		self.fuzzyDict = f
		return self.Loaded (loaded.columnMap, self.data)

	def buildFirstLayer(self, loaded):

		numpy_rng = numpy.random.RandomState(89677)

		self.logger.debug('... building the model')
		# construct the stacked denoising autoencoder class
		self.sda = SdA(
			numpy_rng = numpy_rng,
			n_ins = len(self.fuzzyDict),
			hidden_layers_sizes = [len(self.fuzzyDict)*1]*2,
			n_outs = math.sqrt(len(self.fuzzyDict))
		)
		

	def trainFirstLayer(self):

		batch_size = 1
		
		self.logger.debug('... getting the pretraining functions')
		pretraining_fns = self.sda.pretraining_functions(train_set_x=self.train_set_x,
														batch_size=batch_size)
		
		self.logger.debug('... pre-training the model')
		start_time = timeit.default_timer()

		## Pre-train layer-wise
		corruption_levels = [.2] * 6
		pretraining_epochs = 3
		pretrain_lr = 0.1

		# compute number of minibatches for training, validation and testing
		n_train_batches = len(self.data) #self.train_set_x.get_value(borrow=True).shape[0]
		n_train_batches /= batch_size

		for i in xrange(self.sda.n_layers):
			# go through pretraining epochs
			for epoch in xrange(pretraining_epochs):
				# go through the training set
				c = []
				for batch_index in xrange(n_train_batches):
					c.append(pretraining_fns[i](index=batch_index,
							 corruption=corruption_levels[i],
							 lr=pretrain_lr))
				self.logger.debug('Pre-training layer %i with %i batches, epoch %d, cost ' % (i, n_train_batches, epoch))
				self.logger.debug(numpy.mean(c))
Developer: davidhughhenrymack, Project: lab, Lines: 98, Source: tardy.py
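Example 9 stops after layer-wise pretraining. If this SdA matches the Theano deep-learning tutorial, finetuning would continue with build_finetune_functions; the sketch below assumes that API and reuses names from the examples above (sda, batch_size, n_train_batches, plus shared-variable datasets), so treat it as an outline rather than the project's actual code:

import numpy

finetune_lr = 0.1
training_epochs = 10
datasets = [(train_set_x, train_set_y),
            (valid_set_x, valid_set_y),
            (test_set_x, test_set_y)]  # Theano shared variables

train_fn, validate_model, test_model = sda.build_finetune_functions(
    datasets=datasets,
    batch_size=batch_size,
    learning_rate=finetune_lr,
)

for epoch in xrange(training_epochs):
    for minibatch_index in xrange(n_train_batches):
        train_fn(minibatch_index)
    # validate_model() returns one loss per validation minibatch
    print 'epoch %d, validation error %f' % (epoch,
                                             numpy.mean(validate_model()))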


Note: The SdA class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. For distribution and use, please refer to the corresponding project's License. Do not reproduce without permission.