當前位置: 首頁>>代碼示例>>Python>>正文


Python SdA.pretraining_functions方法代碼示例

本文整理匯總了Python中SdA.pretraining_functions方法的典型用法代碼示例。如果您正苦於以下問題:Python SdA.pretraining_functions方法的具體用法?Python SdA.pretraining_functions怎麼用?Python SdA.pretraining_functions使用的例子?那麼, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在SdA的用法示例。


在下文中一共展示了SdA.pretraining_functions方法的3個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於系統推薦出更棒的Python代碼示例。

示例1: test_SdA

# 需要導入模塊: import SdA [as 別名]
# 或者: from SdA import pretraining_functions [as 別名]

#.........這裏部分代碼省略.........
        n_train_batches /= batch_size
    
        # numpy random generator
        # start-snippet-3
        numpy_rng = numpy.random.RandomState(89677)
        print '... building the model'
        
    #    print 'W: ', W
    #    print 'b: ', b
        
        ################################################################
        ################CONSTRUCTION OF SdA CLASS#######################
        sda = SdA(
            numpy_rng=numpy_rng,
            n_ins=n_ins,
            hidden_layers_sizes=hidden_layers_sizes,
            n_outs=n_outs)
            
        print 'SdA constructed'
        ################################################################
        ################################################################
        
        ################################################################
        # end-snippet-3 start-snippet-4
        #########################
        # PRETRAINING THE MODEL #
        #########################
    
        flag = open(prefix+'flag.pkl','wb')
        cPickle.dump(1,flag, protocol = cPickle.HIGHEST_PROTOCOL)
        flag.close()
            
        print '... getting the pretraining functions'
        pretraining_fns = sda.pretraining_functions(train_set_x=train_set_x,batch_size=batch_size)
        print 'Length of pretraining function: ', len(pretraining_fns)

        print '... pre-training the model'
        start_time = time.clock()
        ## Pre-train layer-wise
        log_pretrain_cost = []

        

        shapeimg = [(33,44),(50,60), (25,40), (50,10)]

        #corruption_levels = [.001, .001, .001]
        for i in xrange(sda.n_layers):
            
            # if i < layer_number:
            #     i = layer_number
                #print i
                # go through pretraining epochs
            best_cost = numpy.inf
            adapt_counter = 0
            learning_rate = pretrain_lr

            if i==0:
                num_of_epochs = pretraining_epochs
            else:
                num_of_epochs = pretraining_epochs
            for epoch in xrange(num_of_epochs):


                ##########################################            
                # if epoch_flag is 1 and epoch < epochs_done_preTraining:
                #     epoch = epochs_done_preTraining
開發者ID:kvrd18,項目名稱:DDP_SdA_Brain,代碼行數:70,代碼來源:test_SdA.py

示例2: test_SdA

# 需要導入模塊: import SdA [as 別名]
# 或者: from SdA import pretraining_functions [as 別名]

#.........這裏部分代碼省略.........
    n_train_batches /= batch_size

    # numpy random generator
    # start-snippet-3
    numpy_rng = numpy.random.RandomState(89677)
    print '... building the model'
    
#    print 'W: ', W
#    print 'b: ', b
    
    ################################################################
    ################CONSTRUCTION OF SdA CLASS#######################
    sda = SdA(
        numpy_rng=numpy_rng,
        n_ins=n_ins,
        hidden_layers_sizes=hidden_layers_sizes,
        n_outs=n_outs, W = W, b=b)
        
    print 'SdA constructed'
    ################################################################
    ################################################################
    if flagValue is 1:
    ################################################################
    # end-snippet-3 start-snippet-4
    #########################
    # PRETRAINING THE MODEL #
    #########################
    
        flag = open(prefix+'flag.pkl','wb')
        cPickle.dump(1,flag, protocol = cPickle.HIGHEST_PROTOCOL)
        flag.close()
            
        print '... getting the pretraining functions'
        pretraining_fns = sda.pretraining_functions(train_set_x=train_set_x,batch_size=batch_size)
        print 'Length of pretraining function: ', len(pretraining_fns)

        print '... pre-training the model'
        start_time = time.clock()
        ## Pre-train layer-wise
        log_pretrain_cost = []
        #corruption_levels = [.001, .001, .001]
        for i in xrange(sda.n_layers):
        
            if i < layer_number:
                i = layer_number
                #print i
                # go through pretraining epochs
        
            for epoch in xrange(pretraining_epochs):
                ##########################################            
                if epoch_flag is 1 and epoch < epochs_done_preTraining:
                    epoch = epochs_done_preTraining
                    epoch_flag = 0
                    ##########################################
                    # go through the training set
                c = []
                for batch_index in xrange(n_train_batches):
                    #sprint batch_index
                    c.append(pretraining_fns[i](index=batch_index,
                         corruption=corruption_levels[i],
                         lr=pretrain_lr))
                print 'Pre-training layer %i, epoch %d, cost ' % (i, epoch),
                print numpy.mean(c)
                log_pretrain_cost.append(numpy.mean(c))

            
開發者ID:subru1603,項目名稱:DDP_SdA_Brain,代碼行數:68,代碼來源:10_test_SdA.py

示例3: Tardy

# 需要導入模塊: import SdA [as 別名]
# 或者: from SdA import pretraining_functions [as 別名]
class Tardy(object):
	"""Load lateness data from CSV, vectorise it, and pre-train an SdA on it."""

	def __init__(self):
		"""Set up the record type and logger used by the load/train steps."""
		# Lightweight record pairing a column-name -> index map with row data.
		self.Loaded = namedtuple('Loaded', 'columnMap data')
		# self.log = Log("Tardy", None, None)
		self.logger = logging.getLogger('Tardy')

	def loadData(self, filename):
		"""Read ``data/<filename>`` (two directories above this module) and
		return a Loaded(columnMap, data) record holding only the extracted
		columns of every row."""
		data_path = os.path.join(
			os.path.split(__file__)[0],
			"..", "..",
			"data",
			filename
		)

		# Columns pulled out of each CSV row, in this order.
		wanted_columns = ["fragmentOrderProduct", "earlyByHours"]

		with open(data_path, 'rb') as csvfile:
			reader = csv.reader(csvfile)
			header = next(reader)
			# Map each header name to its column index (last wins on duplicates).
			columnMap = dict((name, index) for index, name in enumerate(header))
			rows = [
				[record[columnMap[column]] for column in wanted_columns]
				for record in reader
			]

		return self.Loaded(columnMap, rows)

	def vectoriseData(self, loaded):
		"""Convert loaded rows into [vector, label] pairs, build the Theano
		training matrix, and return a Loaded record with the vectorised data."""
		fuzzy = FuzzyStringDict()

		# First pass: register every string so all vectors share one length.
		for record in loaded.data:
			fuzzy.identify(record[0])

		# Second pass: vectorise the string column, keep the label unchanged.
		self.data = [[fuzzy.toVector(record[0]), record[1]] for record in loaded.data]

		self.logger.debug("Loaded %d training items" % (len(self.data)))

		self.train_set_x = tensor.as_tensor_variable(
			[pair[0] for pair in self.data], name='train_x')

		self.fuzzyDict = fuzzy
		return self.Loaded(loaded.columnMap, self.data)

	def buildFirstLayer(self, loaded):
		"""Construct the stacked denoising autoencoder sized from the fuzzy
		dictionary: input width = dict size, two equally wide hidden layers."""
		numpy_rng = numpy.random.RandomState(89677)

		self.logger.debug('... building the model')
		input_width = len(self.fuzzyDict)
		self.sda = SdA(
			numpy_rng=numpy_rng,
			n_ins=input_width,
			hidden_layers_sizes=[input_width] * 2,
			n_outs=math.sqrt(input_width)
		)

	def trainFirstLayer(self):
		"""Greedy layer-wise pre-training of every SdA layer over the data
		prepared by vectoriseData()."""
		batch_size = 1

		self.logger.debug('... getting the pretraining functions')
		pretraining_fns = self.sda.pretraining_functions(train_set_x=self.train_set_x,
														batch_size=batch_size)

		self.logger.debug('... pre-training the model')
		start_time = timeit.default_timer()

		# Fixed pre-training hyper-parameters (one corruption level per layer).
		corruption_levels = [.2] * 6
		pretraining_epochs = 3
		pretrain_lr = 0.1

		# One mini-batch per training item, since batch_size is 1.
		n_train_batches = len(self.data) // batch_size

		for layer in xrange(self.sda.n_layers):
			for epoch in xrange(pretraining_epochs):
				# Collect the reconstruction cost of every mini-batch.
				costs = [pretraining_fns[layer](index=batch,
							corruption=corruption_levels[layer],
							lr=pretrain_lr)
						for batch in xrange(n_train_batches)]
				self.logger.debug('Pre-training layer %i with %i batches, epoch %d, cost ' % (layer, n_train_batches, epoch))
				self.logger.debug(numpy.mean(costs))


注:本文中的SdA.pretraining_functions方法示例由純淨天空整理自Github/MSDocs等開源代碼及文檔管理平台,相關代碼片段篩選自各路編程大神貢獻的開源項目,源碼版權歸原作者所有,傳播和使用請參考對應項目的License;未經允許,請勿轉載。