

Python MLP.sum Method Code Examples

This article collects and summarizes typical usage examples of the MLP.sum method from the Python module mlp. If you have been wondering what exactly Python's MLP.sum does, how to call it, or what it looks like in real code, the hand-picked examples here may help. You can also explore further usage examples of the class it belongs to, mlp.MLP.


One code example of the MLP.sum method is shown below. Examples are sorted by popularity by default.
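The mlp module itself is not reproduced on this page, so the body of MLP.sum is not visible here. Judging from how the example below uses it (its result is bound to a variable named likelihood, next to a log_likelihoods output path), a plausible reading is that it returns the summed log-probability that the model assigns to the target symbols of a batch. The sketch below is an assumption modeled on the classic Theano MLP tutorial layout, not the project's actual code:

import numpy
import theano
import theano.tensor as T

class MLP(object):
    # Minimal one-hidden-layer stand-in; only enough to define sum().
    def __init__(self, rng, input, n_in, n_hidden, n_out):
        W_h = theano.shared(numpy.asarray(
            rng.uniform(-0.1, 0.1, (n_in, n_hidden)), dtype='float32'))
        b_h = theano.shared(numpy.zeros(n_hidden, dtype='float32'))
        W_o = theano.shared(numpy.zeros((n_hidden, n_out), dtype='float32'))
        b_o = theano.shared(numpy.zeros(n_out, dtype='float32'))
        hidden = T.tanh(T.dot(input, W_h) + b_h)
        self.p_y_given_x = T.nnet.softmax(T.dot(hidden, W_o) + b_o)

    def sum(self, y):
        # summed log-probability of the target symbols in y,
        # i.e. the log-likelihood of the batch under the model
        return T.sum(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])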

Example 1: train_mlp

# Required import: from mlp import MLP [as alias]
# Or: from mlp.MLP import sum [as alias]
# (the full source also uses theano, theano.tensor as T, numpy, time, and the
# project-local helpers Vocabulary, DataProvider, LearningRateNewBob and
# TNetsCacheSimple, whose imports are omitted in this snippet)
def train_mlp(L1_reg = 0.0, L2_reg = 0.0000, num_batches_per_bunch = 512, batch_size = 1, num_bunches_queue = 5, offset = 0, path_name = '/afs/inf.ed.ac.uk/user/s12/s1264845/scratch/s1264845/data/'):
    

    voc_list = Vocabulary(path_name + 'train')
    voc_list.vocab_create()
    vocab = voc_list.vocab
    vocab_size = voc_list.vocab_size
    
    voc_list_valid = Vocabulary(path_name + 'valid')
    voc_list_valid.vocab_create()
    count = voc_list_valid.count

    voc_list_test = Vocabulary(path_name + 'test')
    voc_list_test.vocab_create()
    no_test_tokens = voc_list_test.count
    print 'The number of sentences in the test set:', no_test_tokens
 
    #print 'number of words in valid data:', count 
    dataprovider_train = DataProvider(path_name + 'train', vocab, vocab_size )
    dataprovider_valid = DataProvider(path_name + 'valid', vocab, vocab_size )
    dataprovider_test = DataProvider(path_name + 'test', vocab, vocab_size )

    #learn_list = [0.1, 0.1, 0.1, 0.75, 0.5, 0.25, 0.125, 0.0625, 0]
    exp_name = 'fine_tuning.hdf5'
    posterior_path = 'log_likelihoods'
    print '..building the model'

    #symbolic variables for input, target vector and batch index
    index = T.lscalar('index')
    x = T.fmatrix('x')
    y = T.ivector('y')
    learning_rate = T.fscalar('learning_rate') 

    #theano shares variables for train, valid and test
    train_set_x = theano.shared(numpy.empty((1,1), dtype='float32'), allow_downcast = True)
    train_set_y = theano.shared(numpy.empty((1), dtype = 'int32'), allow_downcast = True)
    
    valid_set_x = theano.shared(numpy.empty((1,1), dtype='float32'), allow_downcast = True)
    valid_set_y = theano.shared(numpy.empty((1), dtype = 'int32'), allow_downcast = True)
    
    test_set_x = theano.shared(numpy.empty((1,1), dtype='float32'), allow_downcast = True)
    test_set_y = theano.shared(numpy.empty((1), dtype = 'int32'), allow_downcast = True)
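    # these shared variables start out empty; the TNetsCacheSimple queue set up
    # further below presumably refills them one bunch of batches at a time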
    
    rng = numpy.random.RandomState(1234) 
   
    classifier = MLP(rng = rng, input = x, n_in = vocab_size, n_hidden1 = 30, n_hidden2 = 60 , n_out = vocab_size)
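    # both input and output layers are vocabulary-sized: the DataProvider
    # presumably feeds a one-hot style encoding of the context, and the softmax
    # output layer scores every word in the vocabulary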
    #classifier = MLP(rng = rng, input = x, n_in = vocab_size, n_hidden = 60, n_out = vocab_size)

    cost = classifier.negative_log_likelihood(y) + L1_reg * classifier.L1 + L2_reg * classifier.L2_sqr
    
    #constructor for learning rate class
    learnrate_schedular = LearningRateNewBob(start_rate=0.001, scale_by=.5, max_epochs=9999,\
                                    min_derror_ramp_start=.1, min_derror_stop=.1, init_error=100.)
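    # NewBob-style schedule: hold the rate at start_rate until the improvement
    # in validation error drops below min_derror_ramp_start, then halve it each
    # epoch (scale_by = .5); training stops once the improvement falls below
    # min_derror_stop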

    #learnrate_schedular = LearningRateList(learn_list)

    frame_error = classifier.errors(y)
    likelihood = classifier.sum(y)
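    # classifier.sum(y) evaluates to the summed log-likelihood of the batch
    # targets (see the sketch of a plausible implementation above)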

    #test model
    test_model = theano.function(inputs = [index], outputs = likelihood,  \
                                 givens = {x: test_set_x[index * batch_size:(index + 1) * batch_size],
                                           y: test_set_y[index * batch_size:(index + 1) * batch_size]})
    #validation_model
    validate_model = theano.function(inputs = [index], outputs = [frame_error, likelihood], \
                                     givens = {x: valid_set_x[index * batch_size:(index + 1) * batch_size],
                                               y: valid_set_y[index * batch_size:(index + 1) * batch_size]})

    gradient_param = []
    #calculates the gradient of cost with respect to parameters 
    for param in classifier.params:
        gradient_param.append(T.cast(T.grad(cost, param), 'float32'))
        
    updates = []
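    # vanilla stochastic gradient descent: param <- param - learning_rate * gradient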
    
    for param, gradient in zip(classifier.params, gradient_param):
        updates.append((param, param - learning_rate * gradient))
    
    #training_model
    train_model = theano.function(inputs = [index, theano.Param(learning_rate, default = 0.01)], outputs = cost, updates = updates, \
                                 givens = {x: train_set_x[index * batch_size:(index + 1) * batch_size],
                                           y: train_set_y[index * batch_size:(index + 1) * batch_size]})
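    # theano.Param gives learning_rate a default of 0.01; the truncated training
    # loop below can override it per call, e.g.
    # train_model(batch_index, learnrate_schedular.get_rate())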
   

    #theano.printing.pydotprint(train_model, outfile = "pics/train.png", var_with_name_simple = True) 
    #path_save = '/afs/inf.ed.ac.uk/user/s12/s1264845/scratch/s1264845/mlp/saved_weights/' 
    print '.....training'    
    best_valid_loss = numpy.inf    
    epoch = 1
    start_time = time.time()
    while learnrate_schedular.get_rate() != 0:

        print 'learning_rate:', learnrate_schedular.get_rate()
        print 'epoch_number:', learnrate_schedular.epoch
        
        frames_showed, progress = 0, 0
        start_epoch_time = time.time()
        
        tqueue = TNetsCacheSimple.make_queue()
        cache = TNetsCacheSimple(tqueue, shuffle_frames = True, offset=0, \
#......... the rest of the code is omitted here .........
Developer ID: andersonhaynes, Project: nnlm, Lines of code: 103, Source file: nn_lm_new.py
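The function body is truncated above, so the loop that actually drives train_model, validate_model and test_model is not shown. For a neural network language model like this one, a typical way to consume test_model is to accumulate the per-batch log-likelihoods it returns and convert them into a perplexity over the test tokens counted earlier. The helper below is hypothetical (the names test_model, n_test_batches and n_test_tokens are modeled on the snippet, not taken from the missing code):

import math

def test_perplexity(test_model, n_test_batches, n_test_tokens):
    # sum the batch log-likelihoods returned by the compiled Theano function
    total_log_likelihood = 0.0
    for batch_index in xrange(n_test_batches):
        total_log_likelihood += float(test_model(batch_index))
    # perplexity = exp(-(1/N) * total log-likelihood of the N test tokens)
    return math.exp(-total_log_likelihood / n_test_tokens)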


Note: The mlp.MLP.sum examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective developers; copyright remains with the original authors, and distribution or use of the code should follow the corresponding project's license. Please do not republish without permission.