This article collects typical usage examples of the Python method mlp.MLP.sum. If you are wondering what MLP.sum does, how to use it, or what calling it looks like in practice, the curated code example below may help. You can also explore further usage examples of the containing class mlp.MLP.
Shown below is 1 code example of the MLP.sum method, sorted by popularity by default. You can upvote examples you like or find useful; your feedback helps the system recommend better Python code examples.
Example 1: train_mlp
# Required module: from mlp import MLP [as alias]
# Or: from mlp.MLP import sum [as alias]
# Note: the snippet also assumes the usual imports of the original file, e.g.
# numpy, theano, theano.tensor as T and time, plus the project-specific
# Vocabulary, DataProvider, LearningRateNewBob and TNetsCacheSimple classes.
def train_mlp(L1_reg=0.0, L2_reg=0.0000, num_batches_per_bunch=512, batch_size=1, num_bunches_queue=5, offset=0, path_name='/afs/inf.ed.ac.uk/user/s12/s1264845/scratch/s1264845/data/'):
    # build the vocabulary from the training set
    voc_list = Vocabulary(path_name + 'train')
    voc_list.vocab_create()
    vocab = voc_list.vocab
    vocab_size = voc_list.vocab_size

    voc_list_valid = Vocabulary(path_name + 'valid')
    voc_list_valid.vocab_create()
    count = voc_list_valid.count

    voc_list_test = Vocabulary(path_name + 'test')
    voc_list_test.vocab_create()
    no_test_tokens = voc_list_test.count
    print 'The number of sentences in the test set:', no_test_tokens
    #print 'number of words in valid data:', count

    dataprovider_train = DataProvider(path_name + 'train', vocab, vocab_size)
    dataprovider_valid = DataProvider(path_name + 'valid', vocab, vocab_size)
    dataprovider_test = DataProvider(path_name + 'test', vocab, vocab_size)

    #learn_list = [0.1, 0.1, 0.1, 0.75, 0.5, 0.25, 0.125, 0.0625, 0]
    exp_name = 'fine_tuning.hdf5'
    posterior_path = 'log_likelihoods'
    print '..building the model'

    # symbolic variables for the input, target vector and batch index
    index = T.lscalar('index')
    x = T.fmatrix('x')
    y = T.ivector('y')
    learning_rate = T.fscalar('learning_rate')

    # Theano shared variables for the train, valid and test data
    train_set_x = theano.shared(numpy.empty((1, 1), dtype='float32'), allow_downcast=True)
    train_set_y = theano.shared(numpy.empty((1), dtype='int32'), allow_downcast=True)
    valid_set_x = theano.shared(numpy.empty((1, 1), dtype='float32'), allow_downcast=True)
    valid_set_y = theano.shared(numpy.empty((1), dtype='int32'), allow_downcast=True)
    test_set_x = theano.shared(numpy.empty((1, 1), dtype='float32'), allow_downcast=True)
    test_set_y = theano.shared(numpy.empty((1), dtype='int32'), allow_downcast=True)

    rng = numpy.random.RandomState(1234)
    classifier = MLP(rng=rng, input=x, n_in=vocab_size, n_hidden1=30, n_hidden2=60, n_out=vocab_size)
    #classifier = MLP(rng = rng, input = x, n_in = vocab_size, n_hidden = 60, n_out = vocab_size)

    # cost: negative log-likelihood plus L1/L2 regularisation terms
    cost = classifier.negative_log_likelihood(y) + L1_reg * classifier.L1 + L2_reg * classifier.L2_sqr

    # constructor for the learning-rate schedule
    learnrate_schedular = LearningRateNewBob(start_rate=0.001, scale_by=.5, max_epochs=9999,
                                             min_derror_ramp_start=.1, min_derror_stop=.1, init_error=100.)
    #learnrate_schedular = LearningRateList(learn_list)

    frame_error = classifier.errors(y)
    # the method this page documents: classifier.sum(y) is used below as the
    # likelihood output of the validation and test functions
    likelihood = classifier.sum(y)

    # test model
    test_model = theano.function(inputs=[index], outputs=likelihood,
            givens={x: test_set_x[index * batch_size:(index + 1) * batch_size],
                    y: test_set_y[index * batch_size:(index + 1) * batch_size]})
    # validation model
    validate_model = theano.function(inputs=[index], outputs=[frame_error, likelihood],
            givens={x: valid_set_x[index * batch_size:(index + 1) * batch_size],
                    y: valid_set_y[index * batch_size:(index + 1) * batch_size]})

    # gradients of the cost with respect to the model parameters
    gradient_param = []
    for param in classifier.params:
        gradient_param.append(T.cast(T.grad(cost, param), 'float32'))
    updates = []
    for param, gradient in zip(classifier.params, gradient_param):
        updates.append((param, param - learning_rate * gradient))

    # training model
    train_model = theano.function(inputs=[index, theano.Param(learning_rate, default=0.01)], outputs=cost, updates=updates,
            givens={x: train_set_x[index * batch_size:(index + 1) * batch_size],
                    y: train_set_y[index * batch_size:(index + 1) * batch_size]})
    #theano.printing.pydotprint(train_model, outfile = "pics/train.png", var_with_name_simple = True)
    #path_save = '/afs/inf.ed.ac.uk/user/s12/s1264845/scratch/s1264845/mlp/saved_weights/'

    print '.....training'
    best_valid_loss = numpy.inf
    epoch = 1
    start_time = time.time()
    while learnrate_schedular.get_rate() != 0:
        print 'learning_rate:', learnrate_schedular.get_rate()
        print 'epoch_number:', learnrate_schedular.epoch
        frames_showed, progress = 0, 0
        start_epoch_time = time.time()
        tqueue = TNetsCacheSimple.make_queue()
        cache = TNetsCacheSimple(tqueue, shuffle_frames=True, offset=0, \
#......... part of the code is omitted here .........
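The remainder of the function (the training/validation loop) is omitted by the source page and is not reproduced here. Purely as an illustration of how the value returned by MLP.sum is typically consumed, the sketch below accumulates the per-batch output of the compiled test_model into an aggregate test-set score. This is a minimal sketch under stated assumptions, not the omitted code: it assumes test_set_x/test_set_y have already been filled from dataprovider_test, and the batch count n_test_batches is a hypothetical name introduced only for this illustration.

# Hypothetical usage sketch (not part of the original example):
# test_model(i) returns classifier.sum(y) evaluated on batch i, so summing it
# over all batches gives an aggregate score for the whole test set.
total_score = 0.0
for batch_index in xrange(n_test_batches):   # n_test_batches: assumed known
    total_score += test_model(batch_index)
print 'aggregate test score (sum of MLP.sum over batches):', total_score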