

Python Parameters.save Method Code Examples

This article collects typical usage examples of the Python method theano_toolkit.parameters.Parameters.save. If you are wondering what Parameters.save does, how to call it, or what real code that uses it looks like, the curated examples below should help. You can also explore further usage examples of the containing class, theano_toolkit.parameters.Parameters.


Six code examples of the Parameters.save method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code samples.
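Before diving into the examples, here is a minimal round-trip sketch of the API they all rely on. It assumes the usual theano_toolkit conventions visible in the snippets below (attribute assignment on a Parameters container registers a named Theano shared variable; save pickles the current values; load restores them); the parameter names and file name are stand-ins, not taken from any of the projects.

import numpy as np
from theano_toolkit.parameters import Parameters

P = Parameters()
# Attribute assignment registers named Theano shared variables (hypothetical names).
P.W_example = np.random.randn(10, 5).astype(np.float32)
P.b_example = np.zeros(5, dtype=np.float32)

P.save('params.pkl')  # pickle the current parameter values to disk

# ... training would normally mutate the shared variables here ...
P.W_example.set_value(np.zeros((10, 5), dtype=np.float32))

P.load('params.pkl')  # restore the saved values into the same container
print(P.W_example.get_value().sum())  # back to the saved (non-zero) values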

Example 1: __init__

# Required module import: from theano_toolkit.parameters import Parameters [as alias]
# Or alternatively: from theano_toolkit.parameters.Parameters import save [as alias]
class Model:
	"""
	Simple predictive model for forecasting words from
	sequence using LSTMs. Choose how many LSTMs to stack
	what size their memory should be, and how many
	words can be predicted.
	"""
	def __init__(self, hidden_size, input_size, vocab_size, stack_size=1, celltype=LSTM):
		self.hidden_size = hidden_size  # stored here; load() needs it to slice the initial hidden state

		# core layer in RNN/LSTM
		self.model = StackedCells(input_size, celltype=celltype, layers=[hidden_size] * stack_size)

		# add an embedding
		self.model.layers.insert(0, Embedding(vocab_size, input_size))

		# add a classifier:
		self.model.layers.append(Layer(hidden_size, vocab_size, activation=softmax))

		self.turing_params = Parameters()
		# init the Turing machine model
		self.turing_updates, self.turing_predict = turing_model.build(self.turing_params, hidden_size, vocab_size)

		# inputs are matrices of indices,
		# each row is a sentence, each column a timestep
		self._stop_word   = theano.shared(np.int32(999999999), name="stop word")
		self.for_how_long = T.ivector()
		self.input_mat = T.imatrix()
		self.priming_word = T.iscalar()
		self.srng = T.shared_randomstreams.RandomStreams(np.random.randint(0, 1024))

		# create symbolic variables for prediction:
		# changed by darong -- open issue: what does greedy mean here?
		self.lstm_predictions = self.create_lstm_prediction()
		self.final_predictions = self.create_final_prediction()

		# create symbolic variable for greedy search:
		self.greedy_predictions = self.create_lstm_prediction(greedy=True)

		# create gradient training functions:
		self.create_cost_fun()  # creates 2 cost functions (lstm, final)

		self.lstm_lr = 0.01
		self.turing_lr = 0.01
		self.all_lr = 0.01
		self.create_training_function()  # creates 3 training functions (lstm, turing, all)
		self.create_predict_function()  # creates 2 prediction functions (lstm, final)

		# create perplexity (ppl) measures
		self.lstm_ppl = self.create_lstm_ppl()
		self.final_ppl = self.create_final_ppl()
		self.create_ppl_function()


	def save(self, save_file, vocab):
		# use pickle rather than cPickle: cPickle cannot serialize the lambda functions in the model
		pickle.dump(self.model, open(save_file, "wb"))
		pickle.dump(vocab, open(save_file + '.vocab', "wb"))

	def save_turing(self, save_file):
		self.turing_params.save(save_file + '.turing')


	def load(self, load_file, lr):
		self.model = pickle.load(open(load_file, "rb"))
		if os.path.isfile(load_file + '.turing'):
			self.turing_params.load(load_file + '.turing')
		else:
			print "no Turing model found -- pretraining Turing params from the LSTM params"
			self.turing_params['W_input_hidden'] = self.model.layers[-1].params[0].get_value().T  # not sure
			self.turing_params['W_read_hidden'] = self.model.layers[-1].params[0].get_value().T
			self.turing_params['b_hidden_0'] = self.model.layers[-1].params[1].get_value()
			temp = self.model.layers[1].initial_hidden_state.get_value()[self.hidden_size:]
			self.turing_params['memory_init'] = temp.reshape((1,) + temp.shape)

		# need to compile again for calculating predictions after loading lstm
		self.srng = T.shared_randomstreams.RandomStreams(np.random.randint(0, 1024))
		self.lstm_predictions = self.create_lstm_prediction()
		self.final_predictions = self.create_final_prediction()
		self.greedy_predictions = self.create_lstm_prediction(greedy=True)  # could change to final
		self.create_cost_fun()  # creates 2 cost functions (lstm, final)
		self.lstm_lr = lr
		self.turing_lr = lr  # change this
		self.all_lr = lr
		self.create_training_function()  # creates 3 training functions (lstm, turing, all)
		self.create_predict_function()  # creates 2 prediction functions (lstm, final)
		self.lstm_ppl = self.create_lstm_ppl()
		self.final_ppl = self.create_final_ppl()
		self.create_ppl_function()
		print "done loading model"
#		print "done compile"


	def stop_on(self, idx):
		self._stop_word.set_value(idx)
		
	@property
	def params(self):
		return self.model.params
								 
	def create_lstm_prediction(self, greedy=False):
		def step(idx, *states):
			# new hiddens are the states we need to pass to LSTMs
#......... the remainder of this code has been omitted .........
Author: darongliu | Project: Lstm_Turing_LM | Lines of code: 103 | Source file: lm_v4.py

Example 2: islice

# Required module import: from theano_toolkit.parameters import Parameters [as alias]
# Or alternatively: from theano_toolkit.parameters.Parameters import save [as alias]
		test_group_answers = islice(group_answers,test_instance_count)
		test_data = data_io.story_question_answer_idx(
						test_group_answers,
						vocab_in
					)
		test_data = ( x for x in test_data if x[1].shape[0] <= length_limit )
		tests = [ np.array(
					test(input_data,idxs,question_data,ans_w,ans_evds),
					dtype=np.float32
				)
				for input_data,idxs,question_data,ans_w,ans_evds in test_data ]
		errors = sum(tests) / len(tests)
		print "Error rate:", errors
		print "Starting epoch ", epoch
		if errors < best_error * 0.9:
			P.save('model.pkl')
			print "Wrote model."
			best_error = errors
			length_limit += 2
		else:
#			learning_rate = learning_rate / 2
#			batch_size = max(1, batch_size // 2)
#			print "Learning rate:", learning_rate
			P.save('tmp.model.pkl')
		buffer_size = 256 / batch_size

		train_group_answers = data_io.randomise(group_answers)
		training_data = data_io.story_question_answer_idx(train_group_answers,vocab_in)
		training_data = ( x for x in training_data if x[1].shape[0] <= length_limit )
		training_data = data_io.sortify(training_data,key=lambda x:x[1].shape[0])
		batched_training_data = data_io.batch(
Author: wavelets | Project: neural-qa | Lines of code: 33 | Source file: train.py

Example 3: zip

# Required module import: from theano_toolkit.parameters import Parameters [as alias]
# Or alternatively: from theano_toolkit.parameters.Parameters import save [as alias]
    acc = theano.function(
            inputs=[X, Y],
            outputs=cost,
            updates=[
                (a, a + g) for a, g in zip(gradient_acc, gradients)
            ] + [(counter, counter + np.float32(1.))]
        )
    update = theano.function(
            inputs=[], outputs=[],
            updates=updates.momentum(params, [g / counter for g in gradient_acc])
                    + [(a, np.float32(0) * a) for a in gradient_acc]
                    + [(counter, np.float32(0.))]
        )

    test = theano.function(
            inputs=[X,Y],
            outputs=probs[:,Y]
        )

    training_examples = [ word.strip() for word in open('dictionary.txt') ]
    import random
    for _ in xrange(1500):
        random.shuffle(training_examples)
        for i,string in enumerate(training_examples):
            print acc(font.imagify(string),label_seq(string))
            if i % 20 == 0: update()
            if i % 100 == 0:
                hinton.plot(test(font.imagify("test"),label_seq("test")).T,max_arr=1.)
                hinton.plot(font.imagify("test").T[::-1].astype('float32'))
        P.save('model.pkl')
Author: Duum | Project: theano-ctc | Lines of code: 32 | Source file: ocr.py
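
Example 3 accumulates gradients across examples in shared buffers and only applies a momentum step every 20 examples, averaging by the counter. The following toy NumPy-only sketch isolates that accumulate-then-average idea; all names and numbers are stand-ins, and a plain SGD step replaces the momentum update for brevity.

import numpy as np

w = np.zeros(3, dtype=np.float32)      # stand-in parameter vector
grad_acc = np.zeros_like(w)            # gradient accumulation buffer
counter = 0.0
lr = 0.1

def fake_gradient(step):
    # stand-in for T.grad: returns some per-example gradient
    return np.full(3, step, dtype=np.float32)

for step in range(1, 41):
    grad_acc += fake_gradient(step)    # like acc(): add this example's gradient
    counter += 1.0
    if step % 20 == 0:                 # like update(): apply the averaged gradient,
        w -= lr * (grad_acc / counter)
        grad_acc[:] = 0.0              # then zero the buffer and the counter
        counter = 0.0

print(w)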

Example 4: xrange

# Required module import: from theano_toolkit.parameters import Parameters [as alias]
# Or alternatively: from theano_toolkit.parameters.Parameters import save [as alias]
    best_cost = np.inf
    increase_count = 0
    seen = 0
    for epoch in xrange(max_epochs):
        print "Epoch:", epoch + 1
        print "Batch size:", batch_size

        # Run test on validation set
        data_stream = data_io.stream(data_file, char2id)
        test_stream = islice(data_stream, validation_count)
        test_cost = test(test_stream)
        print "Perplexity:", test_cost

        if test_cost < improvement_threshold * best_cost:
            best_cost = test_cost
            P.save(output_file)
            increase_count = 0
        else:
            increase_count += 1
            if increase_count > patience:
                break

        # Run training
        data_stream = data_io.randomise(data_stream, buffer_size=1024)
        data_stream = data_io.sortify(data_stream, key=lambda x: len(x), buffer_size=512)
        batch_data_stream = data_io.batch(data_stream, batch_size=batch_size)
        batch_data_stream = data_io.randomise(batch_data_stream)

        for batch in batch_data_stream:
            avg_cost = train(batch)
            if np.isnan(avg_cost):
Author: OlafLee | Project: theano-nlp | Lines of code: 33 | Source file: train.py
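
The checkpointing logic in Example 4 -- call P.save only when the validation cost beats the best cost by a margin, and give up after `patience` consecutive non-improving epochs -- is a reusable pattern on its own. Here is a self-contained sketch of just that loop; the fake cost sequence, parameter, and file name are stand-ins.

import numpy as np
from theano_toolkit.parameters import Parameters

P = Parameters()
P.w = np.zeros(3, dtype=np.float32)    # stand-in parameter

best_cost = np.inf
increase_count = 0
patience = 2
improvement_threshold = 0.995

for test_cost in [10.0, 9.0, 9.2, 8.1, 8.15, 8.2, 8.3]:  # fake validation costs
    if test_cost < improvement_threshold * best_cost:
        best_cost = test_cost
        P.save('best.pkl')             # checkpoint only on sufficient improvement
        increase_count = 0
    else:
        increase_count += 1
        if increase_count > patience:  # out of patience: stop training
            break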

Example 5: zip

# Required module import: from theano_toolkit.parameters import Parameters [as alias]
# Or alternatively: from theano_toolkit.parameters.Parameters import save [as alias]
	output = T.nnet.softmax(T.dot(hidden,P.W_output))
	delay = 5
	label = X[:-delay]
	predicted = output[delay:]

	cost = -T.sum(T.log(predicted[T.arange(predicted.shape[0]),label]))
	params = P.values()
	gradients = T.grad(cost,wrt=params)


	update_methods = {
			'standard': [ (p, p - 0.001 * g) for p,g in zip(params,gradients) ],
#			'rmsprop' : updates.rmsprop(params,gradients),
#			'adadelta': updates.rmsprop(params,gradients),
		}
	P.save('init.pkl')
	for update_method in update_methods:
		print "Using update method:",update_method
		with open('train.%s.smart_init.log'%update_method,'w') as log:

			train = theano.function(
					inputs = [X],
					outputs = cost,
					updates = update_methods[update_method],
				)

			P.load('init.pkl')

			while True:
				cost_val = train(np.random.randint(0,8,size=20).astype(np.int32))
				log.write("%0.5f\n"%cost_val)
Author: wavelets | Project: neural-qa | Lines of code: 33 | Source file: lstm.py
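
Beyond checkpointing, Example 5 uses save/load for a fair optimizer comparison: it snapshots the freshly initialised parameters once (P.save('init.pkl')) and reloads that snapshot before each optimizer's run, so every update method starts from identical weights. A minimal sketch of just that pattern, with stand-in parameter and file names:

import numpy as np
from theano_toolkit.parameters import Parameters

P = Parameters()
P.w = np.random.randn(4).astype(np.float32)
P.save('init.pkl')          # freeze the shared starting point once

for method in ['standard', 'momentum']:
    P.load('init.pkl')      # every method starts from identical weights
    # ... build the update rule for `method` and run training here ...
    print('ran ' + method + ' from the saved init')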

Example 6: validate

# Required module import: from theano_toolkit.parameters import Parameters [as alias]
# Or alternatively: from theano_toolkit.parameters.Parameters import save [as alias]
        batched_stream = data_io.buffered_random(batched_stream, buffer_items=4)
        return batched_stream

    def validate():
        stream = data_io.stream_file('data/train.%02d.pklgz' % 0)
        stream = data_io.buffered_sort(stream, key=lambda x: x[1].shape[0], buffer_items=128)
        batched_stream = reader.batch_and_pad(stream, batch_size=32, mean=mean, std=std)

        total_cost = 0
        total_frames = 0
        for data, lengths in batched_stream:
            batch_avg_cost = test(data,lengths)
            batch_frames = np.sum(lengths)
            total_cost += batch_avg_cost * batch_frames
            total_frames += batch_frames
        return total_cost / total_frames

    import train_loop
    train_loop.run(
            data_iterator=stream,
            train_fun=lambda batch: train(batch[0], batch[1]),
            validation_score=validate,
            save_best_params=lambda: P.save('model.pkl'),
            load_best_params=lambda: P.load('model.pkl'),
            max_epochs=1000,
            patience=5000,
            patience_increase=2,
            improvement_threshold=0.999,
        )

Author: mohammadpz | Project: variational-autoencoders | Lines of code: 31 | Source file: train.py
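
One detail worth noting in the validate helper above: each batch's average cost is re-weighted by its frame count, so the returned value is a true per-frame average over the whole validation set rather than a mean of batch means. A tiny arithmetic sketch with made-up numbers:

batch_avg_costs = [0.5, 0.7]      # per-batch average costs (stand-ins)
batch_frames = [100, 300]         # frames in each batch

total_cost = sum(c * f for c, f in zip(batch_avg_costs, batch_frames))
total_frames = sum(batch_frames)
print(total_cost / total_frames)  # 0.65 -- not the naive mean of 0.6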


Note: The theano_toolkit.parameters.Parameters.save examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from projects contributed by the open-source community, and copyright of the source code remains with the original authors. Please consult the corresponding project's License before using or redistributing the code; do not reproduce this article without permission.