

Python Parameters.load Method Code Examples

This article collects typical usage examples of the Python method theano_toolkit.parameters.Parameters.load. If you have been wondering what exactly Parameters.load does, how to call it, or where it is used in practice, the hand-picked code examples below should help. You can also explore further usage examples of theano_toolkit.parameters.Parameters, the class this method belongs to.


The sections below show 6 code examples of the Parameters.load method, sorted by popularity by default.
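
As a quick orientation before the examples: Parameters is a container whose entries are Theano shared variables. Model-building code registers parameters on it, P.save(filename) pickles their current values, and P.load(filename) restores them into an already built container. The sketch below assembles that round trip from the patterns visible in the examples that follow; the parameter names W_hidden and b_hidden and their shapes are made up for illustration.

import numpy as np
from theano_toolkit.parameters import Parameters

# Build a container and register two (made-up) parameters; assigning a numpy
# array creates a Theano shared variable behind the scenes.
P = Parameters()
P['W_hidden'] = np.random.randn(10, 10).astype(np.float32)
P['b_hidden'] = np.zeros((10,), dtype=np.float32)

P.save('model.pkl')        # persist the current parameter values to disk

# Later (or in another process): rebuild the same parameter set, then load.
# Every example below builds the parameters before calling load, so this
# sketch does the same.
P2 = Parameters()
P2['W_hidden'] = np.zeros((10, 10), dtype=np.float32)
P2['b_hidden'] = np.zeros((10,), dtype=np.float32)
P2.load('model.pkl')       # overwrite the freshly initialised values with the saved ones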

Example 1: __init__

# Required import: from theano_toolkit.parameters import Parameters [as alias]
# Or: from theano_toolkit.parameters.Parameters import load [as alias]
class Model:
	"""
	Simple predictive model for forecasting words from
	sequence using LSTMs. Choose how many LSTMs to stack
	what size their memory should be, and how many
	words can be predicted.
	"""
	def __init__(self, hidden_size, input_size, vocab_size, stack_size=1, celltype=LSTM):
		self.hidden_size = hidden_size  # load() below reads this attribute

		# core layer in RNN/LSTM
		self.model = StackedCells(input_size, celltype=celltype, layers =[hidden_size] * stack_size)

		# add an embedding
		self.model.layers.insert(0, Embedding(vocab_size, input_size))

		# add a classifier:
		self.model.layers.append(Layer(hidden_size, vocab_size, activation = softmax))

		self.turing_params = Parameters()
		#init turing machine model
		self.turing_updates , self.turing_predict = turing_model.build(self.turing_params , hidden_size , vocab_size)

		# inputs are matrices of indices,
		# each row is a sentence, each column a timestep
		self._stop_word   = theano.shared(np.int32(999999999), name="stop word")
		self.for_how_long = T.ivector()
		self.input_mat = T.imatrix()
		self.priming_word = T.iscalar()
		self.srng = T.shared_randomstreams.RandomStreams(np.random.randint(0, 1024))

		# create symbolic variables for prediction:
		#change by darong #issue : what is greedy
		self.lstm_predictions = self.create_lstm_prediction()
		self.final_predictions = self.create_final_prediction()

		# create symbolic variable for greedy search:
		self.greedy_predictions = self.create_lstm_prediction(greedy=True)

		# create gradient training functions:
		self.create_cost_fun()#create 2 cost func(lstm final)

		self.lstm_lr = 0.01
		self.turing_lr = 0.01
		self.all_lr = 0.01
		self.create_training_function()#create 3 functions(lstm turing all)
		self.create_predict_function()#create 2 predictions(lstm final)

		# create ppl
		self.lstm_ppl = self.create_lstm_ppl()
		self.final_ppl = self.create_final_ppl()
		self.create_ppl_function()


	def save(self, save_file, vocab):
		pickle.dump(self.model, open(save_file, "wb")) # use pickle rather than cPickle: it can serialise lambda functions
		pickle.dump(vocab, open(save_file+'.vocab', "wb"))
	def save_turing(self, save_file):
		self.turing_params.save(save_file + '.turing')


	def load(self, load_file, lr):
		self.model = pickle.load(open(load_file, "rb"))
		if os.path.isfile(load_file + '.turing') :
			self.turing_params.load(load_file + '.turing')			
		else :
			print "no turing model!!!! pretrain with lstm param"
			self.turing_params['W_input_hidden'] = self.model.layers[-1].params[0].get_value().T #not sure
			self.turing_params['W_read_hidden']  = self.model.layers[-1].params[0].get_value().T
			self.turing_params['b_hidden_0'] = self.model.layers[-1].params[1].get_value()
			temp = self.model.layers[1].initial_hidden_state.get_value()[self.hidden_size:]
			self.turing_params['memory_init'] = temp.reshape((1,)+temp.shape)

		# need to compile again for calculating predictions after loading lstm
		self.srng = T.shared_randomstreams.RandomStreams(np.random.randint(0, 1024))
		self.lstm_predictions = self.create_lstm_prediction()
		self.final_predictions = self.create_final_prediction()
		self.greedy_predictions = self.create_lstm_prediction(greedy=True)#can change to final
		self.create_cost_fun()#create 2 cost func(lstm final)
		self.lstm_lr = lr
		self.turing_lr = lr#change this
		self.all_lr = lr
		self.create_training_function()#create 3 functions(lstm turing all)
		self.create_predict_function()#create 2 predictions(lstm final)
		self.lstm_ppl = self.create_lstm_ppl()
		self.final_ppl = self.create_final_ppl()
		self.create_ppl_function()
		print "done loading model"
#		print "done compile"


	def stop_on(self, idx):
		self._stop_word.set_value(idx)
		
	@property
	def params(self):
		return self.model.params
								 
	def create_lstm_prediction(self, greedy=False):
		def step(idx, *states):
			# new hiddens are the states we need to pass to LSTMs
#......... some code omitted here .........
Developer ID: darongliu, Project: Lstm_Turing_LM, Lines of code: 103, Source: lm_v4.py
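
For context, here is a hypothetical snippet showing how the Model class above might be rebuilt and restored from disk. The file name lm_model.pkl, the hyperparameter values, and the assumption that Model can be imported from lm_v4.py (together with its theano_lstm dependencies) are illustrative, not taken from the project.

import pickle
from lm_v4 import Model   # assumes lm_v4.py and its dependencies are importable

# Hyperparameters must match the ones used when the model was saved.
model = Model(hidden_size=100, input_size=50, vocab_size=8000, stack_size=2)

# Model.save wrote the vocabulary next to the model file with a ".vocab" suffix.
vocab = pickle.load(open("lm_model.pkl.vocab", "rb"))

# Restores the stacked LSTM; also loads "lm_model.pkl.turing" if it exists,
# otherwise the Turing-machine parameters are initialised from the LSTM weights.
model.load("lm_model.pkl", lr=0.01)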

Example 2: Parameters

# Required import: from theano_toolkit.parameters import Parameters [as alias]
# Or: from theano_toolkit.parameters.Parameters import load [as alias]
import sys

import theano
import theano.tensor as T

import data
import model
from theano_toolkit.parameters import Parameters
from theano_toolkit import updates


if __name__ == '__main__':
    model_filename = sys.argv[1]
    test_filename = sys.argv[2]
    train_filename = sys.argv[3]
    P = Parameters()
    data_X, df = data.load_test(test_filename, train_filename)
    f = model.build(P,
        input_size=data_X.shape[1],
        hidden_sizes=[256, 128, 64, 32]
    )
    X = T.matrix('X')
    predict = theano.function(
        inputs=[X],
        outputs=f(X, test=True) > 0.5,
    )
    P.load(model_filename)
    output = predict(data_X) 
    print data_X.shape
    print output.shape
    print df.values.shape
    df['probs'] = predict(data_X)
    df['Class'] = 'b'
    df['Class'][df.probs > 0.5] = 's'
    df['RankOrder'] = df.probs.rank(ascending=False,method='first').astype(int)
    df.to_csv('data/submission.csv', cols=['EventId','RankOrder','Class'], index=False)
Developer ID: shawntan, Project: higgs-boson, Lines of code: 32, Source: predict.py

Example 3: Parameters

# Required import: from theano_toolkit.parameters import Parameters [as alias]
# Or: from theano_toolkit.parameters.Parameters import load [as alias]
import pickle

import theano
import theano.tensor as T

import model
import vocab
from theano_toolkit.parameters import Parameters

# Note: "args" below comes from argument-parsing code omitted from this excerpt.

if __name__ == "__main__":
    model_file = args.model_file
    temp_input = args.temperature
    id2char = pickle.load(args.vocab_file)
    char2id = vocab.load(args.vocab_file.name)
    prime_str = args.prime

    P = Parameters()
    sampler = model.build_sampler(P,
                                  character_count=len(char2id) + 1,
                                  embedding_size=20,
                                  hidden_size=100
                                  )
    P.load(model_file)
    temp = T.scalar('temp')
    char = T.iscalar('char')
    p_cell_1, p_hidden_1, p_cell_2, p_hidden_2 = T.vector("p_cell_1"), T.vector("p_hidden_1"), T.vector("p_cell_2"), T.vector("p_hidden_2")

    output, cell_1, hidden_1, cell_2, hidden_2 = sampler(temp, char, p_cell_1, p_hidden_1, p_cell_2, p_hidden_2)
    sample = theano.function(
        inputs=[temp, char, p_cell_1, p_hidden_1, p_cell_2, p_hidden_2],
        outputs=[output, cell_1, hidden_1, cell_2, hidden_2]
    )

    orig_c1 = P.init_recurrent_1_cell.get_value()
    orig_h1 = T.tanh(P.init_recurrent_1_hidden).eval()
    orig_c2 = P.init_recurrent_2_cell.get_value()
    orig_h2 = T.tanh(P.init_recurrent_2_hidden).eval()
Developer ID: OlafLee, Project: theano-nlp, Lines of code: 32, Source: sample.py

Example 4: Parameters

# Required import: from theano_toolkit.parameters import Parameters [as alias]
# Or: from theano_toolkit.parameters.Parameters import load [as alias]
import theano
import theano.tensor as T
import numpy as np
from theano_toolkit import utils as U
from theano_toolkit import hinton
from theano_toolkit import updates
from theano_toolkit.parameters import Parameters

import ctc
import font
import lstm
from ocr import *

if __name__ == "__main__":
    import sys
    test_word = sys.argv[1]

    P = Parameters()
    X = T.matrix('X')

    predict = build_model(P,8,512,len(font.chars)+1)
    probs = predict(X)
    test = theano.function(inputs=[X],outputs=probs)
    P.load('model.pkl')
    image = font.imagify(test_word)
    hinton.plot(image.astype(np.float32).T[::-1])
    y_seq = label_seq(test_word)
    probs = test(image)
    print " ", ' '.join(font.chars[i] if i < len(font.chars) else "_" for i in np.argmax(probs,axis=1))
    hinton.plot(probs[:,y_seq].T,max_arr=1.)

Developer ID: Duum, Project: theano-ctc, Lines of code: 31, Source: ocr_test.py

Example 5: zip

# Required import: from theano_toolkit.parameters import Parameters [as alias]
# Or: from theano_toolkit.parameters.Parameters import load [as alias]
			'standard': [ (p, p - 0.001 * g) for p,g in zip(params,gradients) ],
#			'rmsprop' : updates.rmsprop(params,gradients),
#			'adadelta': updates.rmsprop(params,gradients),
		}
	P.save('init.pkl')
	for update_method in update_methods:
		print "Using update method:",update_method
		with open('train.%s.smart_init.log'%update_method,'w') as log:

			train = theano.function(
					inputs = [X],
					outputs = cost,
					updates = update_methods[update_method],
				)

			P.load('init.pkl')

			while True:
				cost_val = train(np.random.randint(0,8,size=20).astype(np.int32))
				log.write("%0.5f\n"%cost_val)
				print cost_val
				if cost_val < 0.01:
					break
		P.save('lstm.%s.smart_init.pkl'%update_method)

Developer ID: wavelets, Project: neural-qa, Lines of code: 26, Source: lstm.py

Example 6: validate

# Required import: from theano_toolkit.parameters import Parameters [as alias]
# Or: from theano_toolkit.parameters.Parameters import load [as alias]
        batched_stream = data_io.buffered_random(batched_stream, buffer_items=4)
        return batched_stream

    def validate():
        stream = data_io.stream_file('data/train.%02d.pklgz' % 0)
        stream = data_io.buffered_sort(stream, key=lambda x: x[1].shape[0], buffer_items=128)
        batched_stream = reader.batch_and_pad(stream, batch_size=32, mean=mean, std=std)

        total_cost = 0
        total_frames = 0
        for data, lengths in batched_stream:
            batch_avg_cost = test(data,lengths)
            batch_frames = np.sum(lengths)
            total_cost += batch_avg_cost * batch_frames
            total_frames += batch_frames
        return total_cost / total_frames

    import train_loop
    train_loop.run(
            data_iterator=stream,
            train_fun=lambda batch:train(batch[0],batch[1]),
            validation_score=validate,
            save_best_params=lambda:P.save('model.pkl'),
            load_best_params=lambda:P.load('model.pkl'),
            max_epochs=1000,
            patience=5000,
            patience_increase=2,
            improvement_threshold=0.999,
        )

Developer ID: mohammadpz, Project: variational-autoencoders, Lines of code: 31, Source: train.py


Note: The theano_toolkit.parameters.Parameters.load examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors, and any distribution or use should follow each project's license. Do not republish without permission.