This page collects typical usage examples of the Python method model.Model.save_model. If you are wondering what Model.save_model does, how to call it, or what real uses of it look like, the curated code examples below should help. You can also look further into usage examples of the containing class, model.Model.
Three code examples of Model.save_model are shown below, sorted by popularity by default. Upvoting the examples you find useful helps the site recommend better Python code samples.
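Before the full examples, here is a minimal sketch of the typical call pattern, assuming a Model class that persists its parameters to a file path via save_model and restores them via load_model, as the examples below do; the file name and the training step are hypothetical placeholders. Note that the loader differs slightly across the examples: Example 1 calls load_model on an instance, while Examples 2 and 3 use a classmethod-style Model.load_model that returns the loaded model.

from model import Model

model = Model()
# ... initialize and train the model here (project-specific) ...
model.save_model('my_model.m')  # hypothetical path: persist the trained model to disk

# later, restore it (classmethod-style loader, as in Examples 2 and 3)
model = Model.load_model('my_model.m')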
Example 1: SemiLDA
# Required import: from model import Model [as alias]
# Or: from model.Model import save_model [as alias]
class SemiLDA(object):
    def __init__(self, args):
        self.args = args
        self.model = Model()
        try:  # test: load model
            if args.test:
                self.model.load_model(args.model)
        except:  # train: init corpus and model
            self.corpus = Corpus()
            self.model.init_model(args)
            if args.rule:
                self.model.load_rules(args.rule)
            self.corpus.init_corpus_and_model(args.train, self.model)
        # init sampler
        self.sampler = Sampler(self.model)

    def train(self):
        for i in xrange(self.args.burn_in):
            self.sampler.sample_corpus(self.corpus)
            if not self.args.slient:
                loglike = self.sampler.loglikelihood(self.corpus)
                print 'burn in:%s, loglikelihood:%s' % (i, loglike)
        for i in xrange(self.args.max_iter):
            self.sampler.sample_corpus(self.corpus)
            self.model.accumulative()
            if not self.args.slient:
                loglike = self.sampler.loglikelihood(self.corpus)
                print 'iter:%s, loglikelihood:%s' % (i, loglike)
        self.model.save_model(self.args.model)
        if self.args.dump:
            self.model.dump_topic_words(self.args.dump)

    def infer(self):
        self.sampler.sample_test(self.args.test, self.args.output, self.args.burn_in, self.args.max_iter)
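A hedged usage sketch for the SemiLDA class above: the argument object is a hypothetical stand-in for the project's argparse namespace (attribute names are taken from the code, values are invented), and Corpus and Sampler are assumed to be importable from the same project. Because the constructor only falls back to the training setup when reading or loading args.test fails, the sketch simply omits the test attribute.

from argparse import Namespace

# hypothetical training configuration; attribute names mirror what SemiLDA reads
args = Namespace(train='train.txt', model='lda.model', rule=None, dump=None,
                 burn_in=100, max_iter=500, slient=False)

lda = SemiLDA(args)  # no 'test' attribute -> except branch builds corpus and model
lda.train()          # runs the sampling loops, then calls model.save_model(args.model)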
Example 2: main
# Required import: from model import Model [as alias]
# Or: from model.Model import save_model [as alias]
#.........(part of the code omitted here).........
            r1 = n_correct_tag/n_gold_tag if n_parsed_tag else .0
            if args.verbose > 2: print >> sys.stderr,"Tagging Precision:%s Recall:%s" % (p1,r1)
            instance.comment['alignments'] += ''.join(' %s-%s|%s'%(idx-1,idx,instance.amr.get_pid(state.A.abt_node_table[idx])) for idx in state.A.abt_node_table if isinstance(idx,int))
            aligned_instances.append(instance)
            pseudo_gold_amr.append(GraphState.get_parsed_amr(state.A))
            #gold_amr.append(instance.amr)
            #assert set(state.A.tuples()) == set(instance.gold_graph.tuples())
        pt = n_correct_total/n_parsed_total if n_parsed_total != .0 else .0
        rt = n_correct_total/n_gold_total if n_gold_total !=.0 else .0
        ft = 2*pt*rt/(pt+rt) if pt+rt != .0 else .0
        write_parsed_amr(pseudo_gold_amr,aligned_instances,amr_file,'pseudo-gold',hand_alignments)
        print "Total Accuracy: %s, Recall: %s, F-1: %s" % (pt,rt,ft)
        tp = n_correct_tag_total/n_parsed_tag_total if n_parsed_tag_total != .0 else .0
        tr = n_correct_tag_total/n_gold_tag_total if n_gold_tag_total != .0 else .0
        print "Tagging Precision:%s Recall:%s" % (tp,tr)
        #amr_parser.record_actions('data/action_set.txt')
    elif args.mode == 'train': # training
        print "Parser Config:"
        print "Incorporate Coref Information: %s"%(constants.FLAG_COREF)
        print "Incorporate SRL Information: %s"%(constants.FLAG_PROP)
        print "Dependency parser used: %s"%(constants.FLAG_DEPPARSER)
        train_instances = preprocess(amr_file,START_SNLP=False)
        if args.dev: dev_instances = preprocess(args.dev,START_SNLP=False)
        if args.section != 'all':
            print "Choosing corpus section: %s"%(args.section)
            tcr = constants.get_corpus_range(args.section,'train')
            train_instances = train_instances[tcr[0]:tcr[1]]
            if args.dev:
                dcr = constants.get_corpus_range(args.section,'dev')
                dev_instances = dev_instances[dcr[0]:dcr[1]]
        feat_template = args.feat if args.feat else None
        model = Model(elog=experiment_log)
        #model.output_feature_generator()
        parser = Parser(model=model,oracle_type=DET_T2G_ORACLE_ABT,action_type=args.actionset,verbose=args.verbose,elog=experiment_log)
        model.setup(action_type=args.actionset,instances=train_instances,parser=parser,feature_templates_file=feat_template)
        print >> experiment_log, "BEGIN TRAINING!"
        for iter in xrange(1,args.iterations+1):
            print >> experiment_log, "shuffling training instances"
            random.shuffle(train_instances)
            print >> experiment_log, "Iteration:",iter
            begin_updates = parser.perceptron.get_num_updates()
            parser.parse_corpus_train(train_instances)
            parser.perceptron.average_weight()
            #model.save_model(args.model+'-iter'+str(iter)+'-'+str(int(time.time()))+'.m')
            model.save_model(args.model+'-iter'+str(iter)+'.m')
            if args.dev:
                print >> experiment_log ,"Result on develop set:"
                _,parsed_amr = parser.parse_corpus_test(dev_instances)
                write_parsed_amr(parsed_amr,dev_instances,args.dev,args.section+'.'+str(iter)+'.parsed')
        print >> experiment_log ,"DONE TRAINING!"
    elif args.mode == 'parse': # actual parsing
        test_instances = preprocess(amr_file,START_SNLP=False,INPUT_AMR=False)
        if args.section != 'all':
            print "Choosing corpus section: %s"%(args.section)
            tcr = constants.get_corpus_range(args.section,'test')
            test_instances = test_instances[tcr[0]:tcr[1]]
        #random.shuffle(test_instances)
        print >> experiment_log, "Loading model: ", args.model
        model = Model.load_model(args.model)
        parser = Parser(model=model,oracle_type=DET_T2G_ORACLE_ABT,action_type=args.actionset,verbose=args.verbose,elog=experiment_log)
        print >> experiment_log ,"BEGIN PARSING"
        span_graph_pairs,results = parser.parse_corpus_test(test_instances)
        write_parsed_amr(results,test_instances,amr_file,suffix='%s.parsed'%(args.section))
        #write_span_graph(span_graph_pairs,test_instances,amr_file,suffix='spg.50')
        ################
        # for eval #
        ################
        #pickle.dump(span_graph_pairs,open('data/eval/%s_spg_pair.pkl'%(amr_file),'wb'),pickle.HIGHEST_PROTOCOL)
        #pickle.dump(test_instances,open('data/eval/%s_instances.pkl'%(amr_file),'wb'),pickle.HIGHEST_PROTOCOL)
        print >> experiment_log ,"DONE PARSING"
        #plt.hist(results)
        #plt.savefig('result.png')
    elif args.mode == 'eval':
        '''break down error analysis'''
        # TODO: here use pickled file, replace it with parsed AMR and gold AMR
        span_graph_pairs = pickle.load(open(args.eval[0],'rb'))
        instances = pickle.load(open(args.eval[1],'rb'))
        amr_parser = Parser(oracle_type=DET_T2G_ORACLE_ABT,verbose=args.verbose)
        error_stat = defaultdict(lambda:defaultdict(lambda:defaultdict(list)))
        for spg_pair,instance in zip(span_graph_pairs,instances):
            amr_parser.errorAnalyze(spg_pair[0],spg_pair[1],instance,error_stat)
    else:
        arg_parser.print_help()
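Example 2 writes one checkpoint per training iteration, using the naming pattern args.model + '-iterN.m'. A small sketch, assuming the classmethod-style loader from the example and a hypothetical checkpoint prefix, for picking up the most recent checkpoint:

import glob
import re

# hypothetical prefix; matches the '<args.model>-iter<N>.m' files written in Example 2
checkpoints = glob.glob('amr-parser-iter*.m')
latest = max(checkpoints, key=lambda p: int(re.search(r'-iter(\d+)\.m$', p).group(1)))
model = Model.load_model(latest)  # resume evaluation or parsing from the newest iteration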
Example 3: main
# Required import: from model import Model [as alias]
# Or: from model.Model import save_model [as alias]
#.........(part of the code omitted here).........
            #print >> log, ref_depGraph.print_tuples()
            print >> log, amr_aligner.print_align_result(alresult,amr)
            #raw_input('ENTER to continue')
            counter += 1
        pickle.dump(instances,open(gold_instances_file,'wb'),pickle.HIGHEST_PROTOCOL)
        #pickle.dump(ref_graphs,open('./data/ref_graph.p','wb'),pickle.HIGHEST_PROTOCOL)
        if LOGGED:
            sys.stderr.close()
            sys.stderr = saveerr
        print >> log, "Done alignment and gold graph generation."
        sys.exit()
    # test user guide actions
    elif args.mode == 'userGuide':
        print 'Read in training instances...'
        train_instances = preprocess(amr_file,False)
        sentID = int(raw_input("Input the sent ID:"))
        amr_parser = Parser()
        amr_parser.testUserGuide(train_instances[sentID])
        sys.exit()
    # test deterministic oracle
    elif args.mode == 'oracleGuide':
        train_instances = preprocess(amr_file,False)
        start_step = args.start_step
        begin = args.begin
        amr_parser = Parser(oracle_type=DETERMINE_TREE_TO_GRAPH_ORACLE_SC,verbose=args.verbose)
        #ref_graphs = pickle.load(open('./data/ref_graph.p','rb'))
        n_correct_total = .0
        n_parsed_total = .0
        n_gold_total = .0
        pseudo_gold_amr = []
        for instance in train_instances[begin:]:
            state = amr_parser.testOracleGuide(instance,start_step)
            n_correct_arc,n1,n_parsed_arc, n_gold_arc,_,_,_ = state.evaluate()
            assert n_correct_arc == n1
            n_correct_total += n_correct_arc
            n_parsed_total += n_parsed_arc
            n_gold_total += n_gold_arc
            p = n_correct_arc/n_parsed_arc if n_parsed_arc else .0
            indicator = 'PROBLEM!' if p < 0.5 else ''
            if args.dev > 2: print >> sys.stderr, "Accuracy: %s %s\n" % (p,indicator)
            #if instance.sentID == 704:
            #    import pdb
            #    pdb.set_trace()
            pseudo_gold_amr.append(GraphState.get_parsed_amr(state.A))
            #assert set(state.A.tuples()) == set(instance.gold_graph.tuples())
        pt = n_correct_total/n_parsed_total if n_parsed_total != .0 else .0
        rt = n_correct_total/n_gold_total if n_gold_total !=.0 else .0
        ft = 2*pt*rt/(pt+rt) if pt+rt != .0 else .0
        write_parsed_amr(pseudo_gold_amr,train_instances,amr_file,'pseudo-gold')
        print "Total Accuracy: %s, Recall: %s, F-1: %s" % (pt,rt,ft)
        #amr_parser.record_actions('data/action_set.txt')
    elif args.mode == 'train': # actual parsing
        train_instances = preprocess(amr_file,False)
        if args.dev: dev_instances = preprocess(args.dev,False)
        feat_template = args.feat if args.feat else None
        model = Model(elog=experiment_log)
        model.setup(action_type=args.actionset,instances=train_instances,feature_templates_file=feat_template)
        #model.output_feature_generator()
        parser = Parser(model=model,action_type=args.actionset,verbose=args.verbose,elog=experiment_log)
        print >> experiment_log, "BEGIN TRAINING!"
        for iter in xrange(1,args.iterations+1):
            print >> experiment_log, "shuffling training instances"
            random.shuffle(train_instances)
            print >> experiment_log, "Iteration:",iter
            begin_updates = parser.perceptron.get_num_updates()
            parser.parse_corpus_train(train_instances)
            parser.perceptron.average_weight()
            #model.save_model(args.model+'-iter'+str(iter)+'-'+str(int(time.time()))+'.m')
            model.save_model(args.model+'-iter'+str(iter)+'.m')
            if args.dev:
                print >> experiment_log ,"Result on develop set:"
                parsed_amr = parser.parse_corpus_test(dev_instances)
                write_parsed_amr(parsed_amr,dev_instances,args.dev)
        print >> experiment_log ,"DONE TRAINING!"
    elif args.mode == 'parse':
        test_instances = preprocess(amr_file,False)
        model = Model.load_model(args.model)
        parser = Parser(model=model,action_type=args.actionset,verbose=args.verbose,elog=experiment_log)
        print >> experiment_log ,"BEGIN PARSING"
        results = parser.parse_corpus_test(test_instances)
        write_parsed_amr(results,test_instances,amr_file)
        print >> experiment_log ,"DONE PARSING"
        #pickle.dump(results,open('data/gold_edge_graph.pkl','wb'),pickle.HIGHEST_PROTOCOL)
        #plt.hist(results)
        #plt.savefig('result.png')
    else:
        arg_parser.print_help()