本文整理汇总了Python中timer.stop函数的典型用法代码示例。如果您正苦于以下问题:Python stop函数的具体用法?Python stop怎么用?Python stop使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了stop函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: link
def link(self, m1, m2, hypothetical=False, beta=1):
    """Merge the clusters containing mentions m1 and m2 (m1 is the antecedent).

    When `hypothetical` is True, the B^3 counters are updated only long
    enough to compute the F1 the merge would yield, then restored, and that
    F1 (for the given beta) is returned.  Otherwise the merge is committed
    and nothing is returned.
    """
    # m1 == -1 means "no antecedent", so there is nothing to merge.  Handle
    # it before starting the timer: the original called timer.start first
    # and the early return leaked a running "link" timer.
    if m1 == -1:
        return self.get_f1(beta=beta) if hypothetical else None
    timer.start("link")
    c1, c2 = self.mention_to_cluster[m1], self.mention_to_cluster[m2]
    assert c1 != c2
    new_c = c1 + c2
    # Snapshot the aggregate B^3 counts so a hypothetical merge can be undone.
    p_num, r_num, p_den, r_den = self.p_num, self.r_num, self.p_den, self.r_den
    # Singletons enter the precision denominator once they get linked.
    if len(c1) == 1:
        self.p_den += 1
    if len(c2) == 1:
        self.p_den += 1
    self.update_b3(new_c, hypothetical=hypothetical)
    if hypothetical:
        f1 = evaluation.f1(self.p_num, self.p_den, self.r_num, self.r_den, beta=beta)
        # Roll back: a hypothetical merge must leave no trace.
        self.p_num, self.r_num, self.p_den, self.r_den = p_num, r_num, p_den, r_den
        timer.stop("link")
        return f1
    else:
        # Commit the merge: record the antecedent link and replace the two
        # old clusters with their union.
        self.ana_to_ant[m2] = m1
        self.ant_to_anas[m1].append(m2)
        self.clusters.remove(c1)
        self.clusters.remove(c2)
        self.clusters.append(new_c)
        for m in new_c:
            self.mention_to_cluster[m] = new_c
        timer.stop("link")
示例2: unlink
def unlink(self, m):
    """Detach mention m from its antecedent and split its cluster in two:
    m plus everything transitively anaphoric to it, versus the rest."""
    timer.start("unlink")
    old_ant = self.ana_to_ant[m]
    if old_ant != -1:
        self.ana_to_ant[m] = -1
        self.ant_to_anas[old_ant].remove(m)
    old_c = self.mention_to_cluster[m]
    # Depth-first walk over the anaphor links rooted at m collects the
    # members of the cluster that follows m.
    members = [m]
    pending = self.ant_to_anas[m][:]
    while pending:
        current = pending.pop()
        members.append(current)
        pending.extend(self.ant_to_anas[current])
    c1 = tuple(members)
    c2 = tuple(x for x in old_c if x not in c1)
    # Refresh the B^3 bookkeeping for both halves of the split.
    self.update_b3(c1)
    self.update_b3(c2)
    self.clusters.remove(old_c)
    self.clusters.append(c1)
    self.clusters.append(c2)
    for member in c1:
        self.mention_to_cluster[member] = c1
    for member in c2:
        self.mention_to_cluster[member] = c2
    timer.stop("unlink")
示例3: update_b3
def update_b3(self, c, hypothetical=False):
    """Recompute the B^3 precision/recall contributions of cluster c.

    Subtracts each member's previously stored per-mention contribution
    (self.ps / self.rs) from the running numerators and adds the
    contribution implied by the new cluster.  When `hypothetical` is True
    the stored per-mention values are left untouched so the caller can
    restore the aggregate counts afterwards.
    """
    timer.start("update b3")
    if len(c) == 1:
        # A singleton contributes nothing: drop it from the precision
        # denominator and cancel its stored contributions.
        self.p_den -= 1
        self.p_num -= self.ps[c[0]]
        self.r_num -= self.rs[c[0]]
        if not hypothetical:
            # Consistency fix: the original zeroed ps/rs here even for
            # hypothetical updates, unlike the multi-mention branch below
            # (no visible caller reached this branch hypothetically).
            self.ps[c[0]] = 0
            self.rs[c[0]] = 0
    else:
        # Count how many members of c fall into each gold cluster.
        intersect_counts = Counter()
        for m in c:
            if m in self.mention_to_gold:
                intersect_counts[self.mention_to_gold[m]] += 1
        for m in c:
            if m in self.mention_to_gold:
                self.p_num -= self.ps[m]
                self.r_num -= self.rs[m]
                g = self.mention_to_gold[m]
                ic = intersect_counts[g]
                # Per-mention B^3: |c ∩ g| / |c| for precision,
                # |c ∩ g| / |g| for recall.
                self.p_num += ic / float(len(c))
                self.r_num += ic / float(len(g))
                if not hypothetical:
                    self.ps[m] = ic / float(len(c))
                    self.rs[m] = ic / float(len(g))
    timer.stop("update b3")
示例4: hashstring
def hashstring( str ):
    """Return the hexadecimal SHA-1 digest of the given byte string."""
    # NOTE(review): the parameter shadows the builtin `str`; the name is
    # kept unchanged so keyword callers are unaffected.
    timer.start('utils.hashstring')
    digest = hashlib.sha1()
    digest.update( str )
    hexkey = digest.hexdigest()
    timer.stop('utils.hashstring')
    return hexkey
示例5: finish_workers
def finish_workers( self ):
timer.start('work.finish.workers')
finished_worker_cnt = 0
for k, worker in enumerate( self.workers ):
if worker.process.poll() is not None:
(stdout, stderr) = worker.process.communicate()
print "Local execution complete (%s): %s (return code %d)" % (worker.call.cbid, worker.call.body['cmd'], worker.process.returncode)
finished_worker_cnt += 1
if worker.process.returncode != 0:
if len(stdout)>0:
d( 'exec', 'stdout:\n', stdout )
if len(stderr)>0:
d( 'exec', 'stderr:\n', stderr )
else:
if len(stdout)>0:
print 'stdout:\n', stdout
if len(stderr)>0:
print 'stderr:\n', stderr
worker.finish()
del self.workers[k]
db.task_del( worker.call.cbid )
timer.stop('work.finish.workers')
return finished_worker_cnt
示例6: start_workers
def start_workers( self ):
global first_try
timer.start('work.start.workers')
started_worker_cnt = 0
slots = glob.exec_local_concurrency - len(self.workers)
batch = db.task_claim( slots )
sys.stdout.write('.')
sys.stdout.flush()
if batch:
calls = db.task_get( batch )
for call in calls:
self.execute( call )
started_worker_cnt += 1
elif len(self.workers)==0 and db.task_remain( glob.workflow_id )==0 and self.total_tasks>0:
timer.report()
return -2
#sys.exit(0)
if first_try:
if started_worker_cnt==0:
print 'Nothing to execute.'
timer.report()
return -1
#sys.exit(0)
first_try = False
self.total_tasks += started_worker_cnt
timer.stop('work.start.workers')
return started_worker_cnt
示例7: finish_workers
def finish_workers( self ):
timer.start('work.finish.workers')
finished_worker_cnt = 0
t = self.wq.wait(10)
if t:
worker = self.workers[t.id]
print "WQ execution (%s) completed in %s: %s (return code %d)" % (worker.call.cbid, worker.sandbox, worker.call.body['cmd'], t.return_status)
if t.return_status != 0:
self.fails.append( worker.call )
if worker.debug( t ):
self.wq.blacklist( t.host )
node = self.db.fetch( worker.call.key )
self.execute( node.obj )
else:
worker.finish()
self.worker_cnt -= 1
del self.workers[t.id]
db.task_del( worker.call.cbid )
timer.stop('work.finish.workers')
return finished_worker_cnt
示例8: task_get
def task_get( self, batch ):
calls = []
timer.start('db.task.get')
while True:
try:
conn, log = (self.tconn, self.tlog)
with conn:
curs = conn.cursor()
curs.execute('SELECT cbid FROM todos WHERE assigned=?', (batch,) )
res = curs.fetchall()
for r in res:
call = self.find_one( r['cbid'] )
if call:
calls.append( call )
except sqlite3.OperationalError:
print 'Database (todos) is locked on task_get'
time.sleep(1)
continue
break
timer.stop('db.task.get')
return calls
示例9: run_agent
def run_agent(self, s, beta=0, iteration=1):
    """Run the clustering agent on state s until it is complete.

    With probability (1 - beta) the agent follows its policy: the model's
    argmax score, or always the "no antecedent" action when iteration is
    -1.  Otherwise it takes the lowest-cost action.  Returns the list of
    (antecedent, mention) pairs that were merged.
    """
    timer.start("running agent")
    merged_pairs = []
    while not s.is_complete():
        example = s.get_example(self.training)
        # The last candidate index means "start a new cluster" (no merge).
        no_link_action = example['starts'].size
        if self.training:
            self.replay_memory.update(example)
        if random.random() <= beta:
            # Off-policy step: pick the cheapest action by cost.
            action = np.argmin(example['costs'][:, 0])
        elif iteration == -1:
            action = no_link_action
        else:
            timer.start("predict")
            scores = self.model.predict_on_batch(example)[0]
            if self.training:
                self.loss_aggregator.update(np.sum(scores * example['costs']))
            action = np.argmax(scores[:, 0])
            timer.stop("predict")
        if action != no_link_action:
            merged_pairs.append((s.candidate_antecedents[action], s.current_mention))
        s.do_action(action)
    timer.stop("running agent")
    return merged_pairs
示例10: start_workers
def start_workers( self ):
    """Claim pending tasks (bounded by the queue's hunger) and submit them.

    Returns the number of tasks submitted, or -1 when the queue is idle
    and no tasks remain (after sleeping briefly and reporting timers).
    """
    global first_try
    timer.start('work.start.workers')
    started_worker_cnt = 0
    slots = self.wq.hungry()
    # NOTE(review): caps a reported 100 slots down to 20 — reason unclear;
    # confirm against wq.hungry() semantics before changing.
    if slots==100:
        slots = 20
    batch = db.task_claim( slots )
    if batch:
        sys.stdout.write('.')
        sys.stdout.flush()
        calls = db.task_get( batch )
        for call in calls:
            self.execute( call )
            started_worker_cnt += 1
    elif len(self.workers)==0 and db.task_cnt()==0:
        time.sleep(5)
        sys.stdout.write(',')
        sys.stdout.flush()
        # Stop the timer before the early return so 'work.start.workers'
        # is not left running (the original leaked it on this path).
        timer.stop('work.start.workers')
        timer.report()
        return -1
    self.total_tasks += started_worker_cnt
    timer.stop('work.start.workers')
    return started_worker_cnt
示例11: task_prep
def task_prep( self, item ):
calls = []
timer.start('db.task.prep')
while True:
try:
conn, log = (self.tconn, self.tlog)
with conn:
# Check if task is already queued
curs = conn.cursor()
curs.execute('SELECT cbid FROM todos WHERE next_arg IN (?,?)', (item.cbid, item.dbid,) )
res = curs.fetchall()
for r in res:
call = self.find_one( r['cbid'] )
if call:
# Update next_arg for task
self.task_update( call )
except sqlite3.OperationalError:
print 'Database (todos) is locked on task_prep'
time.sleep(1)
continue
break
timer.stop('db.task.prep')
return calls
示例12: connect
def connect( self ):
    """Open the data, cache, trash and work SQLite databases and ensure
    their schemas (items / todos tables plus indexes) exist."""
    global dlog, clog, trlog
    timer.start('db.connect')
    self.dconn, dlog = self.sqlite3_connect(
        glob.data_db_pathname, glob.data_log_pathname, 'data_db_logger', glob.data_file_directory )
    self.cconn, clog = self.sqlite3_connect(
        glob.cache_db_pathname, glob.cache_log_pathname, 'cache_db_logger', glob.cache_file_directory )
    # Bug fix: the original assigned the trash connection's logger to
    # `clog`, clobbering the cache logger set just above; give it its
    # own global instead.
    self.trconn, trlog = self.sqlite3_connect(
        glob.trash_db_pathname, glob.trash_log_pathname, 'trash_db_logger', glob.trash_file_directory )
    self.tconn, self.tlog = self.sqlite3_connect( glob.work_db_pathname, glob.work_log_pathname )
    db_init_str = """
    CREATE TABLE IF NOT EXISTS items (
        id INTEGER NOT NULL, type TEXT, cbid TEXT, dbid TEXT, wfid UUID, step TEXT,
        "when" FLOAT, meta TEXT, body TEXT, repo UUID, path TEXT, size INTEGER,
        PRIMARY KEY (id)
    );
    """
    db_init_todos = """
    CREATE TABLE IF NOT EXISTS todos (
        id INTEGER NOT NULL, cbid TEXT, wfid UUID, step TEXT, priority INTEGER DEFAULT 0,
        next_arg TEXT, assigned TEXT, failures INTEGER DEFAULT 0,
        PRIMARY KEY (id)
    );
    """
    # The three content stores share an identical items schema and indexes,
    # so initialize them in one loop instead of three copy-pasted blocks.
    for conn in (self.dconn, self.cconn, self.trconn):
        curs = conn.cursor()
        curs.execute( db_init_str )
        curs.execute( 'CREATE INDEX IF NOT EXISTS itcbids ON items(cbid);' )
        curs.execute( 'CREATE INDEX IF NOT EXISTS itdbids ON items(dbid);' )
        conn.commit()
    curs = self.tconn.cursor()
    curs.execute( db_init_todos )
    curs.execute( 'CREATE INDEX IF NOT EXISTS tdnext ON todos(next_arg, assigned);' )
    self.tconn.commit()
    # Work-queue writes begin EXCLUSIVE transactions to serialize writers.
    self.tconn.isolation_level = 'EXCLUSIVE'
    timer.stop('db.connect')
示例13: evaluate_model
def evaluate_model(dataset, docs, model, model_props, stats, save_output=False, save_scores=False,
                   print_table=False):
    # Evaluate `model` over every batch of `dataset`, accumulating
    # ranking/classification metrics into `stats`.  Optionally pickles the
    # predicted links, raw scores and processed docs, and optionally prints
    # a LaTeX-style table row of the coreference metrics.
    prog = utils.Progbar(dataset.n_batches)
    # Ranking models get a ranking tracker; otherwise a classification one.
    mt = RankingMetricsTracker(dataset.name, model_props=model_props) \
        if model_props.ranking else ClassificationMetricsTracker(dataset.name)
    # Separate tracker for the anaphoricity sub-task.
    mta = ClassificationMetricsTracker(dataset.name + " anaphoricity", anaphoricity=True)
    docs_by_id = {doc.did: doc for doc in docs} if model_props.ranking else {}
    saved_links, saved_scores = (defaultdict(list) if save_output else None,
                                 defaultdict(dict) if save_scores else None)
    for i, X in enumerate(dataset):
        # Skip batches with no labels.
        if X['y'].size == 0:
            continue
        progress = []
        scores = model.predict_on_batch(X)
        if model_props.ranking:
            # Apply the batch's scores to its document's clustering.
            update_doc(docs_by_id[X['did']], X, scores,
                       saved_links=saved_links, saved_scores=saved_scores)
        if model_props.anaphoricity and not model_props.ranking:
            progress.append(("anaphoricity loss", mta.update(X, scores[0][:, 0])))
        if not model_props.anaphoricity_only:
            # For non-ranking models the coreference scores sit at output
            # index 1 when an anaphoricity head is present, else index 0.
            progress.append(("loss", mt.update(
                X, scores if model_props.ranking else
                scores[1 if model_props.anaphoricity else 0][:, 0])))
        prog.update(i + 1, exact=progress)
    if save_scores:
        print "Writing scores"
        utils.write_pickle(saved_scores, model_props.path + dataset.name + '_scores.pkl')
    if save_output:
        print "Writing output"
        utils.write_pickle(saved_links, model_props.path + dataset.name + '_links.pkl')
        utils.write_pickle(docs, model_props.path + dataset.name + '_processed_docs.pkl')
    timer.start("metrics")
    if model_props.ranking:
        stats.update(compute_metrics(docs, dataset.name))
    # prog.start is the progress bar's start timestamp.
    stats["validate time"] = time.time() - prog.start
    if model_props.anaphoricity and not model_props.ranking:
        mta.finish(stats)
    if not model_props.anaphoricity_only:
        mt.finish(stats)
    timer.stop("metrics")
    if print_table:
        # One " & "-separated row of percentages: MUC, B^3, CEAF-e, CoNLL.
        print " & ".join(map(lambda x: "{:.2f}".format(x * 100), [
            stats[dataset.name + " muc precision"],
            stats[dataset.name + " muc recall"],
            stats[dataset.name + " muc"],
            stats[dataset.name + " b3 precision"],
            stats[dataset.name + " b3 recall"],
            stats[dataset.name + " b3"],
            stats[dataset.name + " ceafe precision"],
            stats[dataset.name + " ceafe recall"],
            stats[dataset.name + " ceafe"],
            stats[dataset.name + " conll"],
        ]))
示例14: train
def train(self):
timer.start("train")
X = self.memory.pop(int(random.random() * len(self.memory)))
self.train_on_example(X)
self.size -= 1
timer.stop("train")
if self.trainer.n == 1:
print "Start training!"
print
示例15: dump
def dump( self, key, pathname ):
timer.start('db.dump')
item = self.find_one( key )
if not item:
print 'Not ready yet: %s' % key
else:
with open( pathname, 'w' ) as f:
item.stream_content( f )
timer.stop('db.dump')