This article collects typical usage examples of the time.perf_counter method in Python. If you are unsure what time.perf_counter does or how to call it, the curated code examples below should help; you can also explore the other members of the time module it belongs to.
The 15 code examples below all use time.perf_counter and are ordered by popularity by default.
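Before the collected examples, here is a minimal standalone sketch (not taken from any of the projects below) of the usual pattern: read time.perf_counter() before and after the work you want to measure and take the difference. The busy_work function is a hypothetical placeholder workload.

import time

def busy_work(n=100_000):
    # Hypothetical placeholder workload; substitute the code you actually want to time.
    return sum(i * i for i in range(n))

start = time.perf_counter()            # high-resolution, monotonic clock
busy_work()
elapsed = time.perf_counter() - start
print("busy_work took {:.6f} s".format(elapsed))

Note that perf_counter() counts seconds from an arbitrary reference point, so only the difference between two readings is meaningful.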
Example 1: _setup_query_latency_tracking
# Required import: import time [as alias]
# Or: from time import perf_counter [as alias]
# Records per-query latency on the connection via SQLAlchemy cursor-execute events
# (assumes `from sqlalchemy import event`).
def _setup_query_latency_tracking(self):
    engine = self._engine.sync_engine

    @event.listens_for(engine, "before_cursor_execute")
    def before_cursor_execute(
        conn, cursor, statement, parameters, context, executemany
    ):
        conn.info["query_start_time"] = perf_counter()

    @event.listens_for(engine, "after_cursor_execute")
    def after_cursor_execute(
        conn, cursor, statement, parameters, context, executemany
    ):
        conn.info["query_latency"] = perf_counter() - conn.info.pop(
            "query_start_time"
        )
Example 2: setUp
# Required import: import time [as alias]
# Or: from time import perf_counter [as alias]
def setUp(self):
    self.num_bboxes1 = 200
    self.num_bboxes2 = 20000
    self.image_width = 1024
    self.image_height = 1024

    # Random axis-aligned boxes with columns [x1, y1, x2, y2]; np.sort keeps x1 <= x2 and y1 <= y2.
    self.bboxes1 = np.zeros([self.num_bboxes1, 4])
    self.bboxes1[:, [0, 2]] = np.sort(np.random.rand(self.num_bboxes1, 2) * (self.image_width - 1))
    self.bboxes1[:, [1, 3]] = np.sort(np.random.rand(self.num_bboxes1, 2) * (self.image_height - 1))
    self.bboxes2 = np.zeros([self.num_bboxes2, 4])
    self.bboxes2[:, [0, 2]] = np.sort(np.random.rand(self.num_bboxes2, 2) * (self.image_width - 1))
    self.bboxes2[:, [1, 3]] = np.sort(np.random.rand(self.num_bboxes2, 2) * (self.image_height - 1))
    self.bboxes1_tensor = torch.from_numpy(self.bboxes1)
    self.bboxes2_tensor = torch.from_numpy(self.bboxes2)

    # Time the IoU computation between the two box sets.
    start = time.perf_counter()
    self.ious = bbox_overlaps(self.bboxes1_tensor, self.bboxes2_tensor).numpy()
    elapsed = time.perf_counter() - start
    print('bbox_overlaps time: ', elapsed)
Example 3: bench
# Required import: import time [as alias]
# Or: from time import perf_counter [as alias]
def bench(f_):
    timings_fwd = []
    timings_bck = []
    for _ in range(100):
        with f_ as f:
            # torch.cuda.synchronize() makes sure the GPU work has finished
            # before the second clock reading.
            tic = time.perf_counter()
            f.forward()
            torch.cuda.synchronize()
            toc = time.perf_counter()
            timings_fwd.append(toc - tic)

            tic = time.perf_counter()
            f.backward()
            torch.cuda.synchronize()
            toc = time.perf_counter()
            timings_bck.append(toc - tic)
    # Return the 25th/50th/75th percentiles of forward and backward timings.
    return (np.percentile(timings_fwd, [25, 50, 75]),
            np.percentile(timings_bck, [25, 50, 75]))
Example 4: __init__
# Required import: import time [as alias]
# Or: from time import perf_counter [as alias]
# Heartbeat thread initialisation; perf_counter marks the last ACK and last send times.
def __init__(self, *args, **kwargs):
    ws = kwargs.pop('ws', None)
    interval = kwargs.pop('interval', None)
    shard_id = kwargs.pop('shard_id', None)
    threading.Thread.__init__(self, *args, **kwargs)
    self.ws = ws
    self._main_thread_id = ws.thread_id
    self.interval = interval
    self.daemon = True
    self.shard_id = shard_id
    self.msg = 'Keeping websocket alive with sequence %s.'
    self.block_msg = 'Heartbeat blocked for more than %s seconds.'
    self.behind_msg = 'Can\'t keep up, websocket is %.1fs behind.'
    self._stop_ev = threading.Event()
    self._last_ack = time.perf_counter()
    self._last_send = time.perf_counter()
    self.latency = float('inf')
    self.heartbeat_timeout = ws._max_heartbeat_timeout
Example 5: youtube_daemon_worker
# Required import: import time [as alias]
# Or: from time import perf_counter [as alias]
# Worker loop: back off while the output queue is full, otherwise fetch a URL and collect its social context.
def youtube_daemon_worker(id, youtube_queue, social_context_queue, youtube_module_communication, youtube_oauth_credentials_folder):
    while True:
        if social_context_queue.qsize() > 50:
            time.sleep(10.0)
        else:
            url_counter, url, upper_timestamp = youtube_queue.get()
            try:
                # start_time = time.perf_counter()
                social_context = youtube_social_context.collect(url, youtube_module_communication + "_" + str(id), youtube_oauth_credentials_folder)
                # elapsed_time = time.perf_counter() - start_time
                # if social_context is not None:
                #     social_context["elapsed_time"] = elapsed_time
            except KeyError:
                social_context = None
            social_context_queue.put((url_counter, social_context))
            youtube_queue.task_done()
Example 6: reddit_daemon_worker
# Required import: import time [as alias]
# Or: from time import perf_counter [as alias]
# Same worker pattern as Example 5, for Reddit instead of YouTube.
def reddit_daemon_worker(id, reddit_queue, social_context_queue, reddit_oauth_credentials_path):
    while True:
        if social_context_queue.qsize() > 50:
            time.sleep(10.0)
        else:
            url_counter, url, upper_timestamp = reddit_queue.get()
            try:
                # start_time = time.perf_counter()
                social_context = reddit_social_context.collect(url, reddit_oauth_credentials_path)
                # elapsed_time = time.perf_counter() - start_time
                # if social_context is not None:
                #     social_context["elapsed_time"] = elapsed_time
            except KeyError:
                social_context = None
            social_context_queue.put((url_counter, social_context))
            reddit_queue.task_done()
Example 7: form_graphs
# Required import: import time [as alias]
# Or: from time import perf_counter [as alias]
def form_graphs(social_context_generator, assessment_timestamp):
    # fp = open("/home/georgerizos/Documents/fetch_times/build_graph_time" + ".txt", "a")
    for social_context_dict in social_context_generator:
        # start_time = time.perf_counter()
        snapshots,\
        targets,\
        title = get_snapshot_graphs(social_context_dict["social_context"],
                                    # social_context_dict["tweet_timestamp"],
                                    assessment_timestamp,
                                    social_context_dict["platform_name"])
        # elapsed_time = time.perf_counter() - start_time
        # fp.write(repr(elapsed_time) + "\n")
        if snapshots is None:
            continue
        if len(snapshots) > 1:
            graph_dict = social_context_dict
            graph_dict["snapshots"] = snapshots
            graph_dict["targets"] = targets
            graph_dict["title"] = title
            yield graph_dict
Example 8: train_runtime
# Required import: import time [as alias]
# Or: from time import perf_counter [as alias]
# Times a full training loop; CUDA is synchronized before each reading so GPU work is included.
def train_runtime(model, data, epochs, device):
    optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
    model = model.to(device)
    data = data.to(device)
    model.train()
    mask = data.train_mask if 'train_mask' in data else data.train_idx
    y = data.y[mask] if 'train_mask' in data else data.train_y

    if torch.cuda.is_available():
        torch.cuda.synchronize()
    t_start = time.perf_counter()

    for epoch in range(epochs):
        optimizer.zero_grad()
        out = model(data)
        loss = F.nll_loss(out[mask], y)
        loss.backward()
        optimizer.step()

    if torch.cuda.is_available():
        torch.cuda.synchronize()
    t_end = time.perf_counter()

    return t_end - t_start
Example 9: _onepop_expgrowth
# Required import: import time [as alias]
# Or: from time import perf_counter [as alias]
# Times a stdpopsim simulation and dumps the resulting tree sequence.
def _onepop_expgrowth(
        engine_id, out_dir, seed, N0=5000, N1=500, T=1000, **sim_kwargs):
    growth_rate = -np.log(N1 / N0) / T
    species = stdpopsim.get_species("DroMel")
    contig = species.get_contig("chr2R", length_multiplier=0.01)  # ~250 kb
    contig = irradiate(contig)
    model = _PiecewiseSize(N0, growth_rate, (T, N1, 0))
    model.generation_time = species.generation_time
    samples = model.get_samples(100)
    engine = stdpopsim.get_engine(engine_id)

    t0 = time.perf_counter()
    ts = engine.simulate(model, contig, samples, seed=seed, **sim_kwargs)
    t1 = time.perf_counter()

    out_file = out_dir / f"{seed}.trees"
    ts.dump(out_file)
    return out_file, t1 - t0
Example 10: _twopop_IM
# Required import: import time [as alias]
# Or: from time import perf_counter [as alias]
# Two-population isolation-with-migration simulation, timed like Example 9.
def _twopop_IM(
        engine_id, out_dir, seed,
        NA=1000, N1=500, N2=5000, T=1000, M12=0, M21=0, pulse=None, samples=None,
        **sim_kwargs):
    species = stdpopsim.get_species("AraTha")
    contig = species.get_contig("chr5", length_multiplier=0.01)  # ~270 kb
    contig = irradiate(contig)
    model = stdpopsim.IsolationWithMigration(
        NA=NA, N1=N1, N2=N2, T=T, M12=M12, M21=M21)
    if pulse is not None:
        model.demographic_events.append(pulse)
        model.demographic_events.sort(key=lambda x: x.time)
    # XXX: AraTha has species.generation_time == 1, but there is the potential
    # for this to mask bugs related to generation_time scaling, so we use 3 here.
    model.generation_time = 3
    if samples is None:
        samples = model.get_samples(50, 50, 0)
    engine = stdpopsim.get_engine(engine_id)

    t0 = time.perf_counter()
    ts = engine.simulate(model, contig, samples, seed=seed, **sim_kwargs)
    t1 = time.perf_counter()

    out_file = out_dir / f"{seed}.trees"
    ts.dump(out_file)
    return out_file, t1 - t0
Example 11: _run_synchronous
# Required import: import time [as alias]
# Or: from time import perf_counter [as alias]
def _run_synchronous(self, progress):
    a = np.zeros(self.num_sites, dtype=np.int8)
    for t, focal_sites in self.descriptors:
        # Time each ancestor construction and log the duration.
        before = time.perf_counter()
        s, e = self.ancestor_builder.make_ancestor(focal_sites, a)
        duration = time.perf_counter() - before
        logger.debug(
            "Made ancestor in {:.2f}s at timepoint {} (epoch {}) "
            "from {} to {} (len={}) with {} focal sites ({})".format(
                duration,
                t,
                self.timepoint_to_epoch[t],
                s,
                e,
                e - s,
                focal_sites.shape[0],
                focal_sites,
            )
        )
        self.ancestor_data.add_ancestor(
            start=s, end=e, time=t, focal_sites=focal_sites, haplotype=a[s:e]
        )
        progress.update()
Example 12: metrics
# Required import: import time [as alias]
# Or: from time import perf_counter [as alias]
def metrics(self):
    # activity_ratio = active time / wall-clock time since the run started.
    if self._run_t is None:
        activity_ratio = 0
    else:
        total_t = perf_counter() - self._run_t
        activity_ratio = self.t_active / total_t
    own_computations = {c.name for c in self.computations(include_technical=True)}
    m = {
        'count_ext_msg': {k: v
                          for k, v in self._messaging.count_ext_msg.items()
                          if k in own_computations},
        'size_ext_msg': {k: v
                         for k, v in self._messaging.size_ext_msg.items()
                         if k in own_computations},
        # 'last_msg_time': self._messaging.last_msg_time,
        'activity_ratio': activity_ratio,
        'cycles': {c.name: c.cycle_count for c in self.computations()}
    }
    return m
Example 13: main
# Required import: import time [as alias]
# Or: from time import perf_counter [as alias]
def main(arguments):
    global verbose
    (ids, names, result) = check_args(arguments)
    if arguments.v:
        verbose = True
    if not result:
        return 0
    acc_time = 0
    len_name = len(names)

    # Build a (len_name + 1) x (len_name + 1) comparison table with a header row and column,
    # accumulating the time spent in compute_files for successful comparisons.
    # (`args` is presumably a module-level argparse namespace in the original source.)
    table = []
    for i in range(0, len_name + 1):
        table.append([])
    table[0].append("")
    for i in range(0, len_name):
        table[0].append(names[i])
    for i in range(0, len_name):
        table[i+1].append(names[i])
        for j in range(0, len_name):
            if i != j:
                start = time.perf_counter()
                table[i+1].append(compute_files(names[i], names[j], ids, args.fd, args.r))
                end = time.perf_counter()
                if table[i+1][-1] != -1.0:
                    acc_time += end - start
            else:
                table[i+1].append("")

    # Check the table: make it symmetric by keeping the larger of the two mirrored entries.
    for i in range(0, len_name + 1):
        for j in range(0, len_name + 1):
            if i != j:
                if table[i][j] != table[j][i]:
                    if table[i][j] > table[j][i]:
                        table[j][i] = table[i][j]
                    else:
                        table[i][j] = table[j][i]
    pprint_table(table)
    return acc_time
Example 14: pingt
# Required import: import time [as alias]
# Or: from time import perf_counter [as alias]
# Declared async here since the body awaits bot calls; measures the round trip of a typing indicator.
async def pingt(self, ctx):
    """pseudo-ping time"""
    channel = ctx.message.channel
    t1 = time.perf_counter()
    await self.bot.send_typing(channel)
    t2 = time.perf_counter()
    await self.bot.say("pseudo-ping: {}ms".format(round((t2 - t1) * 1000)))
Example 15: perf_counter
# Required import: import time [as alias]
# Or: from time import perf_counter [as alias]
# MicroPython fallback: emulates perf_counter() in seconds using the millisecond tick counter
# (assumes ticks_ms is imported from MicroPython's time/utime module).
def perf_counter():
    return ticks_ms() / 1000