This page collects typical usage examples of the Python ptime.time function. If you are wondering what the time function does, how to call it, or simply want real-world examples of it in use, the curated snippets below may help.
Fifteen code examples of the time function are shown below, sorted by popularity by default; note that most snippets call it simply as time.time().
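Before the examples, a short orientation sketch (standard library only; nothing here is taken from the projects the examples come from). In these snippets ptime.time is used interchangeably with the standard library's time.time: both return the current wall-clock time as a float number of seconds, so a stored timestamp can be compared against a fresh call to measure elapsed time or to check whether a deadline has passed.

import time

start_ts = time.time()             # float seconds since the epoch
work = sum(range(100000))          # placeholder workload
elapsed_ms = (time.time() - start_ts) * 1000
print("elapsed: %.3f ms" % elapsed_ms)

deadline = time.time() + 1.0       # "call me again in one second"
is_due = time.time() >= deadline   # the comparison pattern used throughout the examples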
Example 1: _try_do_lookup
def _try_do_lookup(self):
    queries_to_send = []
    current_time = time.time()
    while self._pending_lookups:
        pending_lookup = self._pending_lookups[0]
        # Drop all pending lookups older than PENDING_LOOKUP_TIMEOUT
        if time.time() > pending_lookup.start_ts + PENDING_LOOKUP_TIMEOUT:
            del self._pending_lookups[0]
        else:
            break
    if self._pending_lookups:
        lookup_obj = self._pending_lookups[0]
    else:
        return queries_to_send
    distance = lookup_obj.info_hash.distance(self._my_id)
    bootstrap_rnodes = self._routing_m.get_closest_rnodes(distance.log,
                                                          0,
                                                          True)
    #TODO: get the full bucket
    if bootstrap_rnodes:
        del self._pending_lookups[0]
        # look if I'm tracking this info_hash
        peers = self._tracker.get(lookup_obj.info_hash)
        callback_f = lookup_obj.callback_f
        if peers:
            self._add_cache_peers(lookup_obj.info_hash, peers)
            if callback_f and callable(callback_f):
                callback_f(lookup_obj.lookup_id, peers, None)
        # do the lookup
        queries_to_send = lookup_obj.start(bootstrap_rnodes)
    else:
        next_lookup_attempt_ts = time.time() + .2
        self._next_main_loop_call_ts = min(self._next_main_loop_call_ts,
                                           next_lookup_attempt_ts)
    return queries_to_send
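The loop at the top of _try_do_lookup illustrates a pattern used throughout this codebase: each queued item carries the time.time() value recorded when it was created, and expired items are dropped from the head of the queue before any work is done. Below is a standalone sketch of that pattern; PENDING_LOOKUP_TIMEOUT mirrors the constant above but its value here is made up, and PendingLookup/drop_expired are illustrative names, not part of the original code.

import collections
import time

PENDING_LOOKUP_TIMEOUT = 15.0  # seconds; illustrative value

PendingLookup = collections.namedtuple('PendingLookup', 'start_ts info_hash')

def drop_expired(pending):
    """Drop lookups older than PENDING_LOOKUP_TIMEOUT from the head of `pending`.

    `pending` is ordered oldest-first, as self._pending_lookups is above, and
    each item carries the time.time() value recorded when it was queued.
    """
    now = time.time()
    while pending and now > pending[0].start_ts + PENDING_LOOKUP_TIMEOUT:
        del pending[0]
    return pending

queue = [PendingLookup(time.time() - 60, 'old'), PendingLookup(time.time(), 'fresh')]
assert [p.info_hash for p in drop_expired(queue)] == ['fresh']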
Example 2: main_loop
def main_loop(self):
    """
    Perform maintenance operations. The main operation is routing table
    maintenance, where stale nodes are added/probed/replaced/removed as
    needed; the routing management module specifies the implementation
    details. This also includes keeping track of queries that have not
    been responded to for a long time (timeouts), with the help of
    querier.Querier. The routing manager and the lookup manager are
    informed of those timeouts.

    This method is designed to be used as minitwisted's heartbeat handler.
    """
    queries_to_send = []
    current_ts = time.time()
    #TODO: I think this if should be removed
    # At most, 1 second between calls to main_loop after the first call
    if current_ts >= self._next_main_loop_call_ts:
        self._next_main_loop_call_ts = current_ts + 1
    else:
        # It's too early
        return self._next_main_loop_call_ts, []
    # Retry failed lookup (if any)
    queries_to_send.extend(self._try_do_lookup())
    # Take care of timeouts
    if current_ts >= self._next_timeout_ts:
        (self._next_timeout_ts,
         timeout_queries) = self._querier.get_timeout_queries()
        for query in timeout_queries:
            queries_to_send.extend(self._on_timeout(query))
    # Routing table maintenance
    if time.time() >= self._next_maintenance_ts:
        (maintenance_delay,
         queries,
         maintenance_lookup) = self._routing_m.do_maintenance()
        self._next_maintenance_ts = current_ts + maintenance_delay
        self._next_main_loop_call_ts = min(self._next_main_loop_call_ts,
                                           self._next_maintenance_ts)
        queries_to_send.extend(queries)
        if maintenance_lookup:
            target, rnodes = maintenance_lookup
            lookup_obj = self._lookup_m.maintenance_lookup(target)
            queries_to_send.extend(lookup_obj.start(rnodes))
    # Auto-save routing table
    if current_ts >= self._next_save_state_ts:
        state.save(self._my_id,
                   self._routing_m.get_main_rnodes(),
                   self.state_filename)
        self._next_save_state_ts = current_ts + SAVE_STATE_DELAY
        self._next_main_loop_call_ts = min(self._next_main_loop_call_ts,
                                           self._next_maintenance_ts,
                                           self._next_timeout_ts,
                                           self._next_save_state_ts)
    # Return control to reactor
    datagrams_to_send = self._register_queries(queries_to_send)
    return self._next_main_loop_call_ts, datagrams_to_send
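main_loop returns the timestamp at which it wants to be called again, and the reactor (see Examples 9 and 13) compares that value against time.time() on every iteration. The following self-contained sketch shows that contract; run_heartbeat, fake_main_loop, the 0.1 s sleep and the iteration cap are all illustrative assumptions, not part of the original code.

import time

def run_heartbeat(handler, iterations=5):
    """Call `handler` whenever its self-reported deadline has passed.

    `handler` must return (next_call_ts, outgoing_items), like main_loop
    above; outgoing_items are ignored in this sketch.
    """
    next_call_ts = time.time()
    for _ in range(iterations):
        if time.time() >= next_call_ts:
            next_call_ts, _outgoing = handler()
        time.sleep(0.1)  # the real reactor blocks on a socket instead

def fake_main_loop():
    # Ask to be called again in one second; nothing to send.
    return time.time() + 1, []

run_heartbeat(fake_main_loop)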
Example 3: test_timeouts_in_a_row
def test_timeouts_in_a_row(self):
    rnode = RoutingNode(tc.NODES[0], 1)
    eq_(rnode.timeouts_in_a_row(), 0)
    eq_(rnode.timeouts_in_a_row(True), 0)
    eq_(rnode.timeouts_in_a_row(False), 0)
    # got query
    rnode.add_event(time.time(), node.QUERY)
    eq_(rnode.timeouts_in_a_row(), 0)
    eq_(rnode.timeouts_in_a_row(True), 0)
    eq_(rnode.timeouts_in_a_row(False), 0)
    # got timeout
    rnode.add_event(time.time(), node.TIMEOUT)
    eq_(rnode.timeouts_in_a_row(), 1)
    eq_(rnode.timeouts_in_a_row(True), 1)
    eq_(rnode.timeouts_in_a_row(False), 1)
    # got query
    rnode.add_event(time.time(), node.QUERY)
    eq_(rnode.timeouts_in_a_row(), 0)
    eq_(rnode.timeouts_in_a_row(True), 0)
    eq_(rnode.timeouts_in_a_row(False), 1)
    # got timeout
    rnode.add_event(time.time(), node.TIMEOUT)
    eq_(rnode.timeouts_in_a_row(), 1)
    eq_(rnode.timeouts_in_a_row(True), 1)
    eq_(rnode.timeouts_in_a_row(False), 2)
    # got response
    rnode.add_event(time.time(), node.RESPONSE)
    eq_(rnode.timeouts_in_a_row(), 0)
    eq_(rnode.timeouts_in_a_row(True), 0)
    eq_(rnode.timeouts_in_a_row(False), 0)
Example 4: test_capture
def test_capture(self):
    self.reactor.start_capture()
    ts1 = time.time()
    time.sleep(tc.TASK_INTERVAL/2)
    # out > DATAGRAM1 (main_loop)
    self.reactor.run_one_step()
    ts2 = time.time()
    incoming_datagram = Datagram(DATA1, tc.SERVER_ADDR)
    self.reactor.s.put_datagram_received(incoming_datagram)
    time.sleep(tc.TASK_INTERVAL/2)
    self.reactor.run_one_step()
    # in < incoming_datagram (socket)
    # out > DATAGRAM3 (on_datagram_received)
    captured_msgs = self.reactor.stop_and_get_capture()
    eq_(len(captured_msgs), 3)
    for msg in captured_msgs:
        print msg
    assert ts1 < captured_msgs[0][0] < ts2
    eq_(captured_msgs[0][1], tc.SERVER_ADDR)
    eq_(captured_msgs[0][2], True)  # outgoing
    eq_(captured_msgs[0][3], DATA1)
    assert captured_msgs[1][0] > ts2
    eq_(captured_msgs[1][1], DATAGRAM1.addr)
    eq_(captured_msgs[1][2], False)  # incoming
    eq_(captured_msgs[1][3], DATAGRAM1.data)
    assert captured_msgs[2][0] > captured_msgs[1][0]
    eq_(captured_msgs[2][1], DATAGRAM3.addr)
    eq_(captured_msgs[2][2], True)  # outgoing
    eq_(captured_msgs[2][3], DATAGRAM3.data)
Example 5: _add_cache_peers
def _add_cache_peers(self, info_hash, peers):
    oldest_valid_ts = time.time() - CACHE_VALID_PERIOD
    while self._cached_lookups and self._cached_lookups[0][0] < oldest_valid_ts:
        # clean up old entries
        del self._cached_lookups[0]
    if self._cached_lookups and self._cached_lookups[-1][1] == info_hash:
        self._cached_lookups[-1][2].extend(peers)
    else:
        self._cached_lookups.append((time.time(), info_hash, peers))
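_add_cache_peers keeps cached lookup results only for CACHE_VALID_PERIOD seconds, again by storing a time.time() timestamp next to each entry. Below is a minimal sketch of the same expiry logic with a plain list; CACHE_VALID_PERIOD mirrors the constant above but its value here is made up, and add_cached is an illustrative helper, not from the source.

import time

CACHE_VALID_PERIOD = 300  # seconds; illustrative value

def add_cached(cache, key, values):
    """Append (timestamp, key, values) and drop entries past their TTL.

    `cache` is a list ordered by insertion time, mirroring
    self._cached_lookups above.
    """
    oldest_valid_ts = time.time() - CACHE_VALID_PERIOD
    while cache and cache[0][0] < oldest_valid_ts:
        del cache[0]
    cache.append((time.time(), key, list(values)))

cache = []
add_cached(cache, 'info_hash_1', ['peer_a'])
add_cached(cache, 'info_hash_2', ['peer_b'])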
Example 6: mark
def mark(self, msg=''):
    if self.disabled:
        return
    t1 = ptime.time()
    msg2 = " "+self.msg+" "+msg+" "+"%gms" % ((t1-self.t1)*1000)
    if self.delayed:
        self.msgs.append(msg2)
    else:
        print msg2
    self.t1 = ptime.time()  ## don't measure time it took to print
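mark() measures the milliseconds elapsed since the previous mark and then resets its reference timestamp so that printing time is not counted. The following simplified timer reproduces that behaviour with the standard library only; SimpleProfiler is a sketch, not the class the method above actually belongs to (which also supports disabling and delayed output), and ptime.time could be substituted for time.time where available.

import time

class SimpleProfiler(object):
    """Print the time elapsed between successive mark() calls, in ms."""

    def __init__(self, msg='profile'):
        self.msg = msg
        self.t1 = time.time()

    def mark(self, msg=''):
        t1 = time.time()
        print("%s %s %gms" % (self.msg, msg, (t1 - self.t1) * 1000))
        self.t1 = time.time()  # don't measure the time spent printing

prof = SimpleProfiler('load')
sum(range(100000))        # placeholder workload
prof.mark('summed range')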
Example 7: _update_rnode_on_timeout
def _update_rnode_on_timeout(self, rnode):
    """Register a timeout for this rnode.
    You should call this method when getting a timeout for this node.
    """
    rnode.last_action_ts = time.time()
    rnode.msgs_since_timeout = 0
    rnode.num_timeouts += 1
    rnode.add_event(time.time(), node.TIMEOUT)
Example 8: _update_rnode_on_query_received
def _update_rnode_on_query_received(self, rnode):
    """Register a query from node.
    You should call this method when receiving a query from this node.
    """
    current_time = time.time()
    rnode.last_action_ts = time.time()
    rnode.msgs_since_timeout += 1
    rnode.num_queries += 1
    rnode.add_event(current_time, node.QUERY)
    rnode.last_seen = current_time
Example 9: run_one_step
def run_one_step(self):
    """Main loop activated by calling self.start()"""
    # Deal with call_asap requests
    # TODO: retry for 5 seconds if no msgs_to_send (inside controller?)
    call_asap_tuple = None
    self._lock.acquire()
    try:
        if self._call_asap_queue:
            call_asap_tuple = self._call_asap_queue.pop(0)
    finally:
        self._lock.release()
    if call_asap_tuple:
        callback_f, args, kwds = call_asap_tuple
        datagrams_to_send = callback_f(*args, **kwds)
        for datagram in datagrams_to_send:
            self._sendto(datagram)
    # Call main_loop
    if time.time() >= self._next_main_loop_call_ts:
        (self._next_main_loop_call_ts,
         datagrams_to_send) = self._main_loop_f()
        for datagram in datagrams_to_send:
            self._sendto(datagram)
    # Get data from the network
    try:
        data, addr = self.s.recvfrom(BUFFER_SIZE)
    except socket.timeout:
        pass  # timeout
    except socket.error as e:
        logger.warning(
            'Got socket.error when receiving data:\n%s' % e)
    else:
        self._add_capture((time.time(), addr, False, data))
        ip_is_blocked = self.floodbarrier_active and \
            self.floodbarrier.ip_blocked(addr[0])
        if ip_is_blocked:
            logger.warning("blocked")
            return
        datagram_received = Datagram(data, addr)
        (self._next_main_loop_call_ts,
         datagrams_to_send) = self._on_datagram_received_f(
             datagram_received)
        for datagram in datagrams_to_send:
            self._sendto(datagram)
Example 10: test_complete_coverage
def test_complete_coverage(self):
    eq_(self.rt.get_closest_rnodes(76, 8, False), [tc.CLIENT_NODE])
    log_distance = self.my_node.log_distance(tc.SERVER_NODE)
    str(self.rt.get_sbucket(log_distance).main)
    repr(self.rt)
    ok_(Bucket(1) != Bucket(2))
    buckets = [Bucket(2), Bucket(2)]
    buckets[0].add(tc.CLIENT_NODE.get_rnode(1))
    buckets[1].add(tc.CLIENT_NODE.get_rnode(1))
    buckets[0].add(tc.NODES[0].get_rnode(1))
    buckets[1].add(tc.NODES[1].get_rnode(1))
    ok_(buckets[0] != buckets[1])
    eq_(buckets[0].get_freshest_rnode(), tc.NODES[0])
    stalest_rnode = buckets[0].get_stalest_rnode()
    eq_(stalest_rnode, tc.CLIENT_NODE)
    # Dangerous!!!
    stalest_rnode.last_seen = time.time()
    eq_(buckets[0].get_freshest_rnode(), tc.CLIENT_NODE)
    eq_(self.rt.find_next_bucket_with_room_index(tc.CLIENT_NODE), 0)
    eq_(self.rt.find_next_bucket_with_room_index(log_distance=6), 7)
    eq_(self.rt.find_next_bucket_with_room_index(log_distance=106), 107)
    self.rt.print_stats()
Example 11: __init__
def __init__(self, dht_addr, state_filename,
             routing_m_mod, lookup_m_mod,
             experimental_m_mod,
             private_dht_name):
    #TODO: don't do this evil stuff!!!
    message.private_dht_name = private_dht_name
    if size_estimation:
        self._size_estimation_file = open('size_estimation.dat', 'w')
    self.state_filename = state_filename
    saved_id, saved_bootstrap_nodes = state.load(self.state_filename)
    if saved_id:
        self._my_id = saved_id
    else:
        self._my_id = identifier.RandomId()
    self._my_node = Node(dht_addr, self._my_id)
    self._tracker = tracker.Tracker()
    self._token_m = token_manager.TokenManager()
    self._querier = Querier()
    self._routing_m = routing_m_mod.RoutingManager(self._my_node,
                                                   saved_bootstrap_nodes)
    self._lookup_m = lookup_m_mod.LookupManager(self._my_id)
    self._experimental_m = experimental_m_mod.ExperimentalManager(
        self._my_node.id)
    current_ts = time.time()
    self._next_save_state_ts = current_ts + SAVE_STATE_DELAY
    self._next_maintenance_ts = current_ts
    self._next_timeout_ts = current_ts
    self._next_main_loop_call_ts = current_ts
    self._pending_lookups = []
Example 12: print_table
def print_table(self):
    header_format = '%6s %40s %10s %15s %5s %4s %8s'
    data_format = '%6d %40r %10s %15s %5d %4d %9.2f'
    header = header_format % (
        'bucket', 'id', 'version', 'ip', 'port', 'rtt', 'uptime(h)')
    #TODO: format uptime as hh:mm
    thick_line = '=' * 95
    thin_line = '-' * 95
    print thick_line
    print data_format % (-1, self.my_node.id,
                         version_repr(self.my_node.version),
                         self.my_node.addr[0], self.my_node.addr[1],
                         0, 0)
    print thin_line
    print header
    print thin_line
    current_time = time.time()
    for rnode in self.get_main_rnodes():
        if rnode.rtt == 99:
            rtt = rnode.real_rtt
        else:
            rtt = rnode.rtt
        print data_format % (
            self.my_node.id.distance(rnode.id).log,
            rnode.id, version_repr(rnode.version),
            rnode.addr[0], rnode.addr[1],
            rtt * 1000,
            (current_time - rnode.creation_ts)/3600)
    print thin_line
    print header
    print thick_line
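The TODO in print_table asks for uptime formatted as hh:mm instead of fractional hours. The helper below shows one way to do that with the two timestamps already available in the loop; format_uptime is a hypothetical addition, not part of the original routing table code.

def format_uptime(creation_ts, current_time):
    """Return the age of a node as an 'hh:mm' string.

    Both arguments are time.time() timestamps, as used in print_table above.
    """
    total_minutes = int((current_time - creation_ts) // 60)
    hours, minutes = divmod(total_minutes, 60)
    return '%02d:%02d' % (hours, minutes)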
Example 13: run_one_step
def run_one_step(self):
    """Main loop activated by calling self.start()"""
    # Deal with call_asap requests
    #TODO: retry for 5 seconds if no msgs_to_send (inside controller?)
    call_asap_tuple = None
    self._lock.acquire()
    try:
        if self._call_asap_queue:
            call_asap_tuple = self._call_asap_queue.pop(0)
    finally:
        self._lock.release()
    if call_asap_tuple:
        callback_f, args, kwds = call_asap_tuple
        datagrams_to_send = callback_f(*args, **kwds)
        for datagram in datagrams_to_send:
            self._sendto(datagram)
    # Call main_loop
    if time.time() >= self._next_main_loop_call_ts:
        (self._next_main_loop_call_ts,
         datagrams_to_send) = self._main_loop_f()
        for datagram in datagrams_to_send:
            self._sendto(datagram)
    # Get data from the network
    try:
        data, addr = self.s.recvfrom(BUFFER_SIZE)
    except socket.timeout:
        pass  # timeout
    except socket.error as e:
        logger.warning(
            'Got socket.error when receiving data:\n%s' % e)
Example 14: __init__
def __init__(self, dht_addr, state_path,
             routing_m_mod, lookup_m_mod,
             private_dht_name):
    #TODO: don't do this evil stuff!!!
    message.private_dht_name = private_dht_name
    self.state_filename = os.path.join(state_path, STATE_FILENAME)
    self.load_state()
    if not self._my_id:
        self._my_id = identifier.RandomId()
    self._my_node = Node(dht_addr, self._my_id)
    self._tracker = tracker.Tracker()
    self._token_m = token_manager.TokenManager()
    self._reactor = ThreadedReactor()
    self._reactor.listen_udp(self._my_node.addr[1],
                             self._on_datagram_received)
    #self._rpc_m = RPCManager(self._reactor)
    self._querier = Querier(self._my_id)
    bootstrap_nodes = self.loaded_nodes or BOOTSTRAP_NODES
    del self.loaded_nodes
    self._routing_m = routing_m_mod.RoutingManager(self._my_node,
                                                   bootstrap_nodes)
    # self._responder = Responder(self._my_id, self._routing_m,
    #                             self._tracker, self._token_m)
    self._lookup_m = lookup_m_mod.LookupManager(self._my_id)
    current_time = time.time()
    self._next_maintenance_ts = current_time
    self._next_save_state_ts = current_time + SAVE_STATE_DELAY
    self._running = False
Example 15: get_timeout_queries
def get_timeout_queries(self):
    """
    Return a tuple with two items: (1) the timestamp of the next timeout,
    and (2) a list of message.OutgoingQueryBase objects for the queries
    that have timed out.
    """
    current_ts = time.time()
    timeout_queries = []
    while self._timeouts:
        timeout_ts, query = self._timeouts[0]
        if current_ts < timeout_ts:
            next_timeout_ts = timeout_ts
            break
        self._timeouts = self._timeouts[1:]
        addr_query_list = self._pending[query.dst_node.addr]
        popped_query = addr_query_list.pop(0)
        assert query == popped_query
        if not addr_query_list:
            # The list is empty. Remove the whole list.
            del self._pending[query.dst_node.addr]
        if not query.got_response:
            timeout_queries.append(query)
    if not self._timeouts:
        next_timeout_ts = current_ts + TIMEOUT_DELAY
    return next_timeout_ts, timeout_queries
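get_timeout_queries walks self._timeouts, a list of (timeout_ts, query) pairs ordered by deadline, which suggests that the sending side records time.time() + TIMEOUT_DELAY when each query goes out. The sketch below shows that producer/consumer pair in isolation; register_query and pop_timed_out are illustrative names, the TIMEOUT_DELAY value is made up, and the registration side is an assumption since it is not shown in the source.

import time

TIMEOUT_DELAY = 3  # seconds; illustrative value

def register_query(timeouts, query):
    """Record that `query` was just sent and must be answered within
    TIMEOUT_DELAY seconds. `timeouts` stays ordered by deadline because
    queries are registered in send order."""
    timeouts.append((time.time() + TIMEOUT_DELAY, query))

def pop_timed_out(timeouts):
    """Return (next_timeout_ts, timed_out_queries), mirroring
    get_timeout_queries above."""
    current_ts = time.time()
    timed_out = []
    while timeouts and timeouts[0][0] <= current_ts:
        timed_out.append(timeouts.pop(0)[1])
    next_timeout_ts = timeouts[0][0] if timeouts else current_ts + TIMEOUT_DELAY
    return next_timeout_ts, timed_out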