本文整理汇总了Python中expiringdict.ExpiringDict方法的典型用法代码示例。如果您正苦于以下问题:Python expiringdict.ExpiringDict方法的具体用法?Python expiringdict.ExpiringDict怎么用?Python expiringdict.ExpiringDict使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类expiringdict
的用法示例。
在下文中一共展示了expiringdict.ExpiringDict方法的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: __init__
# 需要导入模块: import expiringdict [as 别名]
# 或者: from expiringdict import ExpiringDict [as 别名]
def __init__(self, listen=15, f_local=False, default_hook=None, object_hook=None):
    """Set up the peer client: core networking, peer storage and expiring caches.

    listen       -- accept backlog passed through to the Core listener
    f_local      -- bind to localhost only instead of all interfaces
    default_hook -- serializer hook for outgoing objects
    object_hook  -- deserializer hook for incoming objects
    """
    assert V.DATA_PATH is not None, 'Setup p2p params before PeerClientClass init.'
    # lifecycle flags
    self.f_stop = False
    self.f_finish = False
    self.f_running = False
    # collaborating objects
    self.core = Core(host='localhost' if f_local else None, listen=listen)
    # persisted peer table: {(host, port): header, ...}
    self.peers = PeerData(os.path.join(V.DATA_PATH, 'peer.dat'))
    # entry point that accepts DirectCmd requests
    self.event = EventIgnition()
    # in-flight request bookkeeping; entries expire after 90s, at most 5000 kept
    self.broadcast_status: Dict[int, asyncio.Future] = ExpiringDict(max_len=5000, max_age_seconds=90)
    self.result_futures: Dict[int, asyncio.Future] = ExpiringDict(max_len=5000, max_age_seconds=90)
    # dump traffic to disk when the debug flag is enabled
    if Debug.F_RECODE_TRAFFIC:
        self.core.traffic.recode_dir = V.DATA_PATH
    # (de)serializer hooks
    self.default_hook = default_hook
    self.object_hook = object_hook
示例2: __init__
# 需要导入模块: import expiringdict [as 别名]
# 或者: from expiringdict import ExpiringDict [as 别名]
def __init__(self, host=None, listen=15):
    """Initialize the p2p core object.

    host   -- 'localhost' to restrict to local connections, None for global
    listen -- accept-queue backlog for the listening socket
    """
    assert V.DATA_PATH is not None, 'Setup p2p params before CoreClass init.'
    assert host is None or host == 'localhost'
    # lifecycle flags
    self.f_stop = False
    self.f_finish = False
    self.f_running = False
    # runtime bookkeeping
    self.start_time = int(time())
    self.number = 0
    self.user: List[User] = list()
    self.user_lock = asyncio.Lock()
    self.host = host  # local => 'localhost', global => None
    self.core_que = asyncio.Queue()
    self.backlog = listen
    self.traffic = Traffic()
    # outstanding ping events; dropped after 900s, at most 5000 entries
    self.ping_status: Dict[int, asyncio.Event] = ExpiringDict(max_len=5000, max_age_seconds=900)
示例3: __init__
# 需要导入模块: import expiringdict [as 别名]
# 或者: from expiringdict import ExpiringDict [as 别名]
def __init__(self, token, api_url_base=None, name=None, version=None, timeout_s=20, poll_time_s=60):
    """Create a bot bound to *token*.

    token        -- bot API token; the uin is the part after the last ':'
    api_url_base -- override for the default ICQ bot API endpoint
    name/version -- client identification strings
    timeout_s    -- per-request timeout in seconds
    poll_time_s  -- long-poll duration in seconds
    """
    super(Bot, self).__init__()
    self.log = logging.getLogger(__name__)
    self.token = token
    # fall back to the public endpoint unless the caller supplied one
    self.api_base_url = api_url_base if api_url_base is not None else "https://api.icq.net/bot/v1"
    self.name = name
    self.version = version
    self.timeout_s = timeout_s
    self.poll_time_s = poll_time_s
    self.last_event_id = 0
    self.dispatcher = Dispatcher(self)
    self.running = False
    self._uin = token.split(":")[-1]
    self.__lock = Lock()
    self.__polling_thread = None
    # remember recently sent messages (1024 entries, 60s TTL) so the
    # duplicate-skipping handler can recognise them
    self.__sent_im_cache = ExpiringDict(max_len=2 ** 10, max_age_seconds=60)
    self.dispatcher.add_handler(SkipDuplicateMessageHandler(self.__sent_im_cache))
示例4: timed_cached_func
# 需要导入模块: import expiringdict [as 别名]
# 或者: from expiringdict import ExpiringDict [as 别名]
def timed_cached_func(max_len, max_age_seconds, ignore_self=False):
    """Decorator factory: memoize results of a function in an expiring cache.

    Args:
        max_len: maximum number of cached entries.
        max_age_seconds: time-to-live of each entry in seconds.
        ignore_self: skip the first positional argument (``self``) when
            building the cache key, so all instances share one cache.

    The decorated function gains a keyword-only ``nocache=True`` switch that
    forces recomputation (and refreshes the cached value).
    """
    # local import keeps this self-contained snippet runnable as-is
    from functools import wraps
    cache = ExpiringDict(max_len, max_age_seconds)

    def decorator(fn):
        @wraps(fn)  # fix: preserve the wrapped function's name/docstring
        def wrapper(*args, nocache=False):  # args must be hashable
            key = tuple(args[1:]) if ignore_self else tuple(args)
            value = cache.get(key)
            if nocache or value is None:
                value = fn(*args)
                cache[key] = value
            return value
        return wrapper
    return decorator
示例5: LoadFromURN
# 需要导入模块: import expiringdict [as 别名]
# 或者: from expiringdict import ExpiringDict [as 别名]
def LoadFromURN(self):
    """Load AFF4 image-stream metadata for self.urn and reset bevy state.

    Reads chunk geometry, stream size and compression method from the
    resolver (falling back to defaults when unset) and initialises the
    in-memory bevy buffers used for reading/writing chunk data.
    """
    volume_urn = self.resolver.GetUnique(lexicon.transient_graph, self.urn, lexicon.AFF4_STORED)
    # NOTE(review): the missing-storage check was already disabled upstream;
    # kept commented out to preserve behaviour.
    # if not volume_urn:
    #     raise IOError("Unable to find storage for urn %s" % self.urn)
    appendMode = self.resolver.GetUnique(lexicon.transient_graph,
                                         self.urn, lexicon.AFF4_STREAM_WRITE_MODE)
    # fix: identity comparison with None (`is not None`) instead of `!= None`
    if appendMode is not None and str(appendMode) in ["truncate", "append", "random"]:
        self.properties.writable = True
    self.lexicon = self.resolver.lexicon
    # chunk geometry; defaults: 32KiB chunks, 1024 chunks per segment
    self.chunk_size = int(self.resolver.GetUnique(volume_urn,
                                                  self.urn, self.lexicon.chunkSize) or 32 * 1024)
    self.chunks_per_segment = int(self.resolver.GetUnique(volume_urn,
                                                          self.urn, self.lexicon.chunksPerSegment) or 1024)
    sz = self.resolver.GetUnique(volume_urn, self.urn, self.lexicon.streamSize) or 0
    self.size = int(sz)
    self.compression = (self.resolver.GetUnique(volume_urn,
                                                self.urn, self.lexicon.compressionMethod) or
                        lexicon.AFF4_IMAGE_COMPRESSION_ZLIB)
    # A buffer for overlapped writes which do not fit into a chunk.
    self.buffer = b""
    # Compressed chunks in the bevy.
    self.bevy = []
    # Length of all chunks in the bevy.
    self.bevy_length = 0
    # List of (bevy offsets, compressed chunk length).
    self.bevy_index = []
    self.chunk_count_in_bevy = 0
    self.bevy_number = 0
    # short-lived chunk cache (1000 entries, 10s TTL)
    self.cache = ExpiringDict(max_len=1000, max_age_seconds=10)
    # used for identifying in-place writes to bevys
    self.bevy_is_loaded_from_disk = False
    # used for identifying if a bevy now exceeds its initial size
    self.bevy_size_has_changed = False
示例6: _query_dns
# 需要导入模块: import expiringdict [as 别名]
# 或者: from expiringdict import ExpiringDict [as 别名]
def _query_dns(domain, record_type, nameservers=None, timeout=2.0,
               cache=None):
    """
    Queries DNS

    Args:
        domain (str): The domain or subdomain to query about
        record_type (str): The record type to query for
        nameservers (list): A list of one or more nameservers to use
        (Cloudflare's public DNS resolvers by default)
        timeout (float): Sets the DNS timeout in seconds
        cache (ExpiringDict): Cache storage

    Returns:
        list: A list of answers
    """
    domain = str(domain).lower()
    record_type = record_type.upper()
    cache_key = "{0}_{1}".format(domain, record_type)
    if cache is None:
        cache = DNS_CACHE
    # NOTE: a falsy cache object skips both lookup and store,
    # mirroring the original truthiness test
    if cache:
        hit = cache.get(cache_key, None)
        if hit:
            return hit
    resolver = dns.resolver.Resolver()
    timeout = float(timeout)
    if nameservers is None:
        # Cloudflare public resolvers, IPv4 and IPv6
        nameservers = ["1.1.1.1", "1.0.0.1",
                       "2606:4700:4700::1111", "2606:4700:4700::1001",
                       ]
    resolver.nameservers = nameservers
    resolver.timeout = timeout
    resolver.lifetime = timeout
    if record_type == "TXT":
        # TXT answers arrive as tuples of byte strings; rejoin the pieces
        # of each record, then decode to str
        chunk_lists = [answer.strings
                       for answer in resolver.query(domain, record_type,
                                                    lifetime=timeout)]
        joined = [chunks[0][:0].join(chunks)
                  for chunks in chunk_lists if chunks]
        records = [raw.decode() for raw in joined]
    else:
        records = [answer.to_text().replace('"', '').rstrip(".")
                   for answer in resolver.query(domain, record_type,
                                                lifetime=timeout)]
    if cache:
        cache[cache_key] = records
    return records