This article collects typical usage examples of the bintrees.FastRBTree.values method in Python. If you are wondering what FastRBTree.values does, how to call it, or what real-world code that uses it looks like, the curated examples below should help. You can also browse further usage examples of the containing class, bintrees.FastRBTree.
Five code examples of the FastRBTree.values method are shown below, sorted by popularity by default.
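Before turning to the five collected examples, a minimal primer sketch (with made-up tree contents) shows what FastRBTree.values itself returns: an iterator over the stored values in ascending key order, regardless of insertion order. Every example below relies on this ordering guarantee.

from bintrees import FastRBTree

tree = FastRBTree()
for key, value in [(3, 'c'), (1, 'a'), (2, 'b')]:
    tree[key] = value              # insertion order does not matter

# values() (like keys() and items()) walks the tree in ascending key order
print(list(tree.values()))         # ['a', 'b', 'c']
print(list(tree.keys()))           # [1, 2, 3]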
Example 1: TDigest
# Required import: from bintrees import FastRBTree [as alias]
# Or: from bintrees.FastRBTree import values [as alias]
class TDigest(object):

    def __init__(self, delta=0.01, K=25):
        self.C = RBTree()
        self.n = 0
        self.delta = delta
        self.K = K

    def __add__(self, other_digest):
        C1 = list(self.C.values())
        C2 = list(other_digest.C.values())
        shuffle(C1)
        shuffle(C2)
        data = C1 + C2
        new_digest = TDigest(self.delta, self.K)
        for c in data:
            new_digest.update(c.mean, c.count)
        return new_digest

    def __len__(self):
        return len(self.C)

    def __repr__(self):
        return """<T-Digest: n=%d, centroids=%d>""" % (self.n, len(self))

    def _add_centroid(self, centroid):
        if centroid.mean not in self.C:
            self.C.insert(centroid.mean, centroid)
        else:
            self.C[centroid.mean].update(centroid.mean, centroid.count)

    def _compute_centroid_quantile(self, centroid):
        denom = self.n
        cumulative_sum = sum(
            c_i.count for c_i in self.C.value_slice(-float('Inf'), centroid.mean))
        return (centroid.count / 2. + cumulative_sum) / denom

    def _update_centroid(self, centroid, x, w):
        self.C.pop(centroid.mean)
        centroid.update(x, w)
        self._add_centroid(centroid)

    def _find_closest_centroids(self, x):
        try:
            ceil_key = self.C.ceiling_key(x)
        except KeyError:
            floor_key = self.C.floor_key(x)
            return [self.C[floor_key]]
        try:
            floor_key = self.C.floor_key(x)
        except KeyError:
            ceil_key = self.C.ceiling_key(x)
            return [self.C[ceil_key]]
        if abs(floor_key - x) < abs(ceil_key - x):
            return [self.C[floor_key]]
        elif abs(floor_key - x) == abs(ceil_key - x) and (ceil_key != floor_key):
            return [self.C[ceil_key], self.C[floor_key]]
        else:
            return [self.C[ceil_key]]

    def _theshold(self, q):
        return 4 * self.n * self.delta * q * (1 - q)

    def update(self, x, w=1):
        """
        Update the t-digest with value x and weight w.
        """
        self.n += w
        if len(self) == 0:
            self._add_centroid(Centroid(x, w))
            return
        S = self._find_closest_centroids(x)

        while len(S) != 0 and w > 0:
            j = choice(list(range(len(S))))
            c_j = S[j]
            q = self._compute_centroid_quantile(c_j)
            # This filters out the centroids that do not satisfy the second part
            # of the definition of S. See the original paper by Dunning.
            if c_j.count + w > self._theshold(q):
                S.pop(j)
                continue

            delta_w = min(self._theshold(q) - c_j.count, w)
            self._update_centroid(c_j, x, delta_w)
            w -= delta_w
            S.pop(j)

        if w > 0:
            self._add_centroid(Centroid(x, w))

        if len(self) > self.K / self.delta:
            # ......... some code omitted here .........
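The excerpt above is cut off inside the compression step, but the role of FastRBTree.values is already visible: __add__ collects the centroids of both digests via .values(), shuffles them, and reinserts them into a fresh digest. RBTree here is presumably FastRBTree imported under an alias, as the import hint above suggests. A hypothetical usage sketch, assuming the omitted compression code and the original module's imports (Centroid, shuffle, choice) are available:

from random import gauss

d1 = TDigest()
d2 = TDigest()
for _ in range(1000):
    d1.update(gauss(0.0, 1.0))     # sample data made up for illustration
    d2.update(gauss(5.0, 1.0))

merged = d1 + d2                   # __add__ drains both trees through .values()
print(merged)                      # <T-Digest: n=2000, centroids=...>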
Example 2: MMCPolicy
# Required import: from bintrees import FastRBTree [as alias]
# Or: from bintrees.FastRBTree import values [as alias]
# ......... some code omitted here .........
        if self.draw_dump:
            dump_cache(self, self.csv_suffix)

    def add_trace_record(self, record):
        self.ts_datapoint['row'] = self.num_requests
        if self.was_hit:
            self.ts_datapoint['hit'] = 1
        else:
            self.ts_datapoint['hit'] = 0
        if self.was_ghost_hit:
            self.ts_datapoint['ghost_hit'] = 1
        else:
            self.ts_datapoint['ghost_hit'] = 0
        self.ts_datapoint['tau'] = self.tau[0]
        self.ts_datapoint['theta0'] = self.theta[0]
        self.ts_datapoint['theta1'] = self.theta[1]
        depth = record.depth
        self.ts_datapoint['depth'] = depth
        self.ts_datapoint['rank'] = record.node.rank
        self.ts_datapoint['Z'] = record.Z
        self.ts_writer.writerow(
            [self.ts_datapoint[key] for key in self.ts_order])
        self.ts_file.flush()
        self.trace.append(record)

    def pageout(self):
        min_node = None
        min_node_value = None
        min_ghost = None
        min_ghost_value = None
        for depth, node in enumerate(self.stack.values()):
            node.depth_memo = depth
        for rank, node in enumerate(self.ranker.values()):
            node.recompute_expected_value(depth=node.depth_memo, rank=rank)
            value = node.expected_value
            if not node.is_evicted:
                if min_node is None or value < min_node_value:
                    min_node = node
                    min_node_value = value
            if min_ghost is None or value < min_ghost_value:
                min_ghost = node
                min_ghost_value = value
        if self.num_in_cache > self.cache_entries_limit:
            self.evict(min_node)
        if (
            self.num_in_full_cache >
            self.cache_entries_limit + self.ghost_entries_limit
        ):
            self.purge(min_ghost)

    def EM_algorithm(self, delta):
        def abs_sum():
            return abs(self.tau[0]) + abs(self.theta[0]) + abs(self.theta[1])
        before = delta + 4.0
        i = 0
        # We need to detect if we're in a "nonsense" local optimum. The
        # algorithm will optimize to the global maximum if we aren't in one of
        # these cases.
        if (self.startup or
                self.tau[0] == 0.0 or
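In MMCPolicy (and in MMCRWPolicy below), the key idiom is pairing FastRBTree.values with enumerate: because values() yields nodes in ascending key order, the enumeration index doubles as a node's depth in the stack tree or its rank in the ranker tree. A standalone sketch of that idiom, with a toy node class and made-up keys:

from bintrees import FastRBTree

class Node(object):
    def __init__(self, name):
        self.name = name

ranker = FastRBTree()
# keys stand in for whatever the policy ranks by (e.g. last access time)
for key, name in [(17.2, 'a'), (3.5, 'b'), (9.9, 'c')]:
    ranker[key] = Node(name)

# values() walks the nodes in ascending key order, so the index is the rank
for rank, node in enumerate(ranker.values()):
    print(rank, node.name)         # 0 b / 1 c / 2 a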
Example 3: MINPolicy
# Required import: from bintrees import FastRBTree [as alias]
# Or: from bintrees.FastRBTree import values [as alias]
class MINPolicy(object):

    def __init__(self, cache_size_limit, trace, csv_suffix=".csv"):
        self.cache_size_limit = cache_size_limit
        self.cache = {}
        self.hits = 0.0
        self.requests = 0.0
        self.ts_order = ['row', 'hit']
        self.ts_datapoint = {key: None for key in self.ts_order}
        self.ts_datapoint['row'] = 0
        self.ts_file = open("csv/min" + csv_suffix, "w")
        self.ts_writer = csv.writer(self.ts_file)
        self.ts_writer.writerow(self.ts_order)
        self.clairvoyance = FastRBTree()
        self.precog = FastRBTree()
        last_time = time.time()
        for i, page_opcode in enumerate(trace):
            if time.time() > last_time + 0.1:
                last_time = time.time()
                print '1', i, '\r',
                sys.stdout.flush()
            page, _ = page_opcode
            try:
                self.precog[page].append(i)
            except KeyError:
                self.precog[page] = collections.deque()
                self.precog[page].append(i)
        known_max = i
        known_max += 2
        for times in self.precog.values():
            times.append(known_max)
            known_max += 1
        print
        print 'Done loading.'

    def hit_rate(self):
        return self.hits / self.requests

    def request(self, page):
        self.requests += 1
        if page in self.cache:
            was_hit = True
            self.hits += 1
        else:
            was_hit = False
            self.cache[page] = self.precog[page].popleft()
            # This happens on startup.
            if self.cache[page] < self.requests:
                self.cache[page] = self.precog[page].popleft()
            self.clairvoyance[self.cache[page]] = page
        self.ts_datapoint['row'] += 1
        if was_hit:
            self.ts_datapoint['hit'] = 1
        else:
            self.ts_datapoint['hit'] = 0
        self.ts_writer.writerow(
            [self.ts_datapoint[key] for key in self.ts_order])
        self.ts_file.flush()
        if len(self.cache) > self.cache_size_limit:
            next_use, page = self.clairvoyance.pop_max()
            del self.cache[page]
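MINPolicy uses two FastRBTree instances: precog, whose values() are the per-page deques of future access times (a sentinel is appended to each in the loop above), and clairvoyance, keyed by next-use time, so pop_max() evicts the page whose next use lies furthest in the future (Belady's MIN). A reduced, standalone sketch of that eviction idea with made-up data:

from bintrees import FastRBTree

clairvoyance = FastRBTree()
# key: time of next use, value: cached page id (toy data)
clairvoyance[12] = 'page-a'
clairvoyance[57] = 'page-b'
clairvoyance[33] = 'page-c'

# values() lists the cached pages from soonest to latest next use
print(list(clairvoyance.values()))  # ['page-a', 'page-c', 'page-b']

# Belady's MIN evicts the page that is needed furthest in the future
next_use, victim = clairvoyance.pop_max()
print(next_use, victim)             # 57 page-b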
Example 4: MMCRWPolicy
# Required import: from bintrees import FastRBTree [as alias]
# Or: from bintrees.FastRBTree import values [as alias]
# ......... some code omitted here .........
            self.ts_datapoint['ghost_hit'] = 0
        self.ts_datapoint['tau_R_SDD'] = self.tau[R_SDD]
        self.ts_datapoint['tau_R_IRM'] = self.tau[R_IRM]
        self.ts_datapoint['tau_W_SDD'] = self.tau[W_SDD]
        self.ts_datapoint['tau_W_IRM'] = self.tau[W_IRM]
        self.ts_datapoint['theta_R_SDD'] = self.theta[R_SDD]
        self.ts_datapoint['theta_R_IRM'] = self.theta[R_IRM]
        self.ts_datapoint['theta_W_SDD'] = self.theta[W_SDD]
        self.ts_datapoint['theta_W_IRM'] = self.theta[W_IRM]
        self.ts_datapoint['Z_R_SDD'] = record.Z[R_SDD]
        self.ts_datapoint['Z_R_IRM'] = record.Z[R_IRM]
        self.ts_datapoint['Z_W_SDD'] = record.Z[W_SDD]
        self.ts_datapoint['Z_W_IRM'] = record.Z[W_IRM]
        self.ts_datapoint['Z_sum'] = sum(record.Z)
        self.ts_datapoint['depth'] = record.depth
        self.ts_datapoint['rank'] = record.node.rank
        self.ts_writer.writerow(
            [self.ts_datapoint[key] for key in self.ts_order])
        self.ts_file.flush()
        self.trace.append(record)

    def pageout(self):
        min_node = None
        min_node_value = None
        min_ghost = None
        min_ghost_value = None
        for depth, node in enumerate(self.stack.values()):
            node.depth_memo = depth
        for rank, node in enumerate(self.ranker.values()):
            node.recompute_expected_value(depth=node.depth_memo, rank=rank)
            value = node.expected_value
            if not node.is_evicted:
                if min_node is None or value < min_node_value:
                    min_node = node
                    min_node_value = value
            if min_ghost is None or value < min_ghost_value:
                min_ghost = node
                min_ghost_value = value
        if self.num_in_cache > self.cache_entries_limit:
            self.evict(min_node)
        if (
            self.num_in_full_cache >
            self.cache_entries_limit + self.ghost_entries_limit
        ):
            self.purge(min_ghost)

    def EM_algorithm(self, delta):
        def abs_sum():
            return sum(self.tau) + sum(self.theta)
        before = delta + 4.0
        i = 0
        # We need to detect if we're in a "nonsense" local optimum. The
        # algorithm will optimize to the global maximum if we aren't in one of
        # these cases.
        if (self.startup or
                min(self.tau) < 0.00001 or
Example 5: Master
# Required import: from bintrees import FastRBTree [as alias]
# Or: from bintrees.FastRBTree import values [as alias]
class Master(object):
    __metaclass__ = ProcessMeta

    def __init__(self, node_timeout):
        self._logger = logging.getLogger(self.__class__.__name__)
        self._nodes = {}
        self._sessions = {}
        self._sessions_by_owner = {}
        self._keepalive_queue = FastRBTree()
        self._priority_queue = FastRBTree()
        self._node_timeout = node_timeout
        self._culling_timer = runtime.greenpool.spawn(self._cull_dead_nodes)

    def get_session(self, name, owner=None, dep_server=None, work_dir=None, worker_count=None, init=None):
        try:
            session = self._sessions[name]
            session.dep_cache.set_dependency_server(dep_server)
            return session
        except KeyError:
            if owner is None:
                raise ValueError("An owner must be provided for new sessions")
            if work_dir is None:
                raise ValueError("Valid working directory required to create a new session")
            if dep_server is None:
                raise ValueError("Dependency server must be provided to create a new session")
            session = Session(name, owner, dep_server, worker_count, self._spawn_workers, work_dir, init)
            self._sessions[name] = session
            self._sessions_by_owner.setdefault(owner, {})[name] = session
            return RemoteCloud(name, owner, session.hub, session.created_on, len(session.workers), self)

    def _spawn_workers(self, name, owner, worker_count, init):
        all_nodes = itertools.imap(lambda nd: nd.itervalues(), self._priority_queue.values())
        all_nodes = itertools.chain.from_iterable(all_nodes)
        node_pool = NodePool(all_nodes, name, self._logger)
        node_pool_size = len(node_pool)
        if worker_count is None:
            worker_count = node_pool_size
        self._logger.info("Creating session %s:%s with %d workers", owner, name, worker_count)
        # We can only ever have as many workers as there are processors in the cluster
        if worker_count > node_pool_size:
            self._logger.warning("Session %s: requested worker count %d will be capped to %d",
                                 name, worker_count, node_pool_size)
            worker_count = node_pool_size
        workers = []
        while len(workers) < worker_count and (len(node_pool) > 0):
            results = node_pool.spawn_workers(worker_count - len(workers), init=init)
            for nproc, result in results:
                try:
                    worker_batch = result.get()
                    workers.extend(worker_batch)
                except Exception as ex:
                    self._logger.error("Session %s: failed to spawn workers on node %s due to error:\n%s",
                                       name, nproc, full_traceback(ex))
        return workers

    def shutdown_session(self, name):
        session = self._sessions.pop(name)
        owner_sessions = self._sessions_by_owner[session.owner]
        del owner_sessions[name]
        # Carry out the shutdown operation in the background
        Tasklet.spawn(session.shutdown)

    def node_update(self, node_proc, cpu_count, cpu_usage, ram_total, ram_usage):
        # Remove the node from the queues if it is already registered
        if node_proc in self._nodes:
            node = self._nodes[node_proc]
            self._dequeue(node)
        else:
            # Create a new node info if it doesn't exist yet
            node = NodeInfo(node_proc, cpu_count)
            self._nodes[node_proc] = node
        # Update load based on a simple formula of tenancy and resource usage
        node.update(cpu_usage + ram_usage, cpu_usage, ram_total, ram_usage)
        self._logger.debug("Received ping %s", node)
        # Enqueue the node again
        self._enqueue(node)

    def node_info(self):
        return self._nodes.values()

    def shutdown(self):
        """
        Initiate cluster wide shutdown
        """
        self._logger.warn("Shutting down cluster")
        self._culling_timer.kill()
        # ......... some code omitted here .........
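The _spawn_workers method above shows a third common use of FastRBTree.values: the tree keys are load scores and each value is a bucket of nodes, so flattening values() visits nodes from least to most loaded (itertools.imap and itervalues are the Python 2 spellings used in the original). The same idea as a standalone Python 3 sketch, with made-up node names:

import itertools
from bintrees import FastRBTree

priority_queue = FastRBTree()
# key: load score, value: the nodes currently registered at that load (toy data)
priority_queue[0.75] = {'node-b': 'busy-b', 'node-c': 'busy-c'}
priority_queue[0.10] = {'node-a': 'idle-a'}

# values() yields the buckets in ascending load order; chaining their
# contents walks the cluster from least to most loaded node
all_nodes = itertools.chain.from_iterable(
    bucket.values() for bucket in priority_queue.values())
print(list(all_nodes))              # ['idle-a', 'busy-b', 'busy-c']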