This article collects typical usage examples of the Python function toolz.dicttoolz.merge, drawn from real projects. If you have been wondering what exactly merge does and how it is used in practice, the curated examples below should help.
The following shows 15 code examples of the merge function, ordered by popularity.
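Before the examples, a minimal sketch of merge's core behavior, which every snippet below relies on: later mappings win on key conflicts, a single iterable of dicts is accepted, and the optional factory keyword controls the type of the result.

from collections import OrderedDict
from toolz.dicttoolz import merge

# Later dicts take precedence on duplicate keys.
assert merge({'a': 1, 'b': 2}, {'b': 20, 'c': 30}) == {'a': 1, 'b': 20, 'c': 30}

# A single iterable of dicts is flattened into one result.
assert merge([{'a': 1}, {'b': 2}]) == {'a': 1, 'b': 2}

# `factory` controls the mapping type of the result.
assert isinstance(merge({'a': 1}, {'b': 2}, factory=OrderedDict), OrderedDict)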
Example 1: test_added
def test_added(self):
    """
    total desired, pending and actual are added to cloud metrics
    """
    td = 10
    ta = 20
    tp = 3
    tt = 7
    tg = 13
    m = {'collectionTime': 100000, 'ttlInSeconds': 5 * 24 * 60 * 60}
    md = merge(m, {'metricValue': td, 'metricName': 'ord.desired'})
    ma = merge(m, {'metricValue': ta, 'metricName': 'ord.actual'})
    mp = merge(m, {'metricValue': tp, 'metricName': 'ord.pending'})
    mt = merge(m, {'metricValue': tt, 'metricName': 'ord.tenants'})
    mg = merge(m, {'metricValue': tg, 'metricName': 'ord.groups'})
    req_data = [md, ma, mp, mt, mg]
    log = object()
    seq = [
        (Func(time.time), const(100)),
        (service_request(
            ServiceType.CLOUD_METRICS_INGEST, "POST", "ingest",
            data=req_data, log=log).intent, noop)
    ]
    eff = add_to_cloud_metrics(
        m['ttlInSeconds'], 'ord', td, ta, tp, tt, tg, log=log)
    self.assertIsNone(perform_sequence(seq, eff))
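The pattern in this test is one shared template dict merged with per-metric overrides; because merge returns a new dict, the template is never mutated. A reduced sketch with illustrative values:

from toolz.dicttoolz import merge

base = {'collectionTime': 100000, 'ttlInSeconds': 5 * 24 * 60 * 60}
points = [merge(base, {'metricName': name, 'metricValue': value})
          for name, value in [('ord.desired', 10), ('ord.actual', 20)]]
assert all(p['ttlInSeconds'] == base['ttlInSeconds'] for p in points)
assert 'metricName' not in base  # the template is untouched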
Example 2: test_factory
def test_factory():
    assert merge(defaultdict(int, {1: 2}), {2: 3}) == {1: 2, 2: 3}
    assert (merge(defaultdict(int, {1: 2}), {2: 3},
                  factory=lambda: defaultdict(int)) ==
            defaultdict(int, {1: 2, 2: 3}))
    assert not (merge(defaultdict(int, {1: 2}), {2: 3},
                      factory=lambda: defaultdict(int)) == {1: 2, 2: 3})
    # 'factoryy' (sic) is deliberate: merge rejects unknown keyword arguments.
    assert raises(TypeError, lambda: merge({1: 2}, {2: 3}, factoryy=dict))
Example 3: test_factory
def test_factory(self):
    D, kw = self.D, self.kw
    assert merge(defaultdict(int, D({1: 2})), D({2: 3})) == {1: 2, 2: 3}
    assert (merge(defaultdict(int, D({1: 2})), D({2: 3}),
                  factory=lambda: defaultdict(int)) ==
            defaultdict(int, D({1: 2, 2: 3})))
    assert not (merge(defaultdict(int, D({1: 2})), D({2: 3}),
                      factory=lambda: defaultdict(int)) == {1: 2, 2: 3})
    assert raises(TypeError, lambda: merge(D({1: 2}), D({2: 3}), factoryy=dict))
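Both factory tests make the same point: merge builds its result as a plain dict unless factory says otherwise. A standalone sketch of the type behavior:

from collections import defaultdict
from toolz.dicttoolz import merge

d = merge(defaultdict(int, {1: 2}), {2: 3})
assert type(d) is dict  # default result type, regardless of the input types
dd = merge(defaultdict(int, {1: 2}), {2: 3}, factory=lambda: defaultdict(int))
assert isinstance(dd, defaultdict)  # the factory decides the result type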
Example 4: get_train
def get_train(train_id):
    conn = getattr(g, 'db_conn')
    query = r.table('train_movements').filter(r.row['train_id'] == train_id).order_by(r.desc('actual_timestamp'))
    mvs = list(query.run(conn))
    info = get_train_info(mvs[0])
    train = merge(info, {'movements': mvs})
    return json.dumps(train, default=json_formats.date_handler)
Example 5: rolling_fit_opt_weights
def rolling_fit_opt_weights(df, opt_weights_func, look_ahead_per):
    """applies opt_weights_func to a rolling window on a pandas df"""
    # `filter` and `map` are the curried toolz versions; `xrange` makes this
    # Python 2 code.
    num_rows = df.shape[0]
    p = pipe(xrange(num_rows),
             filter(lambda x: x + look_ahead_per < num_rows),
             map(lambda x: {df.index[x]: opt_weights_func(df.iloc[x:x + look_ahead_per + 1])}))
    return pd.DataFrame(merge(p)).T
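The trick here is that merge accepts the lazy pipeline p, an iterable of one-key dicts, and flattens it into a single index -> weights mapping for the DataFrame constructor. The same idea with plain illustrative data:

from toolz.dicttoolz import merge

rows = ({date: {'w1': 0.5, 'w2': 0.5}} for date in ('2020-01-01', '2020-01-02'))
combined = merge(rows)  # one dict keyed by date
assert combined['2020-01-02'] == {'w1': 0.5, 'w2': 0.5}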
Example 6: __init__
def __init__(self, name, hist_return=None, industry_weight=None, property_dict=defaultdict(str), **kwargs):
    self.name = name
    self.property = merge(_REQUIRED_BENCHMARK_PROPERTY, property_dict)
    self.production_data_format = kwargs.get('production_data_format', OutputDataFormat.MULTI_INDEX_DF)
    self.hist_return = hist_return
    self.industry_weight = industry_weight  # industry composition weights of the benchmark index
    self._validate_data_format()
    self._validate_date_format()
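Here merge implements defaults-plus-overrides: the required benchmark properties come first, so any same-named key in property_dict wins. A minimal sketch with hypothetical property names:

from toolz.dicttoolz import merge

_REQUIRED = {'currency': 'CNY', 'rebalance': 'monthly'}  # hypothetical defaults
overrides = {'rebalance': 'weekly'}
assert merge(_REQUIRED, overrides) == {'currency': 'CNY', 'rebalance': 'weekly'}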
Example 7: add_to_cloud_metrics
def add_to_cloud_metrics(ttl, region, group_metrics, num_tenants, config,
                         log=None, _print=False):
    """
    Add the total number of desired, actual and pending servers in a region
    to Cloud Metrics.

    :param str region: the region whose metrics are being collected
    :param group_metrics: list of :obj:`GroupMetric`
    :param int num_tenants: total number of tenants
    :param dict config: config JSON dict containing convergence tenant info
    :param log: optional logger
    :param bool _print: should it print activity on stdout? Useful when
        running as a script
    :return: `Effect` with None
    """
    epoch = yield Effect(Func(time.time))
    metric_part = {'collectionTime': int(epoch * 1000),
                   'ttlInSeconds': ttl}
    tenanted_metrics, total = calc_total(group_metrics)
    if log is not None:
        log.msg(
            'total desired: {td}, total_actual: {ta}, total pending: {tp}',
            td=total.desired, ta=total.actual, tp=total.pending)
    if _print:
        print('total desired: {}, total actual: {}, total pending: {}'.format(
            total.desired, total.actual, total.pending))
    metrics = [('desired', total.desired), ('actual', total.actual),
               ('pending', total.pending), ('tenants', num_tenants),
               ('groups', len(group_metrics))]
    for tenant_id, metric in sorted(tenanted_metrics.items()):
        metrics.append(("{}.desired".format(tenant_id), metric.desired))
        metrics.append(("{}.actual".format(tenant_id), metric.actual))
        metrics.append(("{}.pending".format(tenant_id), metric.pending))
    # convergence tenants' desired and actual
    conv_tenants = keyfilter(
        partial(tenant_is_enabled,
                get_config_value=lambda k: get_in([k], config)),
        tenanted_metrics)
    conv_desired = sum(m.desired for m in conv_tenants.itervalues())
    conv_actual = sum(m.actual for m in conv_tenants.itervalues())
    metrics.extend(
        [("conv_desired", conv_desired), ("conv_actual", conv_actual),
         ("conv_divergence", conv_desired - conv_actual)])
    data = [merge(metric_part,
                  {'metricValue': value,
                   'metricName': '{}.{}'.format(region, metric)})
            for metric, value in metrics]
    yield service_request(ServiceType.CLOUD_METRICS_INGEST,
                          'POST', 'ingest', data=data, log=log)
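Besides merge, the function leans on toolz.dicttoolz.keyfilter, which keeps only the items whose key satisfies a predicate. A standalone sketch, with a set standing in for the tenant_is_enabled/config lookup:

from toolz.dicttoolz import keyfilter

metrics_by_tenant = {'t1': 5, 't2': 7, 't3': 2}
enabled = {'t1', 't3'}  # stands in for tenant_is_enabled + config
assert keyfilter(lambda t: t in enabled, metrics_by_tenant) == {'t1': 5, 't3': 2}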
Example 8: get_step_limits_from_conf
def get_step_limits_from_conf(limit_conf):
    """
    Get step limits along with defaults for steps not in limit_conf.

    :param dict limit_conf: step name -> limit mapping
    :return: `dict` of step class -> limit
    """
    step_limits = {
        step_conf_to_class[step_conf]: limit
        for step_conf, limit in limit_conf.items()}
    return merge(_DEFAULT_STEP_LIMITS, step_limits)
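A hypothetical usage, assuming step_conf_to_class and _DEFAULT_STEP_LIMITS are module globals shaped as below; limits absent from limit_conf fall through to the defaults:

# Hypothetical module globals, for illustration only.
step_conf_to_class = {'create_server': 'CreateServer'}
_DEFAULT_STEP_LIMITS = {'CreateServer': 1000, 'DeleteServer': 1000}

assert get_step_limits_from_conf({'create_server': 3}) == {
    'CreateServer': 3, 'DeleteServer': 1000}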
Example 9: clean_movement_message
def clean_movement_message(msg, msg_type, conn):
    extras = {'type': msg_type}
    body = msg['body']
    for key in body.keys():
        if key.endswith('_stanox'):
            logger.debug('Train {}: Lookup stanox {} for field {}'.format(body['train_id'], body[key], key))
            extras = merge(extras, get_geo(body[key], key[:-len('_stanox')], conn))
        if key.endswith('_timestamp'):
            try:
                logger.debug('Converting timestamp for field {}'.format(key))
                intval = int(body[key])
                extras[key] = r.epoch_time(intval / 1000.0)
            except:
                # skip fields whose value is not a parseable timestamp
                pass
        if body[key] == 'true' or body[key] == 'false':
            extras[key] = bool(body[key] == 'true')
    return merge(body, extras)
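The shape here is raw-message-first: derived fields accumulate in extras, and merge(body, extras) lets cleaned values replace the raw ones while everything else passes through. A reduced sketch with illustrative fields:

from toolz.dicttoolz import merge

body = {'event_type': 'ARRIVAL', 'offroute_ind': 'false'}
extras = {'type': 'movement', 'offroute_ind': False}  # cleaned values
assert merge(body, extras) == {'event_type': 'ARRIVAL',
                               'offroute_ind': False,
                               'type': 'movement'}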
Example 10: _renderDirectory
def _renderDirectory(self, ruleHits, ruleStats, directory, filename):
    # Generate output HTML for each rule
    for rule, hits in ruleHits.items():
        # Render hits for the individual rule
        outfilePathJSON = os.path.join(directory, rule.machine_name + ".json")
        if len(hits) > 0:  # Render hits
            # Generate JSON API
            jsonAPI = {
                "timestamp": self.timestamp,
                "downloadTimestamp": self.downloadTimestamp,
                "rule": rule.meta_dict,
                # valfilter: remove empty values for smaller JSON
                "hits": [valfilter(bool, {
                             "msgstr": entry.msgstr,
                             "msgid": entry.msgid,
                             "tcomment": entry.tcomment,
                             "hit": hit,
                             "origImages": origImages,
                             "translatedImages": translatedImages,
                             "crowdinLink": "{0}#q={1}".format(
                                 self.translationURLs[filename],
                                 genCrowdinSearchString(entry))})
                         for entry, hit, filename, origImages, translatedImages in hits]
            }
            writeJSONToFile(outfilePathJSON, jsonAPI)
        else:  # No hits to export: remove the file (so it redirects to a 404 page)
            if os.path.isfile(outfilePathJSON):
                os.remove(outfilePathJSON)
    # Render the file index page (no filelist)
    ruleInfos = [merge(rule.meta_dict, {"num_hits": ruleStats[rule]})
                 for rule in self.rules if ruleStats[rule] > 0]
    ruleInfos.sort(key=lambda o: -o["severity"])  # Invert sort order
    js = {
        "pageTimestamp": self.timestamp,
        "downloadTimestamp": self.downloadTimestamp,
        "stats": ruleInfos,
        "files": [merge(self.statsByFile[filename], {"filename": filename})
                  for filename in self.files
                  if self.statsByFile[filename]["notices"] > 0]
    }
    writeJSONToFile(os.path.join(directory, "index.json"), js)
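The valfilter(bool, {...}) call above is the dict analogue of filter: it keeps only the items whose value is truthy, trimming empty strings and Nones from the JSON. A standalone sketch:

from toolz.dicttoolz import valfilter

hit = {'msgid': 'Hello', 'msgstr': '', 'tcomment': None, 'hit': 'Hello'}
assert valfilter(bool, hit) == {'msgid': 'Hello', 'hit': 'Hello'}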
Example 11: prepare_server_launch_config
def prepare_server_launch_config(group_id, server_config, lb_descriptions):
    """
    Prepare a server config (the server part of the Group's launch config)
    with any necessary dynamic data.

    :param str group_id: The group ID
    :param PMap server_config: The server part of the Group's launch config,
        as per :obj:`otter.json_schema.group_schemas.server`, except as the
        value of a one-element PMap with key "server".
    :param iterable lb_descriptions: iterable of
        :class:`ILBDescription` providers
    """
    updated_metadata = merge(
        get_in(('server', 'metadata'), server_config, {}),
        generate_metadata(group_id, lb_descriptions))
    return set_in(server_config, ('server', 'metadata'), updated_metadata)
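The read-merge-write of nested metadata can be sketched with plain dicts using toolz's get_in and assoc_in (set_in in the original operates on a persistent map and is not part of toolz):

from toolz.dicttoolz import assoc_in, get_in, merge

server_config = {'server': {'metadata': {'color': 'blue'}}}
generated = {'group-id': 'g1'}  # hypothetical generated metadata
updated = merge(get_in(('server', 'metadata'), server_config, {}), generated)
new_config = assoc_in(server_config, ('server', 'metadata'), updated)
assert get_in(('server', 'metadata', 'group-id'), new_config) == 'g1'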
Example 12: mark_deleted_servers
def mark_deleted_servers(old, new):
    """
    Given lists of old and new servers, return a list of all servers,
    with the deleted ones annotated with a status of DELETED.

    :param list old: list of old servers
    :param list new: list of latest servers
    :return: list of updated servers
    """
    def sdict(servers):
        return {s['id']: s for s in servers}

    old = sdict(old)
    new = sdict(new)
    deleted_ids = set(old.keys()) - set(new.keys())
    for sid in deleted_ids:
        old[sid] = assoc(old[sid], "status", "DELETED")
    return merge(old, new).values()
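Because merge gives the right-most dict precedence, any server present in both old and new keeps its latest data, and the DELETED annotation survives only for ids missing from new:

from toolz.dicttoolz import merge

old = {'a': {'id': 'a', 'status': 'DELETED'}, 'b': {'id': 'b', 'status': 'BUILD'}}
new = {'b': {'id': 'b', 'status': 'ACTIVE'}}
result = merge(old, new)
assert result['b']['status'] == 'ACTIVE'   # the latest copy wins
assert result['a']['status'] == 'DELETED'  # only missing ids keep the annotation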
Example 13: computeRuleHitsForFileSet
def computeRuleHitsForFileSet(self, poFiles):
    """
    For each file in the given filename -> PO object dictionary,
    compute the Rule -> Hits dictionary.
    Stores the information in the current instance.
    Does not return anything.
    """
    # Compute dict with sorted & prettified filenames
    self.files = sorted(poFiles.keys())
    # Add all futures to the executor
    futures = list(itertools.chain(*(self.computeRuleHits(po, filename)
                                     for filename, po in poFiles.items())))
    # Process the results in first-received order. Also keep track of rule performance
    self.fileRuleHits = collections.defaultdict(dict)
    n_finished = 0
    # Intermediate result storage
    raw_results = collections.defaultdict(dict)  # filename -> {rule: result}
    for future in concurrent.futures.as_completed(futures):
        # Extract result
        filename, rule, result = future.result()
        self.fileRuleHits[filename][rule] = result
        # Track progress
        n_finished += 1
        if n_finished % 1000 == 0:
            percent_finished = n_finished * 100. / len(futures)
            print("Rule computation finished {0:.2f} %".format(percent_finished))
    # Compute total stats by file
    self.statsByFile = {
        filename: merge(self.ruleHitsToSeverityCountMap(ruleHits), {
            "translation_url": self.translationURLs[filename]})
        for filename, ruleHits in self.fileRuleHits.items()
    }
    # Compute map filename -> {rule: numHits for rule}
    self.statsByFileAndRule = {
        filename: valmap(len, ruleHits)
        for filename, ruleHits in self.fileRuleHits.items()
    }
    # Compute map rule -> numHits for rule
    self.totalStatsByRule = merge_with(sum, *(self.statsByFileAndRule.values()))
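The last line uses merge_with, which merges dicts like merge but combines the values of duplicate keys with the given function instead of overwriting. A standalone sketch:

from toolz.dicttoolz import merge_with

per_file = [{'rule-a': 2, 'rule-b': 1}, {'rule-a': 3}]
assert merge_with(sum, *per_file) == {'rule-a': 5, 'rule-b': 1}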
Example 14: unchanged_divergent_groups
def unchanged_divergent_groups(clock, current, timeout, group_metrics):
    """
    Return a list of GroupMetrics that have been divergent and unchanged for
    `timeout` seconds.

    :param IReactorTime clock: Twisted time used to track
    :param dict current: currently tracked divergent groups
    :param float timeout: timeout in seconds
    :param list group_metrics: list of group metrics
    :return: (updated current, list of (group, divergent_time) tuples)
    """
    converged, diverged = partition_bool(
        lambda gm: gm.actual + gm.pending == gm.desired, group_metrics)
    # stop tracking all converged and deleted groups
    deleted = set(current.keys()) - metrics_set(group_metrics)
    updated = current.copy()
    for g in metrics_set(converged) | deleted:
        updated.pop(g, None)
    # Start tracking divergent groups depending on whether they've changed
    now = clock.seconds()
    to_log, new = [], {}
    for gm in diverged:
        pair = (gm.tenant_id, gm.group_id)
        if pair in updated:
            last_time, values = updated[pair]
            if values != hash((gm.desired, gm.actual, gm.pending)):
                del updated[pair]
                continue
            time_diff = now - last_time
            if time_diff > timeout and time_diff % timeout <= 60:
                # log on intervals of timeout. For example, if timeout is 1 hr
                # then log every hour it remains diverged
                to_log.append((gm, time_diff))
        else:
            new[pair] = now, hash((gm.desired, gm.actual, gm.pending))
    return merge(updated, new), to_log
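The tracking state maps (tenant_id, group_id) to (first_seen_time, snapshot_hash), and since new only holds pairs absent from updated, merge here is a plain, non-mutating union of the two. A reduced sketch:

from toolz.dicttoolz import merge

tracked = {('t1', 'g1'): (100.0, hash((5, 3, 2)))}
newly_seen = {('t1', 'g2'): (160.0, hash((4, 4, 0)))}
state = merge(tracked, newly_seen)
assert set(state) == {('t1', 'g1'), ('t1', 'g2')}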
Example 15: dict
    str: None,
    object: None,
    Union: lambda xs: xs[0],
    List: lambda x: [examples[x]] * 3,
}
simple = lambda t: dict(name=dict(type=t, value=None, example=examples.get(t)))
primitives = [int, bool, str, float]
primdict = dict(zip(primitives, [simple] * len(primitives)))
enum = compose(simple, lambda x: x.__name__)

def handle_union(xs):
    xs = list(map(lambda x: x['name']['type'], xs))
    return dict(name=dict(choices=xs, example=xs[0], value=None))

def handle_list(t):
    t = next(t)
    return dict(name=dict(type=t, value=None, example=examples[List](t)))

tfuncs = merge(primdict, {
    object: enum,
    NamedTuple: enum,
    Optional: lambda x: merge(simple(x), {'optional': True}),
    List: handle_list,
    # Union : lambda xs: dict(name=dict(choices=xs, example=xs[0], value=None),
    Union: handle_union
})
from functools import reduce
from itertools import starmap
# res = reduce(merge, map(lambda x: traverse_type(x, tfuncs), TrimOpts._field_types), {})
print(TrimOpts.__dict__)
res = {k: traverse_type(t, tfuncs) for k, t in TrimOpts._field_types.items()}
print(res)
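This last example builds a type-to-handler dispatch table: primdict maps several primitive types to the same simple handler, and merge overlays the special cases, which win on conflict. A reduced sketch with illustrative handlers:

from toolz.dicttoolz import merge

simple = lambda t: {'type': t}
base = dict.fromkeys([int, bool, str, float], simple)
handlers = merge(base, {str: lambda t: {'type': t, 'quoted': True}})
assert handlers[int] is simple        # inherited from the base table
assert handlers[str](str)['quoted']   # the special case overrides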