This article collects typical usage examples of Python's collections.defaultdict. If you have been wondering what collections.defaultdict does, how to use it, or what real code that uses it looks like, the curated examples below may help. You can also explore further usage examples from the collections module, where defaultdict lives.
Below are 15 code examples of collections.defaultdict, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
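Before the examples, a minimal sketch of what defaultdict does: it is a dict subclass that calls a zero-argument factory to supply a value for any missing key (the key names below are made up for illustration):

from collections import defaultdict

d = defaultdict(list)          # missing keys become fresh empty lists
d["python"].append("defaultdict")
print(d["python"])             # ['defaultdict']
print(d["new_key"])            # [] -- created on first access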
Example 1: __init__
# Required import: import collections [as alias]
# Or: from collections import defaultdict [as alias]
def __init__(self):
    self.args = None
    self.alignDistance = 0
    self.samples = collections.OrderedDict()
    self.genome = None
    self.sources = {}
    self.annotationSets = collections.OrderedDict()

    # for storing axes, annotations, etc., by allele
    self.alleleTracks = collections.defaultdict(collections.OrderedDict)
    self.trackCompositor = None

    self.dotplots = {}
    self.info = {}

    self.reset()
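Here collections.defaultdict(collections.OrderedDict) creates a per-allele OrderedDict on first access, so callers never need to check whether an allele key exists before attaching a track. A minimal sketch of the same pattern (the allele and track names are invented for illustration):

import collections

alleleTracks = collections.defaultdict(collections.OrderedDict)
alleleTracks["allele_1"]["axis"] = "chr1:100-200"   # inner OrderedDict created on demand
print(alleleTracks["allele_1"])                     # OrderedDict with one entry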
Example 2: _add_sparse_vector_labes
# Required import: import collections [as alias]
# Or: from collections import defaultdict [as alias]
def _add_sparse_vector_labes(self, graph, vertex_v, node_feature_list):
    # combine each discrete feature with the node's sparse vector:
    # hash the feature id together with each sparse-vector index, and
    # multiply the feature value by the corresponding vector value
    svec = graph.nodes[vertex_v].get(self.key_svec, None)
    if svec:
        vec_feature_list = defaultdict(lambda: defaultdict(float))
        for radius_dist_key in node_feature_list:
            for feature in node_feature_list[radius_dist_key]:
                val = node_feature_list[radius_dist_key][feature]
                for i in svec:
                    vec_val = svec[i]
                    key = fast_hash_2(feature, i, self.bitmask)
                    vec_feature_list[radius_dist_key][key] += val * vec_val
        node_feature_list = vec_feature_list
    return node_feature_list
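The nested defaultdict(lambda: defaultdict(float)) gives a two-level accumulator whose inner counters start at 0.0, so += works without any initialization. A standalone sketch of the pattern, with invented keys:

from collections import defaultdict

acc = defaultdict(lambda: defaultdict(float))
acc["r1"]["f7"] += 0.5    # inner dict and 0.0 default created automatically
acc["r1"]["f7"] += 0.25
print(acc["r1"]["f7"])    # 0.75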
Example 3: extract_sequence_and_score
# Required import: import collections [as alias]
# Or: from collections import defaultdict [as alias]
def extract_sequence_and_score(graph=None):
    # assumes numpy imported as np and a networkx 1.x graph (graph.node API)
    # make a dict with positions as keys and lists of node ids as values
    pos_to_ids = defaultdict(list)
    for u in graph.nodes():
        if 'position' not in graph.node[u]:
            # every node must carry a 'position' attribute
            raise Exception('Missing "position" attribute in node:%s %s' % (u, graph.node[u]))
        else:
            pos = graph.node[u]['position']
        # accumulate all node ids
        pos_to_ids[pos] += [u]

    # extract the sequence of labels and importances
    seq = [None] * len(pos_to_ids)
    score = [0] * len(pos_to_ids)
    for pos in sorted(pos_to_ids):
        ids = pos_to_ids[pos]
        labels = [graph.node[u].get('label', 'N/A') for u in ids]
        # check that all labels for the same position are identical
        assert(sum([1 for label in labels if label == labels[0]]) == len(labels)
               ), 'ERROR: non identical labels referring to same position: %s %s' % (pos, labels)
        seq[pos] = labels[0]
        # average the importance scores for the same position
        importances = [graph.node[u].get('importance', 0) for u in ids]
        score[pos] = np.mean(importances)
    return seq, score
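defaultdict(list) is the standard grouping idiom: appending to a missing key silently creates the list first. A minimal sketch with invented ids and positions:

from collections import defaultdict

pos_to_ids = defaultdict(list)
for node_id, pos in [(0, 2), (1, 0), (2, 2)]:
    pos_to_ids[pos] += [node_id]    # same effect as .append(node_id)
print(dict(pos_to_ids))             # {2: [0, 2], 0: [1]}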
Example 4: compute_matching_neighborhoods_fraction
# Required import: import collections [as alias]
# Or: from collections import defaultdict [as alias]
def compute_matching_neighborhoods_fraction(GA, GB, pairings):
    count = 0
    matches = dict([(i, j) for i, j in enumerate(pairings)])
    matching_edges = defaultdict(list)
    for i, j in GA.edges():
        ii = matches[i]
        jj = matches[j]
        if (ii, jj) in GB.edges():
            matching_edges[i].append(j)
            matching_edges[j].append(i)
    for u in GA.nodes():
        # use .get() so the lookup does not insert an empty list into the
        # defaultdict for nodes without matching edges
        if matching_edges.get(u, False):
            neighbors = nx.neighbors(GA, u)
            matches_neighborhood = True
            for v in neighbors:
                if v not in matching_edges[u]:
                    matches_neighborhood = False
                    break
            if matches_neighborhood:
                count += 1
    return float(count) / len(GA.nodes())
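One detail worth copying from this snippet: indexing a defaultdict always inserts the default value as a side effect, so membership-style tests should use .get() (or the in operator) to keep the dict clean. Sketch:

from collections import defaultdict

d = defaultdict(list)
if d["a"]:           # inserts 'a' -> [] as a side effect
    pass
print(len(d))        # 1
if d.get("b"):       # no side effect
    pass
print(len(d))        # still 1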
Example 5: lifecycle
# Required import: import collections [as alias]
# Or: from collections import defaultdict [as alias]
def lifecycle(args):
    if args.delete:
        return resources.s3.BucketLifecycle(args.bucket_name).delete()
    rule = defaultdict(list, Prefix=args.prefix, Status="Enabled")
    if args.transition_to_infrequent_access is not None:
        rule["Transitions"].append(dict(StorageClass="STANDARD_IA", Days=args.transition_to_infrequent_access))
    if args.transition_to_glacier is not None:
        rule["Transitions"].append(dict(StorageClass="GLACIER", Days=args.transition_to_glacier))
    if args.expire is not None:
        rule["Expiration"] = dict(Days=args.expire)
    if args.abort_incomplete_multipart_upload is not None:
        rule["AbortIncompleteMultipartUpload"] = dict(DaysAfterInitiation=args.abort_incomplete_multipart_upload)
    # the rule starts with two keys (Prefix, Status); more than two means
    # at least one lifecycle action was requested
    if len(rule) > 2:
        clients.s3.put_bucket_lifecycle_configuration(Bucket=args.bucket_name,
                                                      LifecycleConfiguration=dict(Rules=[rule]))
    try:
        for rule in resources.s3.BucketLifecycle(args.bucket_name).rules:
            print(json.dumps(rule))
    except ClientError as e:
        expect_error_codes(e, "NoSuchLifecycleConfiguration")
        logger.error("No lifecycle configuration for bucket %s", args.bucket_name)
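defaultdict accepts the same initializers as dict after the factory argument, so defaultdict(list, Prefix=..., Status=...) seeds fixed keys while still auto-creating lists for keys like "Transitions". A sketch with an invented prefix:

from collections import defaultdict

rule = defaultdict(list, Prefix="logs/", Status="Enabled")
rule["Transitions"].append({"StorageClass": "GLACIER", "Days": 90})
print(rule["Status"])        # 'Enabled'
print(rule["Transitions"])   # [{'StorageClass': 'GLACIER', 'Days': 90}]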
Example 6: sample_latent
# Required import: import collections [as alias]
# Or: from collections import defaultdict [as alias]
def sample_latent(self, input, input_latent_mu, input_latent_sigma, pred_latent_mu,
                  pred_latent_sigma, initial_pose_mu, initial_pose_sigma, sample=True):
    '''
    Return the latent variables: a dictionary containing pose and content.
    Then crop objects from the images and encode them into z.
    '''
    latent = defaultdict(lambda: None)
    beta = self.get_transitions(input_latent_mu, input_latent_sigma,
                                pred_latent_mu, pred_latent_sigma, sample)
    pose = self.accumulate_pose(beta)
    # Sample initial pose
    initial_pose = self.pyro_sample('initial_pose', dist.Normal, initial_pose_mu,
                                    initial_pose_sigma, sample)
    pose += initial_pose.view(-1, 1, self.n_components, self.pose_latent_size)
    pose = self.constrain_pose(pose)
    # Get input objects
    input_pose = pose[:, :self.n_frames_input, :, :]
    input_obj = self.get_objects(input, input_pose)
    # Encode the sampled objects
    z = self.object_encoder(input_obj)
    z = self.sample_content(z, sample)
    latent.update({'pose': pose, 'content': z})
    return latent
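defaultdict(lambda: None) behaves like a dict whose missing keys read as None, which lets downstream code probe optional entries without calling .get() everywhere. A sketch with invented keys (note that reading a missing key also inserts it):

from collections import defaultdict

latent = defaultdict(lambda: None)
latent.update({'pose': [0.1], 'content': [0.2]})
print(latent['pose'])    # [0.1]
print(latent['mask'])    # None ('mask' is now present, mapped to None)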
Example 7: __init__
# Required import: import collections [as alias]
# Or: from collections import defaultdict [as alias]
def __init__(self, config, flows_dir, ports_dir, num_timesteps, debug=False):
    self.logger = logging.getLogger("LogHistory")
    if debug:
        self.logger.setLevel(logging.DEBUG)

    self.log_entry = namedtuple("LogEntry", "source destination type")
    self.ports = defaultdict(list)
    self.flows = defaultdict(list)
    self.data = defaultdict(lambda: defaultdict(lambda: defaultdict(int)))

    self.current_timestep = 0
    self.total_timesteps = num_timesteps

    self.parse_config(config)
    self.parse_logs(num_timesteps, flows_dir, ports_dir)
    self.info()

    pretty(self.data)
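defaultdict(lambda: defaultdict(lambda: defaultdict(int))) gives a three-level counter addressable as data[a][b][c] += 1 with no setup. Printed directly it is noisy, which is presumably why the snippet passes it through a pretty helper; the to_plain converter below is a hypothetical stand-in for that idea, and the key names are invented:

from collections import defaultdict

data = defaultdict(lambda: defaultdict(lambda: defaultdict(int)))
data["switch1"]["port2"]["in"] += 1

def to_plain(d):
    # recursively convert nested defaultdicts into plain dicts
    return {k: to_plain(v) for k, v in d.items()} if isinstance(d, dict) else d

print(to_plain(data))   # {'switch1': {'port2': {'in': 1}}}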
Example 8: loadEmbedding
# Required import: import collections [as alias]
# Or: from collections import defaultdict [as alias]
def loadEmbedding(filename):
    """
    Load a word-embedding file.
    :param filename: path to the embedding file
    :return: the list of embeddings and the word-to-index mapping
    """
    embeddings = []
    word2idx = defaultdict(list)
    with open(filename, mode="r", encoding="utf-8") as rf:
        for line in rf:
            arr = line.split(" ")
            # note: arr[1:-1] assumes each line ends with a trailing separator
            # before the newline; otherwise the last dimension would be dropped
            embedding = [float(val) for val in arr[1: -1]]
            word2idx[arr[0]] = len(word2idx)
            embeddings.append(embedding)
    return embeddings, word2idx
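Since the code assigns indexes explicitly, the list factory is never actually triggered here. A common alternative is to let the factory hand out indexes on first access, a well-known defaultdict idiom:

from collections import defaultdict

word2idx = defaultdict(lambda: len(word2idx))  # new words get the next free index
for w in ["the", "cat", "the"]:
    _ = word2idx[w]
print(dict(word2idx))    # {'the': 0, 'cat': 1}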
Example 9: load_json_logs
# Required import: import collections [as alias]
# Or: from collections import defaultdict [as alias]
def load_json_logs(json_logs):
    # load and convert json_logs to log_dicts: each key is an epoch and each
    # value is a sub dict whose keys are different metrics, e.g. memory,
    # bbox_mAP, and whose values are lists of values over all iterations
    log_dicts = [dict() for _ in json_logs]
    for json_log, log_dict in zip(json_logs, log_dicts):
        with open(json_log, 'r') as log_file:
            for line in log_file:
                log = json.loads(line.strip())
                # skip lines without the `epoch` field
                if 'epoch' not in log:
                    continue
                epoch = log.pop('epoch')
                if epoch not in log_dict:
                    log_dict[epoch] = defaultdict(list)
                for k, v in log.items():
                    log_dict[epoch][k].append(v)
    return log_dicts
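The explicit if epoch not in log_dict guard could equally be removed by making the per-file dict itself a defaultdict; the snippet keeps a plain dict at the top level so the epoch keys stay explicit. A sketch of the compact variant, with invented log lines:

from collections import defaultdict
import json

log_dict = defaultdict(lambda: defaultdict(list))
for line in ['{"epoch": 1, "loss": 0.5}', '{"epoch": 1, "loss": 0.4}']:
    log = json.loads(line)
    epoch = log.pop('epoch')
    for k, v in log.items():
        log_dict[epoch][k].append(v)
print(log_dict[1]['loss'])   # [0.5, 0.4]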
Example 10: __init__
# Required import: import collections [as alias]
# Or: from collections import defaultdict [as alias]
def __init__(self, tau=0, name="", ds_name=""):
    self.name = name
    self.ds_name = ds_name
    self.tau = tau
    self.ids = set()
    self.ids_correct = set()
    self.ids_correct_fp = set()
    self.ids_agree = set()
    # Legal = there is a fingerprint match below threshold tau
    self.ids_legal = set()

    self.counts = defaultdict(lambda: 0)
    self.counts_legal = defaultdict(lambda: 0)
    self.counts_correct = defaultdict(lambda: 0)

    # Total number of examples
    self.i = 0
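defaultdict(lambda: 0) and defaultdict(int) are interchangeable, since int() returns 0; the int spelling is the more common counter idiom:

from collections import defaultdict

counts = defaultdict(int)
for label in ["a", "b", "a"]:
    counts[label] += 1
print(dict(counts))   # {'a': 2, 'b': 1}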
Example 11: _save_sorted_results
# Required import: import collections [as alias]
# Or: from collections import defaultdict [as alias]
def _save_sorted_results(self, run_stats, scores, image_count, filename):
    """Saves sorted (by score) results of the evaluation.

    Args:
      run_stats: dictionary with runtime statistics for submissions,
        can be generated by WorkPiecesBase.compute_work_statistics
      scores: dictionary mapping submission ids to scores
      image_count: dictionary with number of images processed by submission
      filename: output filename
    """
    with open(filename, 'w') as f:
        writer = csv.writer(f)
        writer.writerow(['SubmissionID', 'ExternalTeamId', 'Score',
                         'MedianTime', 'ImageCount'])
        get_second = lambda x: x[1]
        for s_id, score in sorted(iteritems(scores),
                                  key=get_second, reverse=True):
            external_id = self.submissions.get_external_id(s_id)
            stat = run_stats.get(
                s_id, collections.defaultdict(lambda: float('NaN')))
            writer.writerow([s_id, external_id, score,
                             stat['median_eval_time'],
                             image_count[s_id]])
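Here the defaultdict serves as a fallback value: if a submission has no stats, every field asked of it reads as NaN instead of raising KeyError. A sketch with an invented submission id:

import collections

stats = {}  # no entry for this submission
stat = stats.get("sub42", collections.defaultdict(lambda: float('NaN')))
print(stat['median_eval_time'])   # nan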
Example 12: metric_values
# Required import: import collections [as alias]
# Or: from collections import defaultdict [as alias]
def metric_values(metric, by_labels=()):
    """Return values for the metric."""
    # note: for metric types other than "gauge"/"counter", suffix stays
    # unset and the comparison below would raise UnboundLocalError
    if metric._type == "gauge":
        suffix = ""
    elif metric._type == "counter":
        suffix = "_total"
    values = defaultdict(list)
    for sample_suffix, labels, value in metric._samples():
        if sample_suffix == suffix:
            if by_labels:
                label_values = tuple(labels[label] for label in by_labels)
                values[label_values] = value
            else:
                values[sample_suffix].append(value)
    return values if by_labels else values[suffix]
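Returning values[suffix] from a defaultdict(list) is safe even when no sample matched: the read creates and returns an empty list rather than raising. Sketch:

from collections import defaultdict

values = defaultdict(list)
print(values["_total"])   # [] -- empty result instead of KeyError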
Example 13: __init__
# Required import: import collections [as alias]
# Or: from collections import defaultdict [as alias]
def __init__(self, corpus_dir, datastore_type='file', db_name='corpus.db'):
    '''
    Read links and associated categories for specified articles
    in a text file separated by spaces.

    Args:
        corpus_dir (str): The directory to save the generated corpus
        datastore_type (Optional[str]): Format to save generated corpus.
                                        Specify either 'file' or 'sqlite'.
        db_name (Optional[str]): Name of database if 'sqlite' is selected.
    '''
    self.g = Goose({'browser_user_agent': 'Mozilla', 'parser_class': 'soup'})
    #self.g = Goose({'browser_user_agent': 'Mozilla'})
    self.corpus_dir = corpus_dir
    self.datastore_type = datastore_type
    self.db_name = db_name
    self.stats = defaultdict(int)

    self._create_corpus_dir(self.corpus_dir)

    self.db = None
    if self.datastore_type == 'sqlite':
        self.db = self.corpus_dir + '/' + self.db_name
        self._set_up_db(self.db)
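For simple event tallies like self.stats, defaultdict(int) and collections.Counter are near-equivalent; Counter adds conveniences such as most_common(). A sketch with an invented event name:

from collections import Counter, defaultdict

stats = defaultdict(int)
stats["fetched"] += 1

counter = Counter()
counter["fetched"] += 1
print(stats["fetched"] == counter["fetched"])   # True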
Example 14: _create_unique_fields_cache
# Required import: import collections [as alias]
# Or: from collections import defaultdict [as alias]
def _create_unique_fields_cache(cells):
    primary_key_column_numbers = []
    cache = {}

    # Unique
    for _, cell in enumerate(cells, start=1):  # the enumerate index is unused
        field = cell.get('field')
        column_number = cell.get('column-number')
        if field is not None:
            if field.descriptor.get('primaryKey'):
                primary_key_column_numbers.append(column_number)
            if field.constraints.get('unique'):
                cache[tuple([column_number])] = defaultdict(list)

    # Primary key
    if primary_key_column_numbers:
        cache[tuple(primary_key_column_numbers)] = defaultdict(list)

    return cache
Example 15: __init__
# Required import: import collections [as alias]
# Or: from collections import defaultdict [as alias]
def __init__(self, annotation_file=None):
    """
    Constructor of Microsoft COCO helper class for reading and visualizing annotations.
    :param annotation_file (str): location of annotation file
    :param image_folder (str): location to the folder that hosts images.
    :return:
    """
    # load dataset
    self.dataset, self.anns, self.cats, self.imgs = dict(), dict(), dict(), dict()
    self.imgToAnns, self.catToImgs = defaultdict(list), defaultdict(list)
    if annotation_file is not None:
        print('loading annotations into memory...')
        tic = time.time()
        dataset = json.load(open(annotation_file, 'r'))
        assert type(dataset) == dict, 'annotation file format {} not supported'.format(type(dataset))
        print('Done (t={:0.2f}s)'.format(time.time() - tic))
        self.dataset = dataset
        self.createIndex()
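imgToAnns and catToImgs are inverted indexes populated later by createIndex; with defaultdict(list) that build reduces to plain appends. A hedged sketch of roughly what such a build looks like (the exact implementation lives in pycocotools; field names follow the COCO annotation format, and the sample annotation is invented):

from collections import defaultdict

imgToAnns, catToImgs = defaultdict(list), defaultdict(list)
for ann in [{'id': 1, 'image_id': 7, 'category_id': 3}]:
    imgToAnns[ann['image_id']].append(ann)
    catToImgs[ann['category_id']].append(ann['image_id'])
print(len(imgToAnns[7]))   # 1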