This article collects typical usage examples of the six.iterkeys method in Python. If you are wondering exactly how six.iterkeys is used, or what it is good for, the hand-picked code examples below may help. You can also explore further usage examples of the six module that this method belongs to.
The 15 code examples of six.iterkeys shown below are ordered by popularity by default.
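Before the examples, a minimal self-contained sketch (the dictionary below is made up) of what six.iterkeys does: it yields the keys of a mapping, using dict.iterkeys on Python 2 and wrapping dict.keys on Python 3, so the calling code is identical under both interpreters.

import six

counts = {'apples': 3, 'pears': 5}   # hypothetical data
for key in six.iterkeys(counts):     # same call works on Python 2 and 3
    print(key, counts[key])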
Example 1: __str__
# Required module: import six [as alias]
# Or: from six import iterkeys [as alias]
def __str__(self):
    """Returns human readable representation, which is useful for debugging."""
    buf = StringIO()
    for batch_idx, (batch_id, batch_val) in enumerate(iteritems(self.data)):
        if batch_idx >= TO_STR_MAX_BATCHES:
            buf.write(u'...\n')
            break
        buf.write(u'BATCH "{0}"\n'.format(batch_id))
        for k, v in iteritems(batch_val):
            if k != 'images':
                buf.write(u' {0}: {1}\n'.format(k, v))
        for img_idx, img_id in enumerate(iterkeys(batch_val['images'])):
            if img_idx >= TO_STR_MAX_IMAGES_PER_BATCH:
                buf.write(u' ...')
                break
            buf.write(u' IMAGE "{0}" -- {1}\n'.format(img_id,
                                                      batch_val['images'][img_id]))
        buf.write(u'\n')
    return buf.getvalue()
Example 2: images_from_filemap
# Required module: import six [as alias]
# Or: from six import iterkeys [as alias]
def images_from_filemap(fmap):
    '''
    images_from_filemap(fmap) yields a persistent map of MRImages tracked by the given subject with
    the given name and path; in freesurfer subjects these are renamed and converted from their
    typical freesurfer filenames (such as 'ribbon') to forms that conform to the neuropythy naming
    conventions (such as 'gray_mask'). To access data by their original names, use the filemap.
    '''
    imgmap = fmap.data_tree.image
    def img_loader(k): return lambda:imgmap[k]
    imgs = {k:img_loader(k) for k in six.iterkeys(imgmap)}
    def _make_mask(val, eq=True):
        rib = imgmap['ribbon']
        img = np.asarray(rib.dataobj)
        arr = (img == val) if eq else (img != val)
        arr.setflags(write=False)
        return type(rib)(arr, rib.affine, rib.header)
    imgs['lh_gray_mask'] = lambda:_make_mask(3)
    imgs['lh_white_mask'] = lambda:_make_mask(2)
    imgs['rh_gray_mask'] = lambda:_make_mask(42)
    imgs['rh_white_mask'] = lambda:_make_mask(41)
    imgs['brain_mask'] = lambda:_make_mask(0, False)
    # merge in with the typical images
    return pimms.merge(fmap.data_tree.image, pimms.lazy_map(imgs))
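A side note on the img_loader helper above: it exists so that each lazily loaded entry captures its own key; a bare lambda: imgmap[k] written directly inside the comprehension would look up k late, and every loader would return the last image. A standalone sketch of just that pattern, with a made-up dictionary standing in for fmap.data_tree.image:

import six

imgmap = {'ribbon': 'ribbon-data', 'brain': 'brain-data'}   # hypothetical image map

def img_loader(k):
    # returning the lambda from a factory freezes k for this entry
    return lambda: imgmap[k]

imgs = {k: img_loader(k) for k in six.iterkeys(imgmap)}
assert imgs['ribbon']() == 'ribbon-data'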
Example 3: image_dimensions
# Required module: import six [as alias]
# Or: from six import iterkeys [as alias]
def image_dimensions(images):
    '''
    sub.image_dimensions is a tuple of the default size of an anatomical image for the given
    subject.
    '''
    if images is None or len(images) == 0: return None
    if pimms.is_lazy_map(images):
        # look for an image that isn't lazy...
        key = next((k for k in images.iterkeys() if not images.is_lazy(k)), None)
        if key is None: key = next(images.iterkeys(), None)
    else:
        key = next(images.iterkeys(), None)
    img = images[key]
    if img is None: return None
    if is_image(img): img = img.dataobj
    return np.asarray(img).shape
Example 4: basic_retinotopy_data
# Required module: import six [as alias]
# Or: from six import iterkeys [as alias]
def basic_retinotopy_data(hemi, retino_type):
    '''
    basic_retinotopy_data(hemi, t) yields a numpy array of data for the given cortex object hemi
    and retinotopy type t; it does this by looking at the properties in hemi and picking out any
    combination that is commonly used to denote empirical retinotopy data. These common names are
    stored in _predicted_retinotopy_names, in order of preference, which may be modified.
    The argument t should be one of 'polar_angle', 'eccentricity', 'visual_area', or 'weight'.
    Unlike the related functions empirical_retinotopy_data and predicted_retinotopy_data, this
    function calls both of these (predicted first then empirical) in the case that it does not
    find a valid property.
    '''
    dat = _retinotopy_names[retino_type.lower()]
    val = next((hemi.prop(s) for s in six.iterkeys(hemi.properties) if s.lower() in dat), None)
    if val is None and retino_type.lower() != 'weight':
        val = predicted_retinotopy_data(hemi, retino_type)
    if val is None and retino_type.lower() != 'visual_area':
        val = empirical_retinotopy_data(hemi, retino_type)
    return val
Example 5: subjects
# Required module: import six [as alias]
# Or: from six import iterkeys [as alias]
def subjects(_subjects):
    '''
    hcp.subjects is a lazy persistent map of all the subjects that are part of the HCP_1200
    dataset. Subjects with valid retinotopic mapping data (assuming that the
    ny.data['hcp_retinotopy'] dataset has been initialized) include retinotopic mapping data
    as part of their property data.
    '''
    try:
        from neuropythy import data
        dset = data['hcp_retinotopy']
        subs = dset.subjects
    except Exception: return _subjects
    # okay, so far so good; let's setup the subject updating function:
    sids = set(list(_subjects.keys()))
    def _add_retino(sid):
        if sid in subs: return subs[sid]
        else: return _subjects[sid]
    return pimms.lazy_map({sid: curry(_add_retino, sid) for sid in six.iterkeys(_subjects)})
Example 6: remove_child_resource_properties
# Required module: import six [as alias]
# Or: from six import iterkeys [as alias]
def remove_child_resource_properties(self, properties):
    """
    Removes the properties that are supposed to be on the child
    resource and not on the parent resource. It copies the properties
    argument before it removes the copied values; in other words, it
    does not have side effects.

    :param dict properties: The properties that are in the related
        resource map that should not be in the parent resource.
    :return: a dictionary of the updated properties
    :rtype: :py:class:`dict`
    """
    properties = properties.copy()
    for key in six.iterkeys(self.property_map):
        properties.pop(key, None)
    properties.pop(self.name, None)
    return properties
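The same copy-then-pop idea can be sketched outside the class with plain dictionaries (the property map, relation name, and property values below are invented for illustration):

import six

property_map = {'child_id': 'id'}                         # hypothetical self.property_map
relation_name = 'child'                                   # hypothetical self.name
properties = {'child_id': 7, 'child': {}, 'title': 'x'}

cleaned = properties.copy()                               # no side effects on the caller's dict
for key in six.iterkeys(property_map):
    cleaned.pop(key, None)
cleaned.pop(relation_name, None)
assert cleaned == {'title': 'x'} and 'child_id' in properties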
Example 7: repr_dict
# Required module: import six [as alias]
# Or: from six import iterkeys [as alias]
def repr_dict(_dict, indent):
    """Return a debug representation of a dict or OrderedDict."""
    # pprint represents OrderedDict objects using the tuple init syntax,
    # which is not very readable. Therefore, dictionaries are iterated over.
    if _dict is None:
        return 'None'
    if not isinstance(_dict, Mapping):
        raise TypeError("Object must be a mapping, but is a %s" %
                        type(_dict))
    if isinstance(_dict, OrderedDict):
        kind = 'ordered'
        ret = '%s {\n' % kind  # non standard syntax for the kind indicator
        for key in six.iterkeys(_dict):
            value = _dict[key]
            ret += _indent('%r: %r,\n' % (key, value), 2)
    else:  # dict
        kind = 'sorted'
        ret = '%s {\n' % kind  # non standard syntax for the kind indicator
        for key in sorted(six.iterkeys(_dict)):
            value = _dict[key]
            ret += _indent('%r: %r,\n' % (key, value), 2)
    ret += '}'
    ret = repr_text(ret, indent=indent)
    return ret.lstrip(' ')
Example 8: _filter_and_flatten
# Required module: import six [as alias]
# Or: from six import iterkeys [as alias]
def _filter_and_flatten(modules_):
    """Returns flattened dict, filtered according to FLAGS."""
    flat = collections.OrderedDict()

    def add(submodules, prefix=None):
        for key, module_or_function in six.iteritems(submodules):
            full_name = prefix + '__' + key if prefix is not None else key
            if isinstance(module_or_function, dict):
                add(module_or_function, full_name)
            else:
                if FLAGS.filter not in full_name:
                    continue
                flat[full_name] = module_or_function

    add(modules_)
    # Make sure the list of modules is in deterministic order. This is important
    # when generating across multiple machines.
    flat = collections.OrderedDict(
        [(key, flat[key]) for key in sorted(six.iterkeys(flat))])
    return flat
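The last two statements are what make the result reproducible across machines: rebuilding the OrderedDict from sorted keys removes any dependence on insertion order. A small standalone sketch of just that step, with invented module names:

import collections
import six

flat = collections.OrderedDict([('calculus__differentiate', 1),   # hypothetical entries
                                ('algebra__linear_1d', 2)])
flat = collections.OrderedDict(
    [(key, flat[key]) for key in sorted(six.iterkeys(flat))])
assert list(flat) == ['algebra__linear_1d', 'calculus__differentiate']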
Example 9: _apply_updates
# Required module: import six [as alias]
# Or: from six import iterkeys [as alias]
def _apply_updates(self, grad_func):
    qs = self._var_list
    self._define_variables(qs)
    update_ops, infos = self._update(qs, grad_func)

    with tf.control_dependencies([self.t.assign_add(1)]):
        sample_op = tf.group(*update_ops)

    list_attrib = zip(*map(lambda d: six.itervalues(d), infos))
    list_attrib_with_k = map(lambda l: dict(zip(self._latent_k, l)),
                             list_attrib)
    attrib_names = list(six.iterkeys(infos[0]))
    dict_info = dict(zip(attrib_names, list_attrib_with_k))
    SGMCMCInfo = namedtuple("SGMCMCInfo", attrib_names)
    sgmcmc_info = SGMCMCInfo(**dict_info)
    return sample_op, sgmcmc_info
Example 10: wait_sending_last_messages
# Required module: import six [as alias]
# Or: from six import iterkeys [as alias]
def wait_sending_last_messages(self):
    """
    Requests all channels to close and waits for it.
    """
    if self.active and self.online is not False:
        self.logger.debug("client sends last %s messages ..."
                          % ([str(i) + ':' + str(len(x)) for i, x in six.iteritems(self.queues)],))

        for channel, messages in six.iteritems(self.queues):
            for idx, message in enumerate(messages):
                self.logger.debug("[%s] %d: %s" % (channel, idx, str(message)[0:120]))

        # send all missing messages
        # by joining we wait until its loop finishes.
        # it won't loop forever since we've set self.stop_on_empty_queue=True
        for channel in six.iterkeys(self.ssh_channel):
            if channel != '':
                self._end_channel(channel)

        # last is control channel
        self._end_channel('')
Example 11: _MultiDeviceAddN
# Required module: import six [as alias]
# Or: from six import iterkeys [as alias]
def _MultiDeviceAddN(tensor_list):
    """Adds tensors from potentially multiple devices."""
    # Basic function structure comes from control_flow_ops.group().
    # Sort tensors according to their devices.
    tensors_on_device = collections.defaultdict(lambda: [])
    for tensor in tensor_list:
        tensors_on_device[tensor.device].append(tensor)

    # For each device, add the tensors on that device first.
    # Then gather the partial sums from multiple devices.
    # TODO(sjhwang): Create hierarchical aggregation tree as pbar's suggestion.
    # E.g., aggregate per GPU, then per task, and so on.
    summands = []

    def DeviceKey(dev):
        return "" if dev is None else dev

    for dev in sorted(six.iterkeys(tensors_on_device), key=DeviceKey):
        tensors = tensors_on_device[dev]
        with ops.colocate_with(tensors[0].op, ignore_existing=True):
            summands.append(math_ops.add_n(tensors))

    return math_ops.add_n(summands)
Example 12: init_app
# Required module: import six [as alias]
# Or: from six import iterkeys [as alias]
def init_app(self, app):
    @app.before_request
    def connect():
        self.load_config(app)
        g.listeners = getattr(g, 'listeners', {})
        for name, listener_type in six.iteritems(listeners.listeners):
            g.listeners[name] = listener_type(self)
            g.listeners[name].setup()

    @app.after_request
    def disconnect(response):
        for name in six.iterkeys(listeners.listeners):
            listener = g.listeners.pop(name, None)
            if listener:
                listener.teardown()
        return response
Example 13: build_y_vocab
# Required module: import six [as alias]
# Or: from six import iterkeys [as alias]
def build_y_vocab(self):
    pool = Pool(opt.num_workers)
    try:
        rets = pool.map_async(build_y_vocab,
                              [(data_path, 'train')
                               for data_path in opt.train_data_list]).get(99999999)
        pool.close()
        pool.join()
        y_vocab = set()
        for _y_vocab in rets:
            for k in six.iterkeys(_y_vocab):
                y_vocab.add(k)
        self.y_vocab = {y: idx for idx, y in enumerate(y_vocab)}
    except KeyboardInterrupt:
        pool.terminate()
        pool.join()
        raise
    self.logger.info('size of y vocab: %s' % len(self.y_vocab))
    cPickle.dump(self.y_vocab, open(self.y_vocab_path, 'wb'), 2)
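The merge in the middle of the try block can be shown on its own: every worker returns a partial vocabulary dict, the union of their keys becomes the label set, and each label is then mapped to an integer index. The partial dicts below are invented:

import six

partial_vocabs = [{'cat>food': 3, 'cat>toys': 1},   # hypothetical worker outputs
                  {'cat>toys': 5, 'dog>food': 2}]

y_vocab = set()
for _y_vocab in partial_vocabs:
    for k in six.iterkeys(_y_vocab):
        y_vocab.add(k)
y_vocab = {y: idx for idx, y in enumerate(y_vocab)}
assert len(y_vocab) == 3   # three distinct labels across the workers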
Example 14: match
# Required module: import six [as alias]
# Or: from six import iterkeys [as alias]
def match(self, *args, **dargs):
    if len(args) != len(self.args) or len(dargs) != len(self.dargs):
        return False

    for i, expected_arg in enumerate(self.args):
        if not expected_arg.is_satisfied_by(args[i]):
            return False

    # check for incorrect dargs
    for key, value in six.iteritems(dargs):
        if key not in self.dargs:
            return False
        if not self.dargs[key].is_satisfied_by(value):
            return False

    # check for missing dargs
    for key in six.iterkeys(self.dargs):
        if key not in dargs:
            return False

    return True
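The final loop is where six.iterkeys is used: every expected keyword argument has to appear in the actual call. Isolated from the matcher objects, the check amounts to this (the expectation and call dicts are made up):

import six

def all_expected_present(expected, actual):
    for key in six.iterkeys(expected):
        if key not in actual:
            return False
    return True

expected_dargs = {'timeout': 30, 'retries': 3}   # hypothetical expectations
actual_dargs = {'timeout': 30}                   # hypothetical call
assert not all_expected_present(expected_dargs, actual_dargs)   # 'retries' is missing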
Example 15: test_extra_fields
# Required module: import six [as alias]
# Or: from six import iterkeys [as alias]
def test_extra_fields(self):
    class FooModelService(ModelService):
        two = forms.CharField()

        class Meta:
            model = FooModel
            fields = '__all__'

        def process(self):
            pass

    f = FooModelService()
    field_names = list(six.iterkeys(f.fields))
    self.assertEqual(2, len(field_names))
    self.assertEqual('one', field_names[0])
    self.assertEqual('two', field_names[1])