This article collects typical usage examples of the itervalues function from Python's six module. If you have been wondering how to use itervalues, what it does, or what it looks like in real code, the hand-picked samples below may help.
The following 15 code examples of itervalues are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
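Before the examples, a minimal sketch of what six.itervalues does: it calls dict.itervalues() on Python 2 and dict.values() on Python 3, returning a lazy iterator over a mapping's values either way. The dictionary below is made up for illustration.

import six

counts = {'a': 1, 'b': 2, 'c': 3}

# Lazily iterate over the values without building an intermediate list
# (dict.itervalues() on Python 2, dict.values() on Python 3).
for value in six.itervalues(counts):
    print(value)

# The iterator can feed any consumer, e.g. sum().
total = sum(six.itervalues(counts))
print(total)  # 6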
Example 1: _cross_reference_materials
def _cross_reference_materials(self):
    """
    Links the materials to materials (e.g. MAT1, CREEP);
    often this is a pass statement
    """
    for mat in itervalues(self.materials):  # MAT1
        try:
            mat.cross_reference(self)
        except (SyntaxError, RuntimeError, AssertionError, KeyError, ValueError) as e:
            self._ixref_errors += 1
            var = traceback.format_exception_only(type(e), e)
            self._stored_xref_errors.append((mat, var))
            if self._ixref_errors > self._nxref_errors:
                self.pop_xref_errors()

    # CREEP - depends on MAT1
    data = [self.MATS1, self.MATS3, self.MATS8,
            self.MATT1, self.MATT2, self.MATT3, self.MATT4, self.MATT5,
            self.MATT8, self.MATT9]
    for material_deps in data:
        for mat in itervalues(material_deps):
            try:
                mat.cross_reference(self)
            except (SyntaxError, RuntimeError, AssertionError, KeyError, ValueError) as e:
                self._ixref_errors += 1
                var = traceback.format_exception_only(type(e), e)
                self._stored_xref_errors.append((mat, var))
                if self._ixref_errors > self._nxref_errors:
                    self.pop_xref_errors()
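The pattern in Example 1 — swallow per-item cross-reference failures, store them, and only raise once a threshold is exceeded — can be distilled as below. This is a hedged sketch with made-up names (Registry, cross_reference_all), not pyNastran's actual API.

import traceback
from six import itervalues

class Registry(object):
    """Illustrative stand-in for the BDF model; all names are hypothetical."""
    def __init__(self, max_errors=5):
        self._stored_errors = []  # (item, formatted exception) pairs
        self._n_errors = 0
        self._max_errors = max_errors

    def pop_errors(self):
        # Raise everything accumulated so far as a single report.
        report = '\n'.join(''.join(msg) for _, msg in self._stored_errors)
        raise RuntimeError('cross-referencing failed:\n' + report)

    def cross_reference_all(self, items):
        for item in itervalues(items):
            try:
                item.cross_reference(self)
            except (KeyError, ValueError) as e:
                # Defer the failure instead of aborting the whole pass.
                self._n_errors += 1
                msg = traceback.format_exception_only(type(e), e)
                self._stored_errors.append((item, msg))
                if self._n_errors > self._max_errors:
                    self.pop_errors()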
Example 2: _convert_aero
def _convert_aero(model, xyz_scale, time_scale, weight_scale):
    """
    Converts the aero cards
      - CAEROx, PAEROx, SPLINEx, AECOMP, AELIST, AEPARAM, AESTAT, AESURF, AESURFS
    """
    area_scale = xyz_scale ** 2
    velocity_scale = xyz_scale / time_scale
    pressure_scale = weight_scale / xyz_scale ** 2
    density_scale = weight_scale / xyz_scale ** 3

    for aero in itervalues(model.aero):
        #if hasattr(model, 'aero'):
            #aero = model.aero
        print(aero.object_attributes())
        aero.refc *= xyz_scale
        aero.refb *= xyz_scale
        aero.sref *= area_scale
        aero.velocity *= velocity_scale
        assert np.allclose(aero.density, 1.0), aero

    for aeros in itervalues(model.aeros):
        #print(aeros)
        #print(aeros.object_attributes())
        aeros.cref *= xyz_scale
        aeros.bref *= xyz_scale
        aeros.sref *= area_scale

    for caero in itervalues(model.caeros):
        if caero.type in ['CAERO1']:
            caero.p1 *= xyz_scale
            caero.p4 *= xyz_scale
            caero.x12 *= xyz_scale
            caero.x43 *= xyz_scale
        else:
            raise NotImplementedError(caero)

    #for paero in itervalues(model.paeros):
        #paero.cross_reference(model)
    for trim in itervalues(model.trims):
        trim.q *= pressure_scale
    #for spline in itervalues(model.splines):
        #spline.convert(model)
    #for aecomp in itervalues(model.aecomps):
        #aecomp.cross_reference(model)
    #for aelist in itervalues(model.aelists):
        #aelist.cross_reference(model)
    #for aeparam in itervalues(model.aeparams):
        #aeparam.cross_reference(model)
    #for aestat in itervalues(model.aestats):
        #aestat.cross_reference(model)
    #for aesurf in itervalues(model.aesurf):
        #aesurf.cross_reference(model)
    #for aesurfs in itervalues(model.aesurfs):
        #aesurfs.cross_reference(model)

    # update only the FLFACTs corresponding to density
    flfact_ids = set([])
    for flutter in itervalues(model.flutters):
        flfact = flutter.density
        flfact_ids.add(flfact.sid)
    for flfact_id in flfact_ids:  # density
        flfact = model.flfacts[flfact_id]
        flfact.factors *= density_scale
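The scale factors at the top of Example 2 follow from dimensional analysis: if lengths scale by L, times by T, and forces/weights by W, then areas scale by L^2, velocities by L/T, pressures by W/L^2, and weight densities by W/L^3. A quick self-contained check; the conversion constants below are approximate and only for illustration.

# Dimensional-analysis check of the scale factors used in Example 2,
# converting inches -> meters and pounds-force -> newtons (time unchanged).
xyz_scale = 0.0254      # length: in -> m (approximate, illustrative)
time_scale = 1.0        # time unchanged
weight_scale = 4.448    # force: lbf -> N (approximate, illustrative)

area_scale = xyz_scale ** 2                      # L^2
velocity_scale = xyz_scale / time_scale          # L / T
pressure_scale = weight_scale / xyz_scale ** 2   # W / L^2 (force per area)
density_scale = weight_scale / xyz_scale ** 3    # W / L^3 (force per volume)

print(area_scale, velocity_scale, pressure_scale, density_scale)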
Example 3: _setup_communicators
def _setup_communicators(self, comm, parent_dir):
    """
    Assign communicator to this `Component`.

    Args
    ----
    comm : an MPI communicator (real or fake)
        The communicator being offered by the parent system.

    parent_dir : str
        The absolute directory of the parent, or '' if unspecified. Used to
        determine the absolute directory of all FileRefs.
    """
    super(Component, self)._setup_communicators(comm, parent_dir)

    # set absolute directories of any FileRefs
    for meta in chain(itervalues(self._init_unknowns_dict),
                      itervalues(self._init_params_dict)):
        val = meta['val']
        # if var is a FileRef, set its absolute directory
        if isinstance(val, FileRef):
            self._fileref_setup(val)

    if not self.is_active():
        for meta in itervalues(self._init_params_dict):
            meta['remote'] = True
        for meta in itervalues(self._init_unknowns_dict):
            meta['remote'] = True
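Example 3 leans on itertools.chain to walk the values of two metadata dicts in a single loop, without concatenating them into a list first. A minimal standalone version of that idiom; the dicts here are invented.

from itertools import chain
from six import itervalues

unknowns = {'y1': {'val': 1.0, 'remote': False}}
params = {'x1': {'val': 2.0, 'remote': False},
          'x2': {'val': 3.0, 'remote': False}}

# One lazy pass over the values of both dicts.
for meta in chain(itervalues(unknowns), itervalues(params)):
    meta['remote'] = True

print(unknowns, params)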
Example 4: __init__
def __init__(self, wf_dict, gi=None):
    super(Workflow, self).__init__(wf_dict, gi=gi)
    missing_ids = []
    if gi:
        tools_list_by_id = [t.id for t in gi.tools.get_previews()]
    else:
        tools_list_by_id = []
    for k, v in six.iteritems(self.steps):
        # convert step ids to str for consistency with outer keys
        v['id'] = str(v['id'])
        for i in six.itervalues(v['input_steps']):
            i['source_step'] = str(i['source_step'])
        step = self._build_step(v, self)
        self.steps[k] = step
        if step.type == 'tool':
            if not step.tool_inputs or step.tool_id not in tools_list_by_id:
                missing_ids.append(k)
    input_labels_to_ids = {}
    for id_, d in six.iteritems(self.inputs):
        input_labels_to_ids.setdefault(d['label'], set()).add(id_)
    tool_labels_to_ids = {}
    for s in six.itervalues(self.steps):
        if s.type == 'tool':
            tool_labels_to_ids.setdefault(s.tool_id, set()).add(s.id)
    object.__setattr__(self, 'input_labels_to_ids', input_labels_to_ids)
    object.__setattr__(self, 'tool_labels_to_ids', tool_labels_to_ids)
    dag, inv_dag = self._get_dag()
    heads, tails = set(dag), set(inv_dag)
    object.__setattr__(self, 'dag', dag)
    object.__setattr__(self, 'inv_dag', inv_dag)
    object.__setattr__(self, 'source_ids', heads - tails)
    assert self.data_input_ids == set(self.inputs)
    object.__setattr__(self, 'sink_ids', tails - heads)
    object.__setattr__(self, 'missing_ids', missing_ids)
Example 5: test_multiple_fonts
def test_multiple_fonts(self):
    vera = os.path.join(os.path.dirname(__file__), "..", "fonts", "Vera.ttf")
    __current_test_mode_setting = settings.CAPTCHA_FONT_PATH
    settings.CAPTCHA_FONT_PATH = vera

    for key in [store.hashkey for store in six.itervalues(self.stores)]:
        response = self.client.get(reverse("captcha-image", kwargs=dict(key=key)))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response._headers.get("content-type"), ("Content-Type", "image/png"))

    settings.CAPTCHA_FONT_PATH = [vera, vera, vera]
    for key in [store.hashkey for store in six.itervalues(self.stores)]:
        response = self.client.get(reverse("captcha-image", kwargs=dict(key=key)))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response._headers.get("content-type"), ("Content-Type", "image/png"))

    settings.CAPTCHA_FONT_PATH = False
    for key in [store.hashkey for store in six.itervalues(self.stores)]:
        try:
            response = self.client.get(reverse("captcha-image", kwargs=dict(key=key)))
            self.fail()
        except ImproperlyConfigured:
            pass

    settings.CAPTCHA_FONT_PATH = __current_test_mode_setting
Example 6: mergedirs
def mergedirs(listing):
    # type: (List[Dict[Text, Any]]) -> List[Dict[Text, Any]]
    r = []  # type: List[Dict[Text, Any]]
    ents = {}  # type: Dict[Text, Any]
    collided = set()  # type: Set[Text]
    for e in listing:
        if e["basename"] not in ents:
            ents[e["basename"]] = e
        elif e["class"] == "Directory":
            if e.get("listing"):
                ents[e["basename"]].setdefault("listing", []).extend(e["listing"])
            if ents[e["basename"]]["location"].startswith("_:"):
                ents[e["basename"]]["location"] = e["location"]
        elif e["location"] != ents[e["basename"]]["location"]:
            # same basename, different location, collision,
            # rename both.
            collided.add(e["basename"])
            e2 = ents[e["basename"]]

            e["basename"] = urllib.parse.quote(e["location"], safe="")
            e2["basename"] = urllib.parse.quote(e2["location"], safe="")

            e["nameroot"], e["nameext"] = os.path.splitext(e["basename"])
            e2["nameroot"], e2["nameext"] = os.path.splitext(e2["basename"])

            ents[e["basename"]] = e
            ents[e2["basename"]] = e2
    for c in collided:
        del ents[c]
    for e in itervalues(ents):
        if e["class"] == "Directory" and "listing" in e:
            e["listing"] = mergedirs(e["listing"])
    r.extend(itervalues(ents))
    return r
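A hedged usage sketch for Example 6, assuming mergedirs and its imports (including itervalues) are in scope, and assuming the CWL-style listing entries the function manipulates (class, basename, location, optionally listing). The entries below are fabricated; the merging behavior is the point.

# Two Directory entries share the basename "data"; mergedirs folds their
# listings together into a single entry.
listing = [
    {"class": "Directory", "basename": "data", "location": "_:tmp1",
     "listing": [{"class": "File", "basename": "a.txt", "location": "/x/a.txt"}]},
    {"class": "Directory", "basename": "data", "location": "/real/data",
     "listing": [{"class": "File", "basename": "b.txt", "location": "/y/b.txt"}]},
]

merged = mergedirs(listing)
assert len(merged) == 1
assert len(merged[0]["listing"]) == 2
# the anonymous "_:" location is replaced by the concrete one
assert merged[0]["location"] == "/real/data"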
Example 7: start
def start(self, register=True):
    self.running = True
    logger.info('starting %s at %s (pid=%s)', ', '.join(self.service_types), self.endpoint, os.getpid())
    self.recv_loop_greenlet = self.spawn(self.recv_loop)
    self.monitor.start()

    self.service_registry.on_start()
    self.event_system.on_start()

    for service in six.itervalues(self.installed_services):
        service.on_start()
        service.configure({})

    if register:
        for service_type, service in six.iteritems(self.installed_services):
            if not service.register_with_coordinator:
                continue
            try:
                self.service_registry.register(self, service_type)
            except RegistrationFailure:
                logger.info("registration failed %s, %s", service_type, service)
                self.stop()

    for interface in six.itervalues(self.installed_services):
        for pattern, handler in type(interface).event_dispatcher:
            self.subscribe(pattern)
Example 8: get_signals_to_object
def get_signals_to_object(self, sink_object):
    """Get the signals received by a sink object.

    Returns
    -------
    {port : [ReceptionSpec, ...], ...}
        Dictionary mapping ports to the lists of objects specifying
        incoming signals.
    """
    signals = collections.defaultdict(list)

    # Look through all the connections we have a reference to and identify
    # those which terminate at the given object. For those that do, add a
    # new entry to the signal dictionary.
    params_and_sinks = chain(*chain(*(itervalues(x) for x in
                                      itervalues(self._connections))))
    for param_and_sinks in params_and_sinks:
        # Each entry pairs transmission parameters with sinks.
        # Extract the transmission parameters.
        sig_params, _ = param_and_sinks.parameters

        # For each sink, if the sink object is the specified object
        # then add the signal to the list.
        for sink in param_and_sinks.sinks:
            if sink.sink_object is sink_object:
                # This is the desired sink object, so remember the
                # signal. First construct the reception specification.
                signals[sink.port].append(
                    ReceptionSpec(sig_params, sink.reception_parameters)
                )
    return signals
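The double chain(*chain(*(itervalues(...)))) in Example 8 flattens a two-level dict-of-dicts-of-lists into one flat stream of items. The same shape in miniature, with invented data:

from itertools import chain
from six import itervalues

# dict -> dict -> list -> item, mirroring self._connections in Example 8
connections = {
    'obj_a': {'port0': [1, 2], 'port1': [3]},
    'obj_b': {'port0': [4]},
}

# The inner chain merges the per-port lists of each object; the outer chain
# merges items across all objects, yielding every item in one flat stream.
items = chain(*chain(*(itervalues(x) for x in itervalues(connections))))
print(sorted(items))  # [1, 2, 3, 4]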
Example 9: build_update
def build_update(self):
    """
    Simulate Langevin dynamics using a discretized integrator. Its
    discretization error goes to zero as the learning rate decreases.
    """
    old_sample = {z: tf.gather(qz.params, tf.maximum(self.t - 1, 0))
                  for z, qz in six.iteritems(self.latent_vars)}

    # Simulate Langevin dynamics.
    learning_rate = self.step_size / tf.cast(self.t + 1, tf.float32)
    grad_log_joint = tf.gradients(self._log_joint(old_sample),
                                  list(six.itervalues(old_sample)))
    sample = {}
    for z, qz, grad_log_p in \
            zip(six.iterkeys(self.latent_vars),
                six.itervalues(self.latent_vars),
                grad_log_joint):
        event_shape = qz.get_event_shape()
        normal = Normal(mu=tf.zeros(event_shape),
                        sigma=learning_rate * tf.ones(event_shape))
        sample[z] = old_sample[z] + 0.5 * learning_rate * grad_log_p + \
            normal.sample()

    # Update Empirical random variables.
    assign_ops = []
    variables = {x.name: x for x in
                 tf.get_default_graph().get_collection(tf.GraphKeys.VARIABLES)}
    for z, qz in six.iteritems(self.latent_vars):
        variable = variables[qz.params.op.inputs[0].op.inputs[0].name]
        assign_ops.append(tf.scatter_update(variable, self.t, sample[z]))

    # Increment n_accept.
    assign_ops.append(self.n_accept.assign_add(1))
    return tf.group(*assign_ops)
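Example 9 is a stochastic-gradient Langevin dynamics step: with a decaying step size eps_t, the new sample is z + (eps_t / 2) * grad log p(z) plus Gaussian noise. A plain NumPy sketch of the same update, not Edward's API; note the noise scale here mirrors the code above (sigma equal to the learning rate), whereas Welling-Teh SGLD uses standard deviation sqrt(eps_t).

import numpy as np

def sgld_step(z, grad_log_p, step_size, t, rng=np.random):
    """One Langevin update in the style of Example 9 (NumPy sketch)."""
    eps_t = step_size / float(t + 1)  # decaying step size
    noise = rng.normal(loc=0.0, scale=eps_t, size=np.shape(z))
    return z + 0.5 * eps_t * grad_log_p + noise

# Toy target: standard normal, so grad log p(z) = -z.
z = 5.0
for t in range(1000):
    z = sgld_step(z, grad_log_p=-z, step_size=1.0, t=t)
print(z)  # drifts toward the high-density region around 0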
Example 10: _Net_batch
def _Net_batch(self, blobs):
    """
    Batch blob lists according to net's batch size.

    Take
    blobs: Keys are blob names and values are lists of blobs (of any length).
           Naturally, all the lists should have the same length.

    Give (yield)
    batch: {blob name: list of blobs} dict for a single batch.
    """
    num = len(six.next(six.itervalues(blobs)))
    batch_size = six.next(six.itervalues(self.blobs)).num
    remainder = num % batch_size
    num_batches = num // batch_size

    # Yield full batches.
    for b in range(num_batches):
        i = b * batch_size
        yield {name: blobs[name][i:i + batch_size] for name in blobs}

    # Yield last padded batch, if any.
    if remainder > 0:
        padded_batch = {}
        for name in blobs:
            padding = np.zeros((batch_size - remainder,)
                               + blobs[name].shape[1:])
            padded_batch[name] = np.concatenate([blobs[name][-remainder:],
                                                 padding])
        yield padded_batch
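Examples 10 and 12 both use six.next(six.itervalues(d)) to peek at one arbitrary value of a dict without knowing any key, e.g. to read a common length off parallel lists. The idiom in isolation, with an invented dict:

import six

blobs = {'data': [1, 2, 3, 4], 'label': [0, 1, 0, 1]}

# Grab any one value to inspect shared structure; all the lists are
# assumed to have the same length, so which key we land on is irrelevant.
num = len(six.next(six.itervalues(blobs)))
print(num)  # 4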
Example 11: test_pre_fit
def test_pre_fit():
    y0 = synthetic_spectrum()
    x0 = np.arange(len(y0))
    # the following items should appear
    item_list = ['Ar_K', 'Fe_K', 'compton', 'elastic']

    param = get_para()

    # fit without weights
    x, y_total, area_v = linear_spectrum_fitting(x0, y0, param, weights=None)
    for v in item_list:
        assert_true(v in y_total)
    sum1 = np.sum(six.itervalues(y_total))
    # r squared as a measurement
    r1 = 1 - np.sum((sum1 - y0)**2) / np.sum((y0 - np.mean(y0))**2)
    assert_true(r1 > 0.85)

    # fit with weights
    w = 1 / np.sqrt(y0)
    x, y_total, area_v = linear_spectrum_fitting(x0, y0, param, weights=1/np.sqrt(y0))
    for v in item_list:
        assert_true(v in y_total)
    sum2 = np.sum(six.itervalues(y_total))
    # r squared as a measurement
    r2 = 1 - np.sum((sum2 - y0)**2) / np.sum((y0 - np.mean(y0))**2)
    assert_true(r2 > 0.85)
Example 12: _Net_forward_all
def _Net_forward_all(self, blobs=None, **kwargs):
    """
    Run net forward in batches.

    Take
    blobs: list of blobs to extract as in forward()
    kwargs: Keys are input blob names and values are blob ndarrays.
            Refer to forward().

    Give
    all_outs: {blob name: list of blobs} dict.
    """
    # Collect outputs from batches
    all_outs = {out: [] for out in set(self.outputs + (blobs or []))}
    for batch in self._batch(kwargs):
        outs = self.forward(blobs=blobs, **batch)
        for out, out_blob in six.iteritems(outs):
            all_outs[out].extend(out_blob.copy())

    # Package in ndarray.
    for out in all_outs:
        all_outs[out] = np.asarray(all_outs[out])

    # Discard padding.
    pad = len(six.next(six.itervalues(all_outs))) - len(six.next(six.itervalues(kwargs)))
    if pad:
        for out in all_outs:
            all_outs[out] = all_outs[out][:-pad]
    return all_outs
Example 13: __init__
def __init__(self, history_specs, initial_sids, initial_dt):

    # History specs to be served by this container.
    self.history_specs = history_specs
    self.frequency_groups = \
        group_by_frequency(itervalues(self.history_specs))

    # The set of fields specified by all history specs
    self.fields = set(spec.field for spec in itervalues(history_specs))

    # This panel contains raw minutes for periods that haven't been fully
    # completed. When a frequency period rolls over, these minutes are
    # digested using some sort of aggregation call on the panel (e.g. `sum`
    # for volume, `max` for high, `min` for low, etc.).
    self.buffer_panel = self.create_buffer_panel(
        initial_sids,
        initial_dt,
    )

    # Dictionaries with Frequency objects as keys.
    self.digest_panels, self.cur_window_starts, self.cur_window_closes = \
        self.create_digest_panels(initial_sids, initial_dt)

    # Populating initial frames here, so that the cost of creating the
    # initial frames does not show up when profiling. These frames are
    # cached since mid-stream creation of containing data frames on every
    # bar is expensive.
    self.create_return_frames(initial_dt)

    # Helps prop up the prior day panel against having a nan, when the data
    # has been seen.
    self.last_known_prior_values = {field: {} for field in self.fields}
Example 14: stop_all
def stop_all(self):
    for p in itervalues(self._programs):
        p.log_cpu_times()
    for p in itervalues(self._programs):
        p.stop()
    for p in itervalues(self._programs):
        p.wait_or_kill()
Example 15: _iter_vars
def _iter_vars(self):
    for var in itervalues(self.vars):
        if var.is_expression() or not var.is_indexed():
            yield var
        else:
            for v in itervalues(var):
                yield v
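Example 15 shows itervalues used recursively inside a generator: scalar entries are yielded directly, while container entries have their own values iterated. The same shape with plain dicts, using invented data in place of the solver's variable objects:

from six import itervalues

def iter_leaves(d):
    # Yield scalar values directly; for nested dicts, yield their values,
    # mirroring the indexed-variable case in Example 15.
    for v in itervalues(d):
        if isinstance(v, dict):
            for leaf in itervalues(v):
                yield leaf
        else:
            yield v

vars_ = {'x': 1, 'y': {'y[0]': 2, 'y[1]': 3}}
print(sorted(iter_leaves(vars_)))  # [1, 2, 3]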