This article collects typical usage examples of the Python method utils.AttributeDict.iteritems. If you are unsure what AttributeDict.iteritems does or how to call it, the curated examples below may help. You can also read more about its containing class, utils.AttributeDict.
Two code examples of the AttributeDict.iteritems method are shown below, sorted by popularity by default.
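Both examples assume the AttributeDict helper from the project's utils module. Its exact implementation is not shown on this page; the sketch below is only the usual pattern for such a class, namely a dict subclass whose keys are also accessible as attributes, so it inherits dict.iteritems on Python 2 (on Python 3 the equivalent call is items()).

class AttributeDict(dict):
    """Sketch only: a dict whose keys can also be read and written as attributes."""
    def __getattr__(self, key):
        try:
            return self[key]
        except KeyError:
            raise AttributeError(key)

    def __setattr__(self, key, value):
        self[key] = value


p = AttributeDict({'seed': 1, 'dseed': None})
p.lr = 0.002                        # attribute writes land in the dict
for k, v in p.iteritems():          # Python 2; use p.items() on Python 3
    print("{}: {}".format(k, v))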
Example 1: load_and_log_params
# Required import: from utils import AttributeDict  [as alias]
# Or: from utils.AttributeDict import iteritems  [as alias]
# (load_df, logger and sys are assumed to be provided by the surrounding module)
def load_and_log_params(cli_params):
    cli_params = AttributeDict(cli_params)
    if cli_params.get('load_from'):
        p = load_df(cli_params.load_from, 'params').to_dict()[0]
        p = AttributeDict(p)
        for key in cli_params.iterkeys():
            if key not in p:
                p[key] = None
        new_params = cli_params
        loaded = True
    else:
        p = cli_params
        new_params = {}
        loaded = False

    # Default dseed to seed unless specified explicitly
    if p.get('dseed') is None and p.get('seed') is not None:
        p['dseed'] = p['seed']

    logger.info('== COMMAND LINE ==')
    logger.info(' '.join(sys.argv))

    logger.info('== PARAMETERS ==')
    for k, v in p.iteritems():
        if new_params.get(k) is not None:
            p[k] = new_params[k]
            replace_str = "<- " + str(new_params.get(k))
        else:
            replace_str = ""
        logger.info(" {:20}: {:<20} {}".format(k, v, replace_str))
    return p, loaded
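The heart of Example 1 is the iteritems loop that overrides loaded parameters with any non-None command-line values while logging each decision. Below is a simplified, self-contained sketch of that merge-and-log pattern; the names merge_and_log and the sample dicts are hypothetical, this is not the author's function, and load_df/AttributeDict are deliberately left out.

import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

def merge_and_log(loaded, overrides):
    # Override loaded values with any non-None override and log each parameter.
    merged = dict(loaded)
    logger.info('== PARAMETERS ==')
    for k, v in merged.iteritems():          # Python 2; .items() on Python 3
        if overrides.get(k) is not None:
            merged[k] = overrides[k]
            replace_str = "<- " + str(overrides[k])
        else:
            replace_str = ""
        logger.info(" {:20}: {:<20} {}".format(k, str(v), replace_str))
    return merged

merge_and_log({'seed': 1, 'dseed': None, 'lr': 0.002}, {'dseed': 7})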
Example 2: apply_tagger
# Required import: from utils import AttributeDict  [as alias]
# Or: from utils.AttributeDict import iteritems  [as alias]
#......... part of the code is omitted here (including the enclosing per-step iteration loop this block appears to run inside) .........
        if step == 0:
            # No values from previous iteration, so let's make them up
            m, z = self.init_m_z(input_shape)
            z_hat_pre_bin = None
            # Keep them in the bookkeeping for the visualizations.
            if y:
                d.z.append(z)
                d.m.append(m)
        else:
            # Feed in the previous iteration's estimates
            z = z_hat
            m = m_hat

        # Compute m_lh
        m_lh = self.m_lh(x_corr, z, v)
        z_delta = self.f_z_deriv(x_corr, z, m)
        z_tilde = z_hat_pre_bin if z_hat_pre_bin is not None else z

        # Concatenate all inputs
        inputs = [z_tilde, z_delta, m, m_lh]
        inputs = T.concatenate(inputs, axis=2)

        # Projection, batch normalization and activation to a hidden layer
        z = self.proj(inputs, in_dim * 4, self.p.encoder_proj[0])
        z -= z.mean((0, 1), keepdims=True)
        z /= T.sqrt(z.var((0, 1), keepdims=True) + np.float32(1e-10))
        z += self.bias(0.0 * np.ones(self.p.encoder_proj[0]), 'b')
        h = self.apply_act(z, 'relu')

        # The first dimension is the group. Flatten it together with the
        # minibatch dimension so the parametric mapping computes all groups
        # in parallel.
        h, undo_flatten = flatten_first_two_dims(h)

        # Parametric mapping
        # ==================
        self.ladder.apply(None, self.y, h)
        ladder_encoder_output = undo_flatten(self.ladder.act.corr.unlabeled.h[len(self.p.encoder_proj) - 1])
        ladder_decoder_output = undo_flatten(self.ladder.act.est.z[0])

        # Decoder
        # =======
        # compute z_hat
        z_u = self.proj(ladder_decoder_output, self.p.encoder_proj[0], in_dim, scope='z_u')
        z_u -= z_u.mean((0, 1), keepdims=True)
        z_u /= T.sqrt(z_u.var((0, 1), keepdims=True) + np.float32(1e-10))
        z_hat = self.weight(np.ones(in_dim), 'c1') * z_u + self.bias(np.zeros(in_dim), 'b1')
        z_hat = z_hat.reshape(input_shape)

        # compute m_hat
        m_u = self.proj(ladder_decoder_output, self.p.encoder_proj[0], in_dim, scope='m_u')
        m_u -= m_u.mean((0, 1), keepdims=True)
        m_u /= T.sqrt(m_u.var((0, 1), keepdims=True) + np.float32(1e-10))
        c = self.weight(np.float32(1), 'c2')
        m_hat = nn.softmax_n(m_u * c, axis=0)
        m_hat = m_hat.reshape(input_shape)

        # Apply sigmoid activation if input_type is binary
        if self.p.input_type == 'binary':
            z_hat_pre_bin = z_hat
            z_hat = self.apply_act(z_hat, 'sigmoid')

        # Collapse layer
        # ==============
        # Remove the last dim, which is assumed to be class 'None'
        pred = ladder_encoder_output[:, :, :-1]
        # Normalize
        pred /= T.sum(T.sum(pred, axis=2, keepdims=True), axis=0, keepdims=True)

        # Denoising and classification costs
        # ==================================
        if y:
            class_cost, class_error = self.compute_classification_cost_and_error(pred, y)
            d.pred.append(pred)
            d.class_cost.append(class_cost)
            d.class_error.append(class_error)
            d.m.append(m_hat)
            d.z.append(z_hat)
        else:
            d.denoising_cost.append(self.denoising_cost(z_hat, m_hat, x, v))
            ami_score, ami_score_per_sample = self.mask_accuracy(self.masks_unlabeled, m_hat)
            d.ami_score.append(ami_score)
            d.ami_score_per_sample.append(ami_score_per_sample)

    # Stack each list of per-step tensors into one tensor
    d = AttributeDict({key: T.stacklists(val) for key, val in d.iteritems()})
    return d
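In Example 2 the iteritems call appears only on the final line, where the per-step bookkeeping lists gathered in d are stacked into single tensors with a new leading step axis. A rough NumPy analogue of that pattern (np.stack standing in for Theano's T.stacklists, toy keys and shapes assumed) looks like this:

import numpy as np

# Hypothetical bookkeeping: two iteration steps, each appending a (3, 2) array.
d = {'z': [np.zeros((3, 2)), np.ones((3, 2))],
     'm': [np.ones((3, 2)), np.ones((3, 2))]}

# Stack each list along a new leading per-step axis, mirroring
# AttributeDict({key: T.stacklists(val) for key, val in d.iteritems()}).
stacked = {key: np.stack(val) for key, val in d.iteritems()}   # .items() on Python 3
print(stacked['z'].shape)   # (2, 3, 2): the step index comes first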