本文整理汇总了Python中blocks.utils.dict_subset方法的典型用法代码示例。如果您正苦于以下问题:Python utils.dict_subset方法的具体用法?Python utils.dict_subset怎么用?Python utils.dict_subset使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类blocks.utils
的用法示例。
在下文中一共展示了utils.dict_subset方法的3个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: process_batch
# 需要导入模块: from blocks import utils [as 别名]
# 或者: from blocks.utils import dict_subset [as 别名]
def process_batch(self, batch):
    """Accumulate monitored quantities for one batch of data.

    Restricts ``batch`` to the data sources the monitored variables
    actually need, then feeds them through the accumulation function
    and records each resulting value under its variable's name.

    Parameters
    ----------
    batch : dict
        Mapping from data-source names to data; must contain an entry
        for every input of the monitored variables.
    """
    try:
        input_names = [variable.name for variable in self.unique_inputs]
        batch = dict_subset(batch, input_names)
    except KeyError:
        # Re-raise with a message listing what was actually required.
        reraise_as(
            "Not all data sources required for monitoring were"
            " provided. The list of required data sources:"
            " {}.".format(input_names))
    if self._accumulate_fun is None:
        return
    numerical_values = self._accumulate_fun(**batch)
    for value, variable in zip(numerical_values, self.theano_variables):
        self.data[variable.name].append(value)
示例2: cost_matrix
# 需要导入模块: from blocks import utils [as 别名]
# 或者: from blocks.utils import dict_subset [as 别名]
def cost_matrix(self, application_call, outputs, mask=None, **kwargs):
    """Return a matrix of per-position generation costs.

    Adapted from ``BaseSequenceGenerator.cost_matrix``.

    Parameters
    ----------
    application_call
        Supplied by the framework's ``@application`` machinery; used to
        attach per-step states/glimpses and final state values as
        auxiliary variables.
    outputs
        Target output symbols; assumed to have axes
        (time, batch, features, ...).
    mask
        Optional binary mask over ``outputs``; costs at masked-out
        positions are zeroed by elementwise multiplication.
    **kwargs
        Initial states and contexts for the transition, selected by
        name (``self._state_names`` / ``self._context_names``).

    Returns
    -------
    costs
        Readout costs per (time, batch) position, masked if ``mask``
        was given.
    """
    # We assume the data has axes (time, batch, features, ...)
    batch_size = outputs.shape[1]
    # Prepare input for the iterative part; both subsets are optional
    # (must_have=False), so callers may omit any of them.
    states = dict_subset(kwargs, self._state_names, must_have=False)
    # masks in context are optional (e.g. `attended_mask`)
    contexts = dict_subset(kwargs, self._context_names, must_have=False)
    feedback = self.readout.feedback(outputs)
    inputs = self.fork.apply(feedback, as_dict=True)
    # Run the recurrent network
    results = self.transition.apply(
        mask=mask, return_initial_states=True, as_dict=True,
        **dict_union(inputs, states, contexts))
    # Separate the deliverables. The last states are discarded: they
    # are not used to predict any output symbol. The initial glimpses
    # are discarded because they are not used for prediction.
    # Remember, glimpses are computed _before_ output stage, states are
    # computed after.
    states = {name: results[name][:-1] for name in self._state_names}
    glimpses = {name: results[name][1:] for name in self._glimpse_names}
    # Compute the cost. Shift feedback one step along time so position t
    # sees the feedback of symbol t-1; position 0 gets the feedback of
    # the initial outputs instead of the wrapped-around last symbol.
    feedback = tensor.roll(feedback, 1, 0)
    feedback = tensor.set_subtensor(
        feedback[0],
        self.readout.feedback(self.readout.initial_outputs(batch_size)))
    readouts = self.readout.readout(
        feedback=feedback, **dict_union(states, glimpses, contexts))
    costs = self.readout.cost(readouts, outputs)
    if mask is not None:
        costs *= mask
    # Expose per-step states and glimpses for monitoring/inspection.
    for name, variable in list(glimpses.items()) + list(states.items()):
        application_call.add_auxiliary_variable(
            variable.copy(), name=name)
    # These variables can be used to initialize the initial states of the
    # next batch using the last states of the current batch.
    for name in self._state_names:
        application_call.add_auxiliary_variable(
            results[name][-1].copy(), name=name+"_final_value")
    # Capture the full transition results once for later use
    # (NOTE(review): presumably consumed by pruning logic elsewhere in
    # the class, and the flag is initialized in __init__ — confirm).
    if not self.pruning_variables_initialized:
        self.results = results
        self.pruning_variables_initialized = True
    return costs
示例3: cost_matrix_nmt
# 需要导入模块: from blocks import utils [as 别名]
# 或者: from blocks.utils import dict_subset [as 别名]
def cost_matrix_nmt(self, application_call, target_char_seq, target_sample_matrix, target_resample_matrix,
                    target_word_mask, target_char_aux, target_prev_char_seq, target_prev_char_aux, **kwargs):
    """Returns generation costs for output sequences.

    Character-level NMT variant of ``cost_matrix``: the word-level
    recurrent transition is driven by character-sequence feedback, and
    readouts are resampled back to character positions before the cost
    is computed.

    Parameters
    ----------
    application_call
        Supplied by the framework's ``@application`` machinery; used to
        attach auxiliary variables.
    target_char_seq
        Target character sequences; assumed to have axes
        (time, batch, ...).
    target_sample_matrix, target_resample_matrix
        Mapping matrices between character and word positions
        (NOTE(review): exact layout not visible here — confirm against
        the data pipeline).
    target_word_mask
        Mask applied at the word-level transition.
    target_char_aux, target_prev_char_seq, target_prev_char_aux
        Auxiliary character-level inputs consumed by the readout.
    **kwargs
        Initial states and contexts for the transition, selected by
        name.

    See Also
    --------
    :meth:`cost` : Scalar cost.
    """
    # We assume the data has axes (time, batch, features, ...)
    batch_size = target_char_seq.shape[1]
    # Prepare input for the iterative part; both subsets are optional.
    states = dict_subset(kwargs, self._state_names, must_have=False)
    # masks in context are optional (e.g. `attended_mask`)
    contexts = dict_subset(kwargs, self._context_names, must_have=False)
    feedback = self.readout.feedback_apply(target_char_seq, target_sample_matrix, target_char_aux)
    inputs = self.fork.apply(feedback, as_dict=True)
    # Run the recurrent network
    results = self.transition.apply(
        mask=target_word_mask, return_initial_states=True, as_dict=True,
        **dict_union(inputs, states, contexts))
    # Separate the deliverables. The last states are discarded: they
    # are not used to predict any output symbol. The initial glimpses
    # are discarded because they are not used for prediction.
    # Remember, glimpses are computed _before_ output stage, states are
    # computed after.
    states = {name: results[name][:-1] for name in self._state_names}
    glimpses = {name: results[name][1:] for name in self._glimpse_names}
    # Shift feedback one step along time so position t sees the feedback
    # of symbol t-1; position 0 gets the initial-output feedback. With a
    # multi-layer target DGRU, only the top layer's feedback is used.
    feedback = tensor.roll(feedback, 1, 0)
    init_feedback = self.readout.single_feedback(self.readout.initial_outputs(batch_size), batch_size)
    if self.trg_dgru_depth == 1:
        feedback = tensor.set_subtensor(feedback[0], init_feedback)
    else:
        feedback = tensor.set_subtensor(feedback[0], init_feedback[-1])
    decoder_readout_outputs = self.readout.readout(
        feedback=feedback, **dict_union(states, glimpses, contexts))
    # Resample word-level readouts to character positions: move batch to
    # the front for batched_dot, then restore (time, batch, features).
    resampled_representation = tensor.batched_dot(target_resample_matrix,
                                                  decoder_readout_outputs.dimshuffle([1, 0, 2]))
    resampled_readouts = resampled_representation.dimshuffle([1, 0, 2])
    readouts_chars = self.readout.readout_gru(target_prev_char_seq, target_prev_char_aux, resampled_readouts)
    # Compute the cost
    costs = self.readout.cost(readouts_chars, target_char_seq)
    # Expose per-step states and glimpses for monitoring/inspection.
    for name, variable in list(glimpses.items()) + list(states.items()):
        application_call.add_auxiliary_variable(
            variable.copy(), name=name)
    # These variables can be used to initialize the initial states of the
    # next batch using the last states of the current batch.
    for name in self._state_names + self._glimpse_names:
        application_call.add_auxiliary_variable(
            results[name][-1].copy(), name=name + "_final_value")
    return costs