This article collects typical usage examples of the Python method dragnn.python.network_units.get_input_tensor_with_stride. If you are wondering how network_units.get_input_tensor_with_stride is used in practice, the curated example here may help. You can also explore further usage examples from the containing module, dragnn.python.network_units.
The following shows 1 code example of the network_units.get_input_tensor_with_stride method.
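Before the example, it may help to keep the shape convention in mind. The snippet below is an illustrative sketch only (plain TensorFlow 1.x with made-up toy dimensions), not DRAGNN's implementation: it mimics the strided, 3-D [stride, num_steps, dim] layout that get_input_tensor_with_stride() is expected to produce from the concatenated fixed and linked embeddings, which is the layout the bi-LSTM in the example consumes.

# Illustrative sketch only (not DRAGNN's implementation): mimics the strided,
# 3-D input layout that the example below expects from
# get_input_tensor_with_stride(). Toy dimensions are assumptions.
import tensorflow as tf  # TensorFlow 1.x, as in the example below

stride, num_steps, dim = 4, 10, 32  # hypothetical batch/step/embedding sizes
# Assume activations arrive flattened as [stride * num_steps, dim].
inputs_flat = tf.zeros([stride * num_steps, dim])
# The strided view consumed by the RNN library: [stride, num_steps, dim].
inputs_sxnxd = tf.reshape(inputs_flat, [stride, -1, dim])
print(inputs_sxnxd.shape)  # (4, 10, 32)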
Example 1: create
# Required module import: from dragnn.python import network_units [as alias]
# Or: from dragnn.python.network_units import get_input_tensor_with_stride [as alias]
# (In this example, network_units is referenced under the alias `dragnn`.)
def create(self,
           fixed_embeddings,
           linked_embeddings,
           context_tensor_arrays,
           attention_tensor,
           during_training,
           stride=None):
  """Requires |stride|; otherwise see base class."""
  check.NotNone(stride,
                'BulkBiLSTMNetwork requires "stride" and must be called '
                'in the bulk feature extractor component.')

  # Flatten the lengths into a vector.
  lengths = dragnn.lookup_named_tensor('lengths', linked_embeddings)
  lengths_s = tf.squeeze(lengths.tensor, [1])

  # Collect all other inputs into a batched tensor.
  linked_embeddings = [
      named_tensor for named_tensor in linked_embeddings
      if named_tensor.name != 'lengths'
  ]
  inputs_sxnxd = dragnn.get_input_tensor_with_stride(
      fixed_embeddings, linked_embeddings, stride)

  # Since get_input_tensor_with_stride() concatenates the input embeddings, it
  # obscures the static activation dimension, which the RNN library requires.
  # Restore it using set_shape(). Note that set_shape() merges into the known
  # shape, so only specify the activation dimension.
  inputs_sxnxd.set_shape(
      [tf.Dimension(None), tf.Dimension(None), self._input_dim])

  initial_states_forward, initial_states_backward = (
      self._create_initial_states(stride))

  if during_training:
    cells_forward = self._train_cells_forward
    cells_backward = self._train_cells_backward
  else:
    cells_forward = self._inference_cells_forward
    cells_backward = self._inference_cells_backward

  def _bilstm_closure(scope):
    """Applies the bi-LSTM to the current inputs."""
    outputs_sxnxd, _, _ = tf.contrib.rnn.stack_bidirectional_dynamic_rnn(
        cells_forward,
        cells_backward,
        inputs_sxnxd,
        initial_states_fw=initial_states_forward,
        initial_states_bw=initial_states_backward,
        sequence_length=lengths_s,
        parallel_iterations=self._attrs['parallel_iterations'],
        scope=scope)
    return outputs_sxnxd

  outputs_sxnxd = self._apply_with_captured_variables(_bilstm_closure)

  # Layer outputs are not batched; flatten out the batch dimension.
  outputs_snxd = tf.reshape(outputs_sxnxd, [-1, self._output_dim])
  return self._append_base_layers([outputs_snxd])
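As a follow-up, here is a minimal, self-contained TensorFlow 1.x sketch of the tf.contrib.rnn.stack_bidirectional_dynamic_rnn call used in _bilstm_closure, with hypothetical toy LSTM cells standing in for the component's configured forward/backward stacks. It is meant only to illustrate why the final reshape uses self._output_dim: forward and backward outputs are concatenated, so the output depth is twice the per-direction cell size.

# Minimal sketch with hypothetical toy cells; not the component's actual
# configuration. Shows the output shape produced by the stacked bi-LSTM.
import tensorflow as tf

stride, num_steps, input_dim, hidden = 2, 5, 8, 16  # assumed toy sizes
inputs_sxnxd = tf.zeros([stride, num_steps, input_dim])
lengths_s = tf.fill([stride], num_steps)

cells_fw = [tf.contrib.rnn.LSTMCell(hidden)]
cells_bw = [tf.contrib.rnn.LSTMCell(hidden)]
outputs_sxnxd, _, _ = tf.contrib.rnn.stack_bidirectional_dynamic_rnn(
    cells_fw, cells_bw, inputs_sxnxd,
    sequence_length=lengths_s, dtype=tf.float32)

print(outputs_sxnxd.shape)  # (2, 5, 32): depth is 2 * hidden (fw + bw concat)
# The example above then flattens this [stride, num_steps, output_dim] tensor
# to [stride * num_steps, output_dim] before returning it as a layer.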