

Python sonnet.LSTM Attribute: Code Examples

This article collects typical usage examples of the sonnet.LSTM attribute in Python. If you are wondering what sonnet.LSTM does, how to use it, or what real code using it looks like, the curated examples below should help. You can also explore further usage examples from the sonnet package itself.


The following presents 8 code examples of the sonnet.LSTM attribute, ordered by popularity.
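Before the examples, here is a minimal, self-contained sketch of building and unrolling an snt.LSTM core in the TF1 / Sonnet 1 style that most of the examples below use. All sizes and placeholder shapes are illustrative assumptions, not values taken from any of the projects:

import numpy as np
import sonnet as snt
import tensorflow as tf

batch_size, seq_len, input_size, hidden_size = 4, 10, 8, 32
inputs = tf.placeholder(tf.float32, [batch_size, seq_len, input_size])

lstm = snt.LSTM(hidden_size)                    # an RNNCore, usable as an RNNCell
initial_state = lstm.initial_state(batch_size)  # zero hidden and cell state
output_seq, final_state = tf.nn.dynamic_rnn(
    lstm, inputs, initial_state=initial_state, time_major=False)

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  out = sess.run(output_seq,
                 {inputs: np.zeros([batch_size, seq_len, input_size])})
  print(out.shape)  # (4, 10, 32)

Note that Example 3 below uses the Sonnet 2 / TF2 API instead, where an snt.LSTM is called directly with (inputs, prev_state).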

Example 1: __init__

# Required import: import sonnet as snt
# Or: from sonnet import LSTM
def __init__(self,
               access_config,
               controller_config,
               output_size,
               clip_value=None,
               name='dnc'):
    """Initializes the DNC core.

    Args:
      access_config: dictionary of access module configurations.
      controller_config: dictionary of controller (LSTM) module configurations.
      output_size: output dimension size of core.
      clip_value: clips controller and core output values to between
          `[-clip_value, clip_value]` if specified.
      name: module name (default 'dnc').

    Raises:
      TypeError: if direct_input_size is not None for any access module other
        than KeyValueMemory.
    """
    super(DNC, self).__init__(name=name)

    with self._enter_variable_scope():
      self._controller = snt.LSTM(**controller_config)
      self._access = access.MemoryAccess(**access_config)

    self._access_output_size = np.prod(self._access.output_size.as_list())
    self._output_size = tf.TensorShape([output_size])
    self._clip_value = clip_value or 0

    self._state_size = DNCState(
        access_output=self._access_output_size,
        access_state=self._access.state_size,
        controller_state=self._controller.state_size) 
Author: deepmind | Project: dnc | Lines: 37 | Source: dnc.py
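For context, a hedged construction sketch for this core. The configuration dictionaries below are modeled on the flag defaults in the repository's training script; treat the exact values as assumptions:

access_config = {"memory_size": 16, "word_size": 16, "num_reads": 4, "num_writes": 1}
controller_config = {"hidden_size": 64}
dnc_core = DNC(access_config, controller_config, output_size=10, clip_value=20)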

Example 2: make_rnn

# Required import: import sonnet as snt
# Or: from sonnet import LSTM
def make_rnn(hparams, name):
    """Constructs a DeepRNN using hparams.rnn_hidden_sizes."""
    regularizers = {
        snt.LSTM.W_GATES: regularizer(hparams)
    }
    with tf.variable_scope(name):
        layers = [snt.LSTM(size, regularizers=regularizers)
                  for size in hparams.rnn_hidden_sizes]
        return snt.DeepRNN(layers, skip_connections=False, name=name) 
Author: google | Project: vae-seq | Lines: 11 | Source: util.py
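A hedged usage sketch. HParams here is a stand-in with the one field make_rnn reads, and regularizer is an assumed helper (the real one lives elsewhere in the vae-seq codebase); it is mocked below as a plain L2 penalty on the gate weights:

import collections
import tensorflow as tf

HParams = collections.namedtuple("HParams", ["rnn_hidden_sizes", "l2"])

def regularizer(hparams):
  # Assumed stand-in for the project's helper: an L2 weight penalty.
  return tf.contrib.layers.l2_regularizer(hparams.l2)

hparams = HParams(rnn_hidden_sizes=[64, 64], l2=1e-4)
rnn = make_rnn(hparams, name="decoder_rnn")  # a two-layer snt.DeepRNN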

Example 3: __init__

# Required import: import sonnet as snt
# Or: from sonnet import LSTM
def __init__(self, hidden_sizes: Sequence[int], num_actions: int):
    super().__init__(name='policy_value_net')
    self._torso = snt.nets.MLP(hidden_sizes, activate_final=True, name='torso')
    self._core = snt.LSTM(hidden_sizes[-1], name='rnn')
    self._policy_head = snt.Linear(num_actions, name='policy_head')
    self._value_head = snt.Linear(1, name='value_head') 
Author: deepmind | Project: bsuite | Lines: 8 | Source: agent.py
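Only the constructor is shown above; the forward pass is an assumption. A plausible Sonnet 2 / TF2 completion, threading the LSTM state explicitly, might look like this:

import sonnet as snt
import tensorflow as tf

class PolicyValueNet(snt.Module):
  """Assumed completion of the agent above: MLP torso, LSTM core, two heads."""

  def __init__(self, hidden_sizes, num_actions):
    super().__init__(name='policy_value_net')
    self._torso = snt.nets.MLP(hidden_sizes, activate_final=True, name='torso')
    self._core = snt.LSTM(hidden_sizes[-1], name='rnn')
    self._policy_head = snt.Linear(num_actions, name='policy_head')
    self._value_head = snt.Linear(1, name='value_head')

  def initial_state(self, batch_size):
    return self._core.initial_state(batch_size)

  def __call__(self, inputs, state):
    embedding = self._torso(inputs)
    embedding, new_state = self._core(embedding, state)
    logits = self._policy_head(embedding)
    value = tf.squeeze(self._value_head(embedding), axis=-1)
    return (logits, value), new_state

net = PolicyValueNet([64, 64], num_actions=4)
state = net.initial_state(batch_size=1)
(logits, value), state = net(tf.zeros([1, 8]), state)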

Example 4: __init__

# Required import: import sonnet as snt
# Or: from sonnet import LSTM
def __init__(self, output_size, layers, preprocess_name="identity",
               preprocess_options=None, scale=1.0, initializer=None,
               name="deep_lstm"):
    """Creates an instance of `StandardDeepLSTM`.

    Args:
      output_size: Output size of the final linear layer.
      layers: Output sizes of LSTM layers.
      preprocess_name: Gradient preprocessing class name (in `l2l.preprocess` or
          tf modules). Default is `tf.identity`.
      preprocess_options: Gradient preprocessing options.
      scale: Gradient scaling (default is 1.0).
      initializer: Variable initializer for linear layer. See `snt.Linear` and
          `snt.LSTM` docs for more info. This parameter can be a string (e.g.
          "zeros" will be converted to tf.zeros_initializer).
      name: Module name.
    """
    super(StandardDeepLSTM, self).__init__(name=name)

    self._output_size = output_size
    self._scale = scale

    if hasattr(preprocess, preprocess_name):
      preprocess_class = getattr(preprocess, preprocess_name)
      self._preprocess = preprocess_class(**preprocess_options)
    else:
      self._preprocess = getattr(tf, preprocess_name)

    with tf.variable_scope(self._template.variable_scope):
      self._cores = []
      for i, size in enumerate(layers, start=1):
        name = "lstm_{}".format(i)
        init = _get_layer_initializers(initializer, name,
                                       ("w_gates", "b_gates"))
        self._cores.append(snt.LSTM(size, name=name, initializers=init))
      self._rnn = snt.DeepRNN(self._cores, skip_connections=False,
                              name="deep_rnn")

      init = _get_layer_initializers(initializer, "linear", ("w", "b"))
      self._linear = snt.Linear(output_size, name="linear", initializers=init) 
Author: deepmind | Project: learning-to-learn | Lines: 42 | Source: networks.py
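A hedged construction sketch. The preprocessing name and options assume a LogAndSign class in the repository's l2l.preprocess module (consistent with the docstring's description), and the layer sizes are illustrative:

optimizer_net = StandardDeepLSTM(
    output_size=1,
    layers=(20, 20),
    preprocess_name="LogAndSign",
    preprocess_options={"k": 5},
    scale=0.01)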

Example 5: __init__

# Required import: import sonnet as snt
# Or: from sonnet import LSTM
def __init__(self, init_with_true_state=False, model='2lstm', **unused_kwargs):

        self.placeholders = {
            'o': tf.placeholder('float32', [None, None, 24, 24, 3], 'observations'),
            'a': tf.placeholder('float32', [None, None, 3], 'actions'),
            's': tf.placeholder('float32', [None, None, 3], 'states'),
            'keep_prob': tf.placeholder('float32')}
        self.pred_states = None
        self.init_with_true_state = init_with_true_state
        self.model = model

        # build models
        # <-- observation
        self.encoder = snt.Sequential([
            snt.nets.ConvNet2D([16, 32, 64], [[3, 3]], [2], [snt.SAME], activate_final=True, name='encoder/convnet'),
            snt.BatchFlatten(),
            lambda x: tf.nn.dropout(x, self.placeholders['keep_prob']),
            snt.Linear(128, name='encoder/Linear'),
            tf.nn.relu,
        ])

        # <-- action
        if self.model == '2lstm':
            self.rnn1 = snt.LSTM(512)
            self.rnn2 = snt.LSTM(512)
        elif self.model == '2gru':
            self.rnn1 = snt.GRU(512)
            self.rnn2 = snt.GRU(512)
        elif self.model == 'ff':
            self.ff_lstm_replacement = snt.Sequential([
                snt.Linear(512),
                tf.nn.relu,
                snt.Linear(512),
                tf.nn.relu])

        self.belief_decoder = snt.Sequential([
            snt.Linear(256),
            tf.nn.relu,
            snt.Linear(256),
            tf.nn.relu,
            snt.Linear(3)
        ]) 
Author: tu-rbo | Project: differentiable-particle-filters | Lines: 43 | Source: rnn.py
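A hedged construction sketch: the model argument selects between the stacked-LSTM, stacked-GRU, and feed-forward variants. The enclosing class name is not visible in this excerpt, so RNN below is an assumption:

model_lstm = RNN(model='2lstm')   # two snt.LSTM(512) cores
model_gru = RNN(model='2gru')     # two snt.GRU(512) cores
model_ff = RNN(model='ff')        # feed-forward replacement, no recurrence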

Example 6: __init__

# Required import: import sonnet as snt
# Or: from sonnet import LSTM
def __init__(self, obs, nums, glimpse_size=(20, 20),
                 inpt_encoder_hidden=[256]*2,
                 glimpse_encoder_hidden=[256]*2,
                 glimpse_decoder_hidden=[252]*2,
                 transform_estimator_hidden=[256]*2,
                 steps_pred_hidden=[50]*1,
                 baseline_hidden=[256, 128]*1,
                 transform_var_bias=-2.,
                 step_bias=0.,
                 *args, **kwargs):

        self.transform_var_bias = tf.Variable(transform_var_bias, trainable=False,
                                              dtype=tf.float32, name='transform_var_bias')
        self.step_bias = tf.Variable(step_bias, trainable=False, dtype=tf.float32, name='step_bias')
        self.baseline = BaselineMLP(baseline_hidden)

        super(AIRonMNIST, self).__init__(
            *args,
            obs=obs,
            nums=nums,
            glimpse_size=glimpse_size,
            n_appearance=50,
            transition=snt.LSTM(256),
            input_encoder=partial(Encoder, inpt_encoder_hidden),
            glimpse_encoder=partial(Encoder, glimpse_encoder_hidden),
            glimpse_decoder=partial(Decoder, glimpse_decoder_hidden),
            transform_estimator=partial(StochasticTransformParam, transform_estimator_hidden,
                                      scale_bias=self.transform_var_bias),
            steps_predictor=partial(StepsPredictor, steps_pred_hidden, self.step_bias),
            output_std=.3,
            **kwargs
        ) 
Author: akosiorek | Project: attend_infer_repeat | Lines: 34 | Source: mnist_model.py
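The partial(...) calls above are deferred factories: they bind the hidden-size lists now, and the AIR base class is expected to call them later with the remaining arguments. A minimal, self-contained illustration of the pattern (names hypothetical):

from functools import partial

def make_mlp(hidden_sizes, activation):
    return hidden_sizes, activation

factory = partial(make_mlp, [256, 256])  # bind the sizes now
print(factory('relu'))                   # ([256, 256], 'relu') -- rest supplied later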

Example 7: __init__

# Required import: import sonnet as snt
# Or: from sonnet import LSTM
def __init__(self,
               target_ensembles,
               nh_lstm,
               nh_bottleneck,
               nh_embed=None,
               dropoutrates_bottleneck=None,
               bottleneck_weight_decay=0.0,
               bottleneck_has_bias=False,
               init_weight_disp=0.0,
               name="grid_cells_core"):
    """Constructor of the RNN cell.

    Args:
      target_ensembles: Targets, place cells and head direction cells.
      nh_lstm: Size of LSTM cell.
      nh_bottleneck: Size of the bottleneck linear layer between the LSTM
        output and the module output.
      nh_embed: Number of hidden units between the input and the LSTM input.
      dropoutrates_bottleneck: Iterable of keep rates in (0, 1]. The bottleneck
        layer is partitioned into as many groups as this iterable has entries.
      bottleneck_weight_decay: Weight decay used in the bottleneck layer.
      bottleneck_has_bias: If the bottleneck has a bias.
      init_weight_disp: Displacement in the weights initialisation.
      name: the name of the module.
    """
    super(GridCellsRNNCell, self).__init__(name=name)
    self._target_ensembles = target_ensembles
    self._nh_embed = nh_embed
    self._nh_lstm = nh_lstm
    self._nh_bottleneck = nh_bottleneck
    self._dropoutrates_bottleneck = dropoutrates_bottleneck
    self._bottleneck_weight_decay = bottleneck_weight_decay
    self._bottleneck_has_bias = bottleneck_has_bias
    self._init_weight_disp = init_weight_disp
    self.training = False
    with self._enter_variable_scope():
      self._lstm = snt.LSTM(self._nh_lstm) 
Author: deepmind | Project: grid-cells | Lines: 38 | Source: model.py
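A hedged constructor sketch; the sizes below are illustrative assumptions, not the repository's configuration:

cell = GridCellsRNNCell(
    target_ensembles=target_ensembles,  # assumed: list of place/head-direction ensembles
    nh_lstm=128,
    nh_bottleneck=256,
    dropoutrates_bottleneck=[0.5],
    bottleneck_weight_decay=1e-5)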

Example 8: _build

# Required import: import sonnet as snt
# Or: from sonnet import LSTM
def _build(self, init_conds, vels, training=False):
    """Outputs place, and head direction cell predictions from velocity inputs.

    Args:
      init_conds: Initial conditions given by ensemble activatons, list [BxN_i]
      vels:  Translational and angular velocities [BxTxV]
      training: Activates and deactivates dropout

    Returns:
      [logits_i]:
        logits_i: Logits predicting i-th ensemble activations (BxTxN_i)
    """
    # Calculate initialization for LSTM. Concatenate pc and hdc activations
    concat_init = tf.concat(init_conds, axis=1)

    init_lstm_state = snt.Linear(self._nh_lstm, name="state_init")(concat_init)
    init_lstm_cell = snt.Linear(self._nh_lstm, name="cell_init")(concat_init)
    self._core.training = training

    # Run LSTM
    output_seq, final_state = tf.nn.dynamic_rnn(cell=self._core,
                                                inputs=(vels,),
                                                time_major=False,
                                                initial_state=(init_lstm_state,
                                                               init_lstm_cell))
    ens_targets = output_seq[:-2]
    bottleneck = output_seq[-2]
    lstm_output = output_seq[-1]
    # Return
    return (ens_targets, bottleneck, lstm_output), final_state 
Author: deepmind | Project: grid-cells | Lines: 32 | Source: model.py
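A hedged call sketch for this _build, where model is an instance of the enclosing module. The ensemble sizes (256 place cells, 12 head-direction cells) and 3-dimensional velocities follow the grid-cells paper but are assumptions here:

init_conds = [tf.placeholder(tf.float32, [None, 256]),  # place-cell activations
              tf.placeholder(tf.float32, [None, 12])]   # head-direction activations
vels = tf.placeholder(tf.float32, [None, None, 3])
(ens_logits, bottleneck, lstm_out), final_state = model(init_conds, vels, training=True)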


Note: The sonnet.LSTM attribute examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers, and copyright remains with the original authors; consult each project's license before distributing or reusing the code. Do not reproduce this article without permission.