

Python tensorflow.tanh Method Code Examples

This article collects typical usage examples of the tensorflow.tanh method in Python. If you are unsure what tensorflow.tanh does or how to call it, the curated examples below should help. You can also browse further usage examples of this method within the tensorflow package.


Below are 15 code examples of the tensorflow.tanh method, sorted by popularity by default.
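
Before diving into the examples, here is a minimal sketch of what tf.tanh itself computes (assuming TensorFlow 1.x with its Session API, which all the examples below target): the elementwise hyperbolic tangent, squashing any real input into (-1, 1).

import tensorflow as tf

x = tf.constant([-2.0, 0.0, 2.0])
y = tf.tanh(x)  # elementwise hyperbolic tangent, range (-1, 1)

with tf.Session() as sess:
    print(sess.run(y))  # approximately [-0.964  0.     0.964]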

Example 1: _NonLinearity

# Required module: import tensorflow [as alias]
# Or: from tensorflow import tanh [as alias]
def _NonLinearity(self, code):
    """Returns the non-linearity function pointer for the given string code.

    For forwards compatibility, allows the full names for stand-alone
    non-linearities, as well as the single-letter names used in ops like C,F.
    Args:
      code: String code representing a non-linearity function.
    Returns:
      non-linearity function represented by the code.
    """
    if code in ['s', 'Sig']:
      return tf.sigmoid
    elif code in ['t', 'Tanh']:
      return tf.tanh
    elif code in ['r', 'Relu']:
      return tf.nn.relu
    elif code in ['m', 'Smax']:
      return tf.nn.softmax
    return None 
Developer: ringringyi, Project: DOTA_models, Lines: 21, Source: vgslspecs.py
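
The lookup above maps short and long string codes to TensorFlow activation functions. A hedged usage sketch, rewritten as a standalone function (the dict form is an equivalent restatement for illustration, not the repo's code):

import tensorflow as tf

def non_linearity(code):
    # Same mapping as _NonLinearity, expressed as a dict lookup.
    table = {'s': tf.sigmoid, 'Sig': tf.sigmoid,
             't': tf.tanh, 'Tanh': tf.tanh,
             'r': tf.nn.relu, 'Relu': tf.nn.relu,
             'm': tf.nn.softmax, 'Smax': tf.nn.softmax}
    return table.get(code)  # None for unknown codes, like the original

act = non_linearity('t')           # -> tf.tanh
y = act(tf.constant([0.5, -0.5]))  # apply like any other TF op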

Example 2: _Apply

# Required module: import tensorflow [as alias]
# Or: from tensorflow import tanh [as alias]
def _Apply(self, *args):
    xtransform = self._TransformInputs(*args)
    depth_axis = len(self._output_shape) - 1

    if self.hidden is not None:
      htransform = self._TransformHidden(self.hidden)
      f, i, j, o = tf.split(
          value=htransform + xtransform, num_or_size_splits=4, axis=depth_axis)
    else:
      f, i, j, o = tf.split(
          value=xtransform, num_or_size_splits=4, axis=depth_axis)

    if self.cell is not None:
      self.cell = tf.sigmoid(f) * self.cell + tf.sigmoid(i) * tf.tanh(j)
    else:
      self.cell = tf.sigmoid(i) * tf.tanh(j)

    self.hidden = tf.sigmoid(o) * tf.tanh(self.cell)
    return self.hidden 
Developer: ringringyi, Project: DOTA_models, Lines: 21, Source: blocks_lstm.py
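
The gate arithmetic in _Apply is the standard LSTM cell update. A minimal standalone sketch of the same math (the batch size, depth, and random pre-activations are assumptions; the real block derives them from its input and hidden transforms):

import tensorflow as tf

batch, depth = 2, 8
preact = tf.random_normal([batch, 4 * depth])  # stand-in for htransform + xtransform
f, i, j, o = tf.split(preact, num_or_size_splits=4, axis=1)

cell = tf.zeros([batch, depth])                           # previous cell state
cell = tf.sigmoid(f) * cell + tf.sigmoid(i) * tf.tanh(j)  # forget old content, write tanh-squashed candidate
hidden = tf.sigmoid(o) * tf.tanh(cell)                    # output gate over the re-squashed cell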

Example 3: get_cell

# Required module: import tensorflow [as alias]
# Or: from tensorflow import tanh [as alias]
def get_cell(self):
    self.cell_input_dim = self.internal_dim

    def mlp(cell_input, prev_internal_state):
      w1 = tf.get_variable('w1', [self.cell_input_dim, self.internal_dim])
      b1 = tf.get_variable('b1', [self.internal_dim])

      w2 = tf.get_variable('w2', [self.internal_dim, self.internal_dim])
      b2 = tf.get_variable('b2', [self.internal_dim])

      w3 = tf.get_variable('w3', [self.internal_dim, self.internal_dim])
      b3 = tf.get_variable('b3', [self.internal_dim])

      proj = tf.get_variable(
          'proj', [self.internal_dim, self.output_dim])

      hidden = cell_input
      hidden = tf.tanh(tf.nn.bias_add(tf.matmul(hidden, w1), b1))
      hidden = tf.tanh(tf.nn.bias_add(tf.matmul(hidden, w2), b2))

      output = tf.matmul(hidden, proj)

      return output, hidden

    return mlp 
Developer: ringringyi, Project: DOTA_models, Lines: 27, Source: policy.py
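
Because the returned closure creates weights with tf.get_variable, it must be called inside a variable scope, with reuse enabled for any call after the first. A self-contained sketch of that pattern (tiny_mlp and its sizes are illustrative assumptions, not the repo's code):

import tensorflow as tf

def tiny_mlp(x, dim=16):
    w = tf.get_variable('w', [int(x.shape[-1]), dim])
    b = tf.get_variable('b', [dim])
    return tf.tanh(tf.nn.bias_add(tf.matmul(x, w), b))

x = tf.zeros([1, 16])
with tf.variable_scope('cell'):
    h1 = tiny_mlp(x)  # creates cell/w and cell/b
with tf.variable_scope('cell', reuse=True):
    h2 = tiny_mlp(x)  # reuses the same weights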

Example 4: __call__

# Required module: import tensorflow [as alias]
# Or: from tensorflow import tanh [as alias]
def __call__(self, observation, state):
    with tf.variable_scope('policy'):
      x = tf.contrib.layers.flatten(observation)
      mean = tf.contrib.layers.fully_connected(
          x,
          self._action_size,
          tf.tanh,
          weights_initializer=self._mean_weights_initializer)
      logstd = tf.get_variable('logstd', mean.shape[1:], tf.float32,
                               self._logstd_initializer)
      logstd = tf.tile(logstd[None, ...],
                       [tf.shape(mean)[0]] + [1] * logstd.shape.ndims)
    with tf.variable_scope('value'):
      x = tf.contrib.layers.flatten(observation)
      for size in self._value_layers:
        x = tf.contrib.layers.fully_connected(x, size, tf.nn.relu)
      value = tf.contrib.layers.fully_connected(x, 1, None)[:, 0]
    return (mean, logstd, value), state 
Developer: utra-robosoccer, Project: soccer-matlab, Lines: 20, Source: networks.py

Example 5: build_mlp

# Required module: import tensorflow [as alias]
# Or: from tensorflow import tanh [as alias]
def build_mlp(input_placeholder, output_size, scope, n_layers, size, activation=tf.tanh, output_activation=None):
    """
        Builds a feedforward neural network
        
        arguments:
            input_placeholder: placeholder variable for the state (batch_size, input_size)
            output_size: size of the output layer
            scope: variable scope of the network
            n_layers: number of hidden layers
            size: dimension of the hidden layer
            activation: activation of the hidden layers
            output_activation: activation of the output layers

        returns:
            output placeholder of the network (the result of a forward pass) 

        Hint: use tf.layers.dense    
    """
    # YOUR HW2 CODE HERE
    with tf.variable_scope(scope):
        h = input_placeholder
        for i in range(n_layers):
            h = tf.layers.dense(h, size, activation=activation, name='h{}'.format(i + 1))
        output_placeholder = tf.layers.dense(h, output_size, activation=output_activation, name='output')
    return output_placeholder 
Developer: xuwd11, Project: cs294-112_hws, Lines: 27, Source: train_ac_f18.py
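
A hedged usage sketch for the build_mlp defined above (the placeholder sizes and feed values are assumptions): with the default activation=tf.tanh, every hidden layer is tanh-squashed while the output layer stays linear.

import numpy as np
import tensorflow as tf

obs_ph = tf.placeholder(tf.float32, [None, 4])
logits = build_mlp(obs_ph, output_size=2, scope='pi', n_layers=2, size=64)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    out = sess.run(logits, feed_dict={obs_ph: np.zeros([3, 4], np.float32)})  # shape (3, 2)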

Example 6: build_mlp

# Required module: import tensorflow [as alias]
# Or: from tensorflow import tanh [as alias]
def build_mlp(input_placeholder, output_size, scope, n_layers, size, activation=tf.tanh, output_activation=None):
    """
        Builds a feedforward neural network
        
        arguments:
            input_placeholder: placeholder variable for the state (batch_size, input_size)
            output_size: size of the output layer
            scope: variable scope of the network
            n_layers: number of hidden layers
            size: dimension of the hidden layer
            activation: activation of the hidden layers
            output_activation: activation of the output layers

        returns:
            output placeholder of the network (the result of a forward pass) 

        Hint: use tf.layers.dense    
    """
    # YOUR CODE HERE
    with tf.variable_scope(scope):
        h = input_placeholder
        for i in range(n_layers):
            h = tf.layers.dense(h, size, activation=activation, name='h{}'.format(i + 1))
        output_placeholder = tf.layers.dense(h, output_size, activation=output_activation, name='output')
    return output_placeholder 
Developer: xuwd11, Project: cs294-112_hws, Lines: 27, Source: train_pg_f18.py

Example 7: build_mlp

# Required module: import tensorflow [as alias]
# Or: from tensorflow import tanh [as alias]
def build_mlp(x, output_size, scope, n_layers, size, activation=tf.tanh, output_activation=None, regularizer=None):
    """
    builds a feedforward neural network

    arguments:
        x: placeholder variable for the state (batch_size, input_size)
        regularizer: regularization for weights
        (see `build_policy()` for rest)

    returns:
        output placeholder of the network (the result of a forward pass)
    """
    i = 0
    for i in range(n_layers):
        x = tf.layers.dense(inputs=x,units=size, activation=activation, name='fc{}'.format(i), kernel_regularizer=regularizer, bias_regularizer=regularizer)

    x = tf.layers.dense(inputs=x, units=output_size, activation=output_activation, name='fc{}'.format(i + 1), kernel_regularizer=regularizer, bias_regularizer=regularizer)
    return x 
Developer: xuwd11, Project: cs294-112_hws, Lines: 20, Source: train_policy.py
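
A hedged sketch of wiring in the regularizer argument (the scale value is an assumption; tf.contrib.layers.l2_regularizer and tf.losses.get_regularization_loss are standard TF1 APIs): each tf.layers.dense call registers its weight penalty, which can then be collected into the training loss.

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 8])
reg = tf.contrib.layers.l2_regularizer(scale=1e-4)
out = build_mlp(x, output_size=2, scope='mlp', n_layers=2, size=32, regularizer=reg)

reg_loss = tf.losses.get_regularization_loss()  # sum of the per-layer L2 penalties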

Example 8: build_rnn

# Required module: import tensorflow [as alias]
# Or: from tensorflow import tanh [as alias]
def build_rnn(x, h, output_size, scope, n_layers, size, activation=tf.tanh, output_activation=None, regularizer=None):
    """
    builds a gated recurrent neural network
    inputs are first embedded by an MLP then passed to a GRU cell

    make MLP layers with `size` number of units
    make the GRU with `output_size` number of units
    use `activation` as the activation function for both MLP and GRU

    arguments:
        (see `build_policy()`)

    hint: use `build_mlp()`
    """
    #====================================================================================#
    #                           ----------PROBLEM 2----------
    #====================================================================================#
    # YOUR CODE HERE
    x = build_mlp(x, output_size, scope, n_layers, size, activation, activation, regularizer)
    gru = tf.keras.layers.GRU(output_size, activation=activation, return_sequences=False, return_state=True)
    x, h = gru(x, h)
    return x, h 
Developer: xuwd11, Project: cs294-112_hws, Lines: 24, Source: train_policy.py

Example 9: build_critic

# Required module: import tensorflow [as alias]
# Or: from tensorflow import tanh [as alias]
def build_critic(x, h, output_size, scope, n_layers, size, gru_size, recurrent=True, activation=tf.tanh, output_activation=None, regularizer=None):
    """
    build recurrent critic

    arguments:
        regularizer: regularization for weights
        (see `build_policy()` for rest)

    n.b. the policy and critic should not share weights
    """
    with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
        if recurrent:
            x, h = build_rnn(x, h, gru_size, scope, n_layers, size, activation=activation, output_activation=output_activation, regularizer=regularizer)
        else:
            x = tf.reshape(x, (-1, x.get_shape()[1]*x.get_shape()[2]))
            x = build_mlp(x, gru_size, scope, n_layers + 1, size, activation=activation, output_activation=activation, regularizer=regularizer)
        x = tf.layers.dense(x, output_size, activation=output_activation, name='decoder', kernel_regularizer=regularizer, bias_regularizer=regularizer)
    return x 
Developer: xuwd11, Project: cs294-112_hws, Lines: 20, Source: train_policy.py

Example 10: build_mlp

# Required module: import tensorflow [as alias]
# Or: from tensorflow import tanh [as alias]
def build_mlp(input_placeholder, output_size, scope, n_layers, size, activation=tf.tanh, output_activation=None):
    """
        Builds a feedforward neural network
        
        arguments:
            input_placeholder: placeholder variable for the state (batch_size, input_size)
            output_size: size of the output layer
            scope: variable scope of the network
            n_layers: number of hidden layers
            size: dimension of the hidden layer
            activation: activation of the hidden layers
            output_activation: activation of the output layers

        returns:
            output placeholder of the network (the result of a forward pass) 

        Hint: use tf.layers.dense    
    """
    output_placeholder = input_placeholder
    with tf.variable_scope(scope):
        for _ in range(n_layers):
            output_placeholder = tf.layers.dense(output_placeholder, size, activation=activation)
        output_placeholder = tf.layers.dense(output_placeholder, output_size, activation=output_activation)
    return output_placeholder 
Developer: xuwd11, Project: cs294-112_hws, Lines: 26, Source: ex_utils.py

Example 11: call

# Required module: import tensorflow [as alias]
# Or: from tensorflow import tanh [as alias]
def call(self, inputs):
        mean_and_log_std = self.model(inputs)
        mean, log_std = tf.split(mean_and_log_std, num_or_size_splits=2, axis=1)
        log_std = tf.clip_by_value(log_std, -20., 2.)
        
        distribution = tfp.distributions.MultivariateNormalDiag(
            loc=mean,
            scale_diag=tf.exp(log_std)
        )
        
        raw_actions = distribution.sample()
        if not self._reparameterize:
            ### Problem 1.3.A
            ### YOUR CODE HERE
            raw_actions = tf.stop_gradient(raw_actions)
        log_probs = distribution.log_prob(raw_actions)
        log_probs -= self._squash_correction(raw_actions)

        ### Problem 2.A
        ### YOUR CODE HERE
        self.actions = tf.tanh(raw_actions)
            
        return self.actions, log_probs 
Developer: xuwd11, Project: cs294-112_hws, Lines: 25, Source: nn.py
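
A hedged sketch of what _squash_correction typically computes in SAC-style code (this repo's exact implementation may differ): squashing u -> tanh(u) rescales the density by the Jacobian 1 - tanh(u)^2, so the log-probability must subtract sum_i log(1 - tanh(u_i)^2).

import tensorflow as tf

def squash_correction(raw_actions, eps=1e-6):
    # eps guards against log(0) when tanh saturates near +/-1.
    return tf.reduce_sum(
        tf.log(1.0 - tf.tanh(raw_actions) ** 2 + eps), axis=1)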

Example 12: stacked_lstm

# Required module: import tensorflow [as alias]
# Or: from tensorflow import tanh [as alias]
def stacked_lstm(self, inputs, states, hidden_size, output_size, nlayers):
    """Stacked LSTM layers with FC layers as input and output embeddings.

    Args:
      inputs: input tensor
      states: a list of internal lstm states for each layer
      hidden_size: number of lstm units
      output_size: size of the output
      nlayers: number of lstm layers
    Returns:
      net: output of the network
      skips: a list of updated lstm states for each layer
    """
    net = inputs
    net = slim.layers.fully_connected(
        net, hidden_size, activation_fn=None, scope="af1")
    for i in range(nlayers):
      net, states[i] = self.basic_lstm(
          net, states[i], hidden_size, scope="alstm%d"%i)
    net = slim.layers.fully_connected(
        net, output_size, activation_fn=tf.tanh, scope="af2")
    return net, states 
Developer: akzaidi, Project: fine-lm, Lines: 24, Source: next_frame.py

Example 13: conv_lstm

# Required module: import tensorflow [as alias]
# Or: from tensorflow import tanh [as alias]
def conv_lstm(x,
              kernel_size,
              filters,
              padding="SAME",
              dilation_rate=(1, 1),
              name=None,
              reuse=None):
  """Convolutional LSTM in 1 dimension."""
  with tf.variable_scope(
      name, default_name="conv_lstm", values=[x], reuse=reuse):
    gates = conv(
        x,
        4 * filters,
        kernel_size,
        padding=padding,
        dilation_rate=dilation_rate)
    g = tf.split(layer_norm(gates, 4 * filters), 4, axis=3)
    new_cell = tf.sigmoid(g[0]) * x + tf.sigmoid(g[1]) * tf.tanh(g[3])
    return tf.sigmoid(g[2]) * tf.tanh(new_cell) 
Developer: akzaidi, Project: fine-lm, Lines: 21, Source: common_layers.py

Example 14: lstm

# Required module: import tensorflow [as alias]
# Or: from tensorflow import tanh [as alias]
def lstm(xs, ms, s, scope, nh, init_scale=1.0):
    nbatch, nin = [v.value for v in xs[0].get_shape()]
    nsteps = len(xs)
    with tf.variable_scope(scope):
        wx = tf.get_variable("wx", [nin, nh*4], initializer=ortho_init(init_scale))
        wh = tf.get_variable("wh", [nh, nh*4], initializer=ortho_init(init_scale))
        b = tf.get_variable("b", [nh*4], initializer=tf.constant_initializer(0.0))

    c, h = tf.split(axis=1, num_or_size_splits=2, value=s)
    for idx, (x, m) in enumerate(zip(xs, ms)):
        c = c*(1-m)
        h = h*(1-m)
        z = tf.matmul(x, wx) + tf.matmul(h, wh) + b
        i, f, o, u = tf.split(axis=1, num_or_size_splits=4, value=z)
        i = tf.nn.sigmoid(i)
        f = tf.nn.sigmoid(f)
        o = tf.nn.sigmoid(o)
        u = tf.tanh(u)
        c = f*c + i*u
        h = o*tf.tanh(c)
        xs[idx] = h
    s = tf.concat(axis=1, values=[c, h])
    return xs, s 
Developer: Hwhitetooth, Project: lirpg, Lines: 25, Source: utils.py
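
A hedged sketch of driving lstm() above (the shapes are assumptions, and ortho_init comes from the same utils module): xs is a list of per-timestep tensors, ms are episode-reset masks that zero the recurrent state at boundaries, and s packs the cell and hidden states side by side along axis 1.

import tensorflow as tf

nbatch, nin, nh, nsteps = 4, 8, 16, 5
xs = [tf.placeholder(tf.float32, [nbatch, nin]) for _ in range(nsteps)]
ms = [tf.placeholder(tf.float32, [nbatch, 1]) for _ in range(nsteps)]
s = tf.placeholder(tf.float32, [nbatch, nh * 2])   # [c, h] concatenated

hs, s_out = lstm(xs, ms, s, scope='lstm1', nh=nh)  # hs holds the per-step hidden states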

Example 15: __init__

# Required module: import tensorflow [as alias]
# Or: from tensorflow import tanh [as alias]
def __init__(self, params, ob_space, ac_space, nbatch, nsteps): #pylint: disable=W0613
        ob_shape = (nbatch,) + ob_space.shape
        X = tf.placeholder(tf.float32, ob_shape, name='Ob') #obs
        with tf.name_scope('policy_new'):
            activ = tf.tanh
            h1 = activ(tf.nn.xw_plus_b(X, params['policy/pi_fc1/w:0'], params['policy/pi_fc1/b:0']))
            h2 = activ(tf.nn.xw_plus_b(h1, params['policy/pi_fc2/w:0'], params['policy/pi_fc2/b:0']))
            pi = tf.nn.xw_plus_b(h2, params['policy/pi/w:0'], params['policy/pi/b:0'])
            logstd = params['policy/logstd:0']

        pdparam = tf.concat([pi, pi * 0.0 + logstd], axis=1)

        self.pdtype = make_pdtype(ac_space)
        self.pd = self.pdtype.pdfromflat(pdparam)

        self.X = X 
Developer: Hwhitetooth, Project: lirpg, Lines: 18, Source: policies.py


Note: The tensorflow.tanh method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors. Consult each project's License before distributing or using the code, and do not repost without permission.