

Python v1.sigmoid Method Code Examples

This article collects typical usage examples of the tensorflow.compat.v1.sigmoid method in Python. If you have been wondering what v1.sigmoid does, how to call it, or what real uses of it look like, the curated examples below should help. You can also explore other usage examples from the enclosing module, tensorflow.compat.v1.


The sections below present 15 code examples of the v1.sigmoid method, sorted by popularity by default.

Example 1: conv_lstm

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import sigmoid [as alias]
def conv_lstm(x,
              kernel_size,
              filters,
              padding="SAME",
              dilation_rate=(1, 1),
              name=None,
              reuse=None):
  """Convolutional LSTM in 1 dimension."""
  with tf.variable_scope(
      name, default_name="conv_lstm", values=[x], reuse=reuse):
    gates = conv(
        x,
        4 * filters,
        kernel_size,
        padding=padding,
        dilation_rate=dilation_rate)
    g = tf.split(layer_norm(gates, 4 * filters), 4, axis=3)
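    # The four gates act as forget (g[0], applied to x), input (g[1]),
    # output (g[2]), and candidate (g[3]).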
    new_cell = tf.sigmoid(g[0]) * x + tf.sigmoid(g[1]) * tf.tanh(g[3])
    return tf.sigmoid(g[2]) * tf.tanh(new_cell) 
Author: tensorflow, Project: tensor2tensor, Lines: 21, Source: common_layers.py
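
A minimal usage sketch (assumes tensor2tensor is installed so that common_layers is importable; shapes are illustrative). Since the gates are combined directly with x, filters must equal the input's channel dimension:

import tensorflow.compat.v1 as tf
from tensor2tensor.layers import common_layers

tf.disable_v2_behavior()
# 1-D data stored as a 4-D tensor: [batch, height=1, length, channels].
x = tf.placeholder(tf.float32, [8, 1, 128, 64])
y = common_layers.conv_lstm(x, kernel_size=(1, 3), filters=64)  # same shape as x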

Example 2: gated_linear_unit_layer

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import sigmoid [as alias]
def gated_linear_unit_layer(x, name=None):
  """Gated linear unit layer.

  Paper: Language Modeling with Gated Convolutional Networks.
  Link: https://arxiv.org/abs/1612.08083
  x = Wx * sigmoid(W'x).

  Args:
    x: A tensor
    name: A string

  Returns:
    A tensor of the same shape as x.
  """
  with tf.variable_scope(name, default_name="glu_layer", values=[x]):
    depth = shape_list(x)[-1]
    x = layers().Dense(depth * 2, activation=None)(x)
    x, gating_x = tf.split(x, 2, axis=-1)
    return x * tf.nn.sigmoid(gating_x) 
Author: tensorflow, Project: tensor2tensor, Lines: 21, Source: common_layers.py
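
The same computation as a self-contained sketch with stock tf.compat.v1 ops (shape_list and layers() above are tensor2tensor helpers; shapes here are illustrative):

import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()
x = tf.placeholder(tf.float32, [None, 10, 256])
depth = x.shape.as_list()[-1]
projected = tf.layers.dense(x, 2 * depth)        # one projection yields values and gates
values, gates = tf.split(projected, 2, axis=-1)
out = values * tf.sigmoid(gates)                 # GLU: same shape as x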

Example 3: _cond_prob

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import sigmoid [as alias]
def _cond_prob(self, a, w_dec_i, b_dec_i):
    """Gets the conditional probability for a single dimension.

    Args:
      a: Model's hidden state, sized `[batch_size, num_hidden]`.
      w_dec_i: The decoder weight terms for the dimension, sized
          `[num_hidden, 1]`.
      b_dec_i: The decoder bias terms, sized `[batch_size, 1]`.

    Returns:
      cond_p_i: The conditional probability of the dimension, sized
        `[batch_size, 1]`.
      cond_l_i: The conditional logits of the dimension, sized
        `[batch_size, 1]`.
    """
    # Decode hidden units to get conditional probability.
    h = tf.sigmoid(a)
    cond_l_i = b_dec_i + tf.matmul(h, w_dec_i)
    cond_p_i = tf.sigmoid(cond_l_i)
    return cond_p_i, cond_l_i 
Author: magenta, Project: magenta, Lines: 22, Source: nade.py
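
The same arithmetic in plain NumPy, as an illustrative sketch with random stand-in values:

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

batch_size, num_hidden = 4, 16
a = np.random.randn(batch_size, num_hidden)   # model's hidden state
w_dec_i = np.random.randn(num_hidden, 1)      # decoder weights for dimension i
b_dec_i = np.random.randn(batch_size, 1)      # decoder bias for dimension i

h = sigmoid(a)
cond_l_i = b_dec_i + h @ w_dec_i              # conditional logits, [batch_size, 1]
cond_p_i = sigmoid(cond_l_i)                  # conditional probabilities in (0, 1)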

Example 4: _call_se

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import sigmoid [as alias]
def _call_se(self, input_tensor):
    """Call Squeeze and Excitation layer.

    Args:
      input_tensor: Tensor, a single input tensor for Squeeze/Excitation layer.

    Returns:
      An output tensor, which should have the same shape as the input.
    """
    if self._local_pooling:
      shape = input_tensor.get_shape().as_list()
      kernel_size = [
          1, shape[self._spatial_dims[0]], shape[self._spatial_dims[1]], 1]
      se_tensor = tf.nn.avg_pool(
          input_tensor,
          ksize=kernel_size,
          strides=[1, 1, 1, 1],
          padding='VALID')
    else:
      se_tensor = tf.reduce_mean(
          input_tensor, self._spatial_dims, keepdims=True)
    se_tensor = self._se_expand(self._relu_fn(self._se_reduce(se_tensor)))
    logging.info('Built Squeeze and Excitation with tensor shape: %s',
                 (se_tensor.shape))
    return tf.sigmoid(se_tensor) * input_tensor 
Author: JunweiLiang, Project: Object_Detection_Tracking, Lines: 27, Source: efficientnet_model.py
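
A self-contained sketch of the global-pooling branch using stock tf.compat.v1 ops (channel count and squeeze ratio are illustrative; the original uses its own conv layers and a configurable activation):

import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()
x = tf.placeholder(tf.float32, [None, 32, 32, 64])       # NHWC input
se = tf.reduce_mean(x, axis=[1, 2], keepdims=True)       # squeeze: [N, 1, 1, 64]
se = tf.layers.conv2d(se, 16, 1, activation=tf.nn.relu)  # reduce (ratio 4 here)
se = tf.layers.conv2d(se, 64, 1)                         # expand back to 64 channels
out = tf.sigmoid(se) * x                                 # excite: reweight channels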

Example 5: apply_highway_lstm

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import sigmoid [as alias]
def apply_highway_lstm(x, seq_len):
  """Run a bi-directional LSTM with highway connections over `x`.

  Args:
    x: <tf.float32>[batch, seq_len, dim]
    seq_len: <tf.int32>[batch], the sequence length of each row of `x`

  Returns:
    out, <tf.float32>[batch, seq_len, out_dim]
  """
  lstm_out = apply_lstm(x, seq_len)
  proj = ops.affine(x, FLAGS.lstm_dim * 4, "w", bias_name="b")
  gate, transform = tf.split(proj, 2, 2)
  gate = tf.sigmoid(gate)
  transform = tf.tanh(transform)
  return lstm_out * gate + (1 - gate) * transform 
Author: google-research, Project: language, Lines: 18, Source: run_recurrent_model_boolq.py
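
The gating combine in isolation, as a sketch in which tf.layers.dense and a placeholder stand in for the project-specific ops.affine and apply_lstm (dimensions are illustrative):

import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()
dim = 128                                                   # stands in for FLAGS.lstm_dim
x = tf.placeholder(tf.float32, [None, 50, 300])
lstm_out = tf.placeholder(tf.float32, [None, 50, 2 * dim])  # bi-LSTM output stand-in

proj = tf.layers.dense(x, 4 * dim)                 # stand-in for ops.affine
gate, transform = tf.split(proj, 2, axis=2)        # each [batch, seq_len, 2 * dim]
gate = tf.sigmoid(gate)
out = lstm_out * gate + (1 - gate) * tf.tanh(transform)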

Example 6: create_nn

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import sigmoid [as alias]
def create_nn(self, features, name=None):
    if name is None:
        name = self.critic_name

    with tf.variable_scope(name + '_fc_1'):
        fc1 = layer(features, 64)
    with tf.variable_scope(name + '_fc_2'):
        fc2 = layer(fc1, 64)
    with tf.variable_scope(name + '_fc_3'):
        fc3 = layer(fc2, 64)
    with tf.variable_scope(name + '_fc_4'):
        fc4 = layer(fc3, 1, is_output=True)

        # A q_offset is used to give the critic function an optimistic
        # initialization near 0.
        output = tf.sigmoid(fc4 + self.q_offset) * self.q_limit

    return output
Author: andrew-j-levy, Project: Hierarchical-Actor-Critc-HAC-, Lines: 20, Source: critic.py
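
Why the offset works, in NumPy: the sigmoid bounds the output between 0 and q_limit, and shifting the pre-activation by the inverse sigmoid of q_init / q_limit makes a freshly initialized network (pre-activation near zero) output roughly q_init. The values below are illustrative, not taken from the repository:

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

q_limit = -10.0                             # outputs bounded in (q_limit, 0)
q_init = -0.5                               # desired output at initialization
q_offset = -np.log(q_limit / q_init - 1.0)  # inverse sigmoid of q_init / q_limit

fc4 = 0.0                                   # fresh network: pre-activation near zero
print(sigmoid(fc4 + q_offset) * q_limit)    # ~ -0.5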

Example 7: build_score_converter

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import sigmoid [as alias]
def build_score_converter(score_converter_config, is_training):
  """Builds score converter based on the config.

  Builds one of [tf.identity, tf.sigmoid] score converters based on the config
  and whether the BoxPredictor is for training or inference.

  Args:
    score_converter_config:
      box_predictor_pb2.WeightSharedConvolutionalBoxPredictor.score_converter.
    is_training: Indicates whether the BoxPredictor is in training mode.

  Returns:
    Callable score converter op.

  Raises:
    ValueError: On unknown score converter.
  """
  if score_converter_config == (
      box_predictor_pb2.WeightSharedConvolutionalBoxPredictor.IDENTITY):
    return tf.identity
  if score_converter_config == (
      box_predictor_pb2.WeightSharedConvolutionalBoxPredictor.SIGMOID):
    return tf.identity if is_training else tf.sigmoid
  raise ValueError('Unknown score converter.') 
Author: tensorflow, Project: models, Lines: 26, Source: box_predictor_builder.py

Example 8: _build_score_converter

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import sigmoid [as alias]
def _build_score_converter(score_converter_config, logit_scale):
  """Builds score converter based on the config.

  Builds one of [tf.identity, tf.sigmoid, tf.softmax] score converters based on
  the config.

  Args:
    score_converter_config: post_processing_pb2.PostProcessing.score_converter.
    logit_scale: temperature to use for SOFTMAX score_converter.

  Returns:
    Callable score converter op.

  Raises:
    ValueError: On unknown score converter.
  """
  if score_converter_config == post_processing_pb2.PostProcessing.IDENTITY:
    return _score_converter_fn_with_logit_scale(tf.identity, logit_scale)
  if score_converter_config == post_processing_pb2.PostProcessing.SIGMOID:
    return _score_converter_fn_with_logit_scale(tf.sigmoid, logit_scale)
  if score_converter_config == post_processing_pb2.PostProcessing.SOFTMAX:
    return _score_converter_fn_with_logit_scale(tf.nn.softmax, logit_scale)
  raise ValueError('Unknown score converter.') 
Author: tensorflow, Project: models, Lines: 25, Source: post_processing_builder.py
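
A minimal usage sketch of the builder above (the proto enum comes from the TF Object Detection API; shapes are illustrative):

import tensorflow.compat.v1 as tf
from object_detection.protos import post_processing_pb2

tf.disable_v2_behavior()
logits = tf.placeholder(tf.float32, [None, 100, 90])  # [batch, anchors, classes]
convert_fn = _build_score_converter(
    post_processing_pb2.PostProcessing.SIGMOID, logit_scale=1.0)
scores = convert_fn(logits)                           # sigmoid of the scaled logits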

Example 9: _do_feature_masking

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import sigmoid [as alias]
# (This example also requires: import six, plus the project's utils module)
def _do_feature_masking(x, y, num_x, num_y, rounds, rank):
    for round_ in six.moves.range(rounds):
      # Even rounds correspond to input transforms. Odd rounds to state
      # transforms. Implemented this way because feature_mask_rounds=1 with a
      # single round of transforming the state does not seem to improve things
      # much. Concurrent updates were also tested, but were not an improvement
      # either.
      transforming_x = (round_ % 2 == 0)
      fm_name = 'fm_' + str(round_)
      if rank == 0:  # full rank case
        if transforming_x:
          x *= 2*tf.sigmoid(utils.linear(y, num_x, bias=True, scope=fm_name))
        else:
          y *= 2*tf.sigmoid(utils.linear(x, num_y, bias=True, scope=fm_name))
      else:  # low-rank factorization case
        if transforming_x:
          shape = [num_y, num_x]
        else:
          shape = [num_x, num_y]
        a, b = utils.low_rank_factorization(fm_name + '_weight', shape, rank)
        bias = tf.get_variable(fm_name + '_bias', shape[1],
                               initializer=tf.zeros_initializer())
        if transforming_x:
          x *= 2*tf.sigmoid(tf.matmul(tf.matmul(y, a), b) + bias)
        else:
          y *= 2*tf.sigmoid(tf.matmul(tf.matmul(x, a), b) + bias)
    return x, y 
Author: deepmind, Project: lamb, Lines: 29, Source: tiled_lstm.py
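
The low-rank branch in isolation, as a sketch with stock tf.compat.v1 ops (utils.low_rank_factorization simply creates the two factor matrices; sizes here are illustrative). The 2 * sigmoid scaling gives the mask an expected value of 1 when the bias is zero and the weights are small, so the masking starts out close to the identity:

import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()
num_x, num_y, rank = 64, 64, 8
x = tf.placeholder(tf.float32, [None, num_x])
y = tf.placeholder(tf.float32, [None, num_y])

a = tf.get_variable('fm_0_a', [num_y, rank])   # W ~= a @ b, a rank-8 factorization
b = tf.get_variable('fm_0_b', [rank, num_x])
bias = tf.get_variable('fm_0_bias', [num_x], initializer=tf.zeros_initializer())
x_masked = x * (2 * tf.sigmoid(tf.matmul(tf.matmul(y, a), b) + bias))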

Example 10: inv_sigmoid

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import sigmoid [as alias]
# (This example also requires: import numpy as np)
def inv_sigmoid(y):
  """Inverse sigmoid function.

  Args:
    y: float in range 0 to 1

  Returns:
    the inverse sigmoid.
  """
  return np.log(y / (1 - y)) 
Author: tensorflow, Project: tensor2tensor, Lines: 12, Source: residual_shuffle_exchange.py
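
A quick round-trip check in NumPy:

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

y = np.array([0.1, 0.5, 0.9])
logits = np.log(y / (1 - y))   # inv_sigmoid
print(sigmoid(logits))         # recovers [0.1, 0.5, 0.9]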

Example 11: call

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import sigmoid [as alias]
def call(self, inputs, **kwargs):
    """Apply Residual Switch Layer to inputs.

    Args:
      inputs: Input tensor.
      **kwargs: unused kwargs.

    Returns:
      tf.Tensor: New candidate value
    """
    del kwargs
    input_shape = tf.shape(inputs)
    batch_size = input_shape[0]
    length = input_shape[1]
    num_units = inputs.shape.as_list()[2]

    n_bits = tf.log(tf.cast(length - 1, tf.float32)) / tf.log(2.0)
    n_bits = tf.floor(n_bits) + 1

    reshape_shape = [batch_size, length // 2, num_units * 2]
    reshaped_inputs = tf.reshape(inputs, reshape_shape)

    first_linear = self.first_linear(reshaped_inputs)
    first_linear = self.layer_norm(first_linear)
    first_linear = gelu(first_linear)
    candidate = self.second_linear(first_linear)

    residual = tf.sigmoid(self.residual_scale) * reshaped_inputs
    candidate = residual + candidate * self.candidate_weight
    candidate = tf.reshape(candidate, input_shape)

    if self.dropout > 0:
      candidate = tf.nn.dropout(candidate, rate=self.dropout / n_bits)
    if self.dropout != 0.0 and self.mode == tf.estimator.ModeKeys.TRAIN:
      noise = tf.random_normal(tf.shape(candidate), mean=1.0, stddev=0.001)
      candidate = candidate * noise

    return candidate 
Author: tensorflow, Project: tensor2tensor, Lines: 40, Source: residual_shuffle_exchange.py

Example 12: saturating_sigmoid

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import sigmoid [as alias]
def saturating_sigmoid(x):
  """Saturating sigmoid: 1.2 * sigmoid(x) - 0.1 cut to [0, 1]."""
  with tf.name_scope("saturating_sigmoid", values=[x]):
    y = tf.sigmoid(x)
    return tf.minimum(1.0, tf.maximum(0.0, 1.2 * y - 0.1)) 
Author: tensorflow, Project: tensor2tensor, Lines: 7, Source: common_layers.py
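
Unlike the plain sigmoid, this variant reaches exactly 0 and 1 at finite inputs (for |x| above roughly 2.4). A NumPy sketch:

import numpy as np

def saturating_sigmoid(x):
    y = 1.0 / (1.0 + np.exp(-x))
    return np.minimum(1.0, np.maximum(0.0, 1.2 * y - 0.1))

print(saturating_sigmoid(np.array([-5.0, 0.0, 5.0])))  # [0.0, 0.5, 1.0]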

Example 13: gru_feedfwd

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import sigmoid [as alias]
def gru_feedfwd(a_t, h_prev, filters, name=None):
  """position-wise Feed-fwd GRU gates following the MPNN.

  Args:
    a_t: Tensor of shape [batch, length, depth] of current input
    h_prev: Tensor of shape [batch, length, depth] of prev input
    filters: an integer specifying number of dimensions of the filters
    name: A string
  Returns:
    h_t: [batch, length, filters] hidden state
  """

  with tf.variable_scope(name, default_name="GRU", values=[a_t, h_prev]):
    # We use right matrix multiplication to handle batches.
    # W_z and W_r have shape [2d, d]; U_z and U_r have shape [d, d].
    z_t = (
        tf.sigmoid(
            tpu_conv1d(a_t, filters, 1, padding="SAME", name="W_z") +
            tpu_conv1d(h_prev, filters, 1, padding="SAME", name="U_z")))
    r_t = (
        tf.sigmoid(
            tpu_conv1d(a_t, filters, 1, padding="SAME", name="W_r") +
            tpu_conv1d(h_prev, filters, 1, padding="SAME", name="U_r")))
    h_tilde = (
        tf.tanh(
            tpu_conv1d(a_t, filters, 1, padding="SAME", name="W") +
            tpu_conv1d(r_t * h_prev, filters, 1, padding="SAME", name="U")))
    h_t = (1. - z_t) * h_prev + z_t * h_tilde

  return h_t 
Author: tensorflow, Project: tensor2tensor, Lines: 32, Source: common_layers.py
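
A minimal usage sketch, assuming tensor2tensor's common_layers is importable (shapes are illustrative; filters must match the depth of h_prev so the update h_t = (1 - z_t) * h_prev + z_t * h_tilde is well-formed):

import tensorflow.compat.v1 as tf
from tensor2tensor.layers import common_layers

tf.disable_v2_behavior()
a_t = tf.placeholder(tf.float32, [None, 20, 128])     # current input
h_prev = tf.placeholder(tf.float32, [None, 20, 128])  # previous hidden state
h_t = common_layers.gru_feedfwd(a_t, h_prev, filters=128)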

Example 14: nac

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import sigmoid [as alias]
def nac(x, depth, name=None, reuse=None):
  """NAC as in https://arxiv.org/abs/1808.00508."""
  with tf.variable_scope(name, default_name="nac", values=[x], reuse=reuse):
    x_shape = shape_list(x)
    w = tf.get_variable("w", [x_shape[-1], depth])
    m = tf.get_variable("m", [x_shape[-1], depth])
    w = tf.tanh(w) * tf.nn.sigmoid(m)
    x_flat = tf.reshape(x, [-1, x_shape[-1]])
    res_flat = tf.matmul(x_flat, w)
    return tf.reshape(res_flat, x_shape[:-1] + [depth]) 
Author: tensorflow, Project: tensor2tensor, Lines: 12, Source: common_layers.py
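
The weight construction tf.tanh(w) * tf.nn.sigmoid(m) saturates toward {-1, 0, +1}, which is what lets a NAC learn exact addition and subtraction. A NumPy sketch of the saturation:

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

w_hat = np.array([5.0, -5.0, 0.0])
m_hat = np.array([5.0, 5.0, -5.0])
print(np.tanh(w_hat) * sigmoid(m_hat))   # ~[ 1., -1.,  0.]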

Example 15: write

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import sigmoid [as alias]
def write(self, x, access_logits):
    """Write to the memory based on a combination of similarity and least used.

    Based on arXiv:1607.00036v2 [cs.LG].

    Args:
      x: a tensor in the shape of [batch_size, length, depth].
      access_logits: the logits for accessing the memory.
    Returns:
      the update op.
    """
    gamma = tf.layers.dense(x, 1, activation=tf.sigmoid, name="gamma")
    write_logits = access_logits - gamma * tf.expand_dims(self.mean_logits, 1)
    candidate_value = tf.layers.dense(x, self.val_depth,
                                      activation=tf.nn.relu,
                                      name="candidate_value")
    erase_gates = tf.layers.dense(x, self.memory_size,
                                  activation=tf.nn.sigmoid,
                                  name="erase")
    write_weights = tf.nn.softmax(write_logits)
    erase_weights = tf.expand_dims(1 - erase_gates * write_weights, 3)
    erase = tf.multiply(erase_weights,
                        tf.expand_dims(self.mem_vals, 1))
    addition = tf.multiply(
        tf.expand_dims(write_weights, 3),
        tf.expand_dims(candidate_value, 2))
    update_value_op = self.mem_vals.assign(
        tf.reduce_mean(erase + addition, axis=1))
    with tf.control_dependencies([update_value_op]):
      write_op = self.mean_logits.assign(
          self.mean_logits * 0.1 + tf.reduce_mean(write_logits * 0.9, axis=1))
      return write_op 
Author: tensorflow, Project: tensor2tensor, Lines: 34, Source: transformer_memory.py


Note: The tensorflow.compat.v1.sigmoid method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their developers, and copyright of the source code remains with the original authors. Follow the corresponding project's license when distributing or using the code; do not republish without permission.