Python math_ops.sigmoid Function Code Examples

This article collects typical usage examples of the tensorflow.python.ops.math_ops.sigmoid function in Python. If you are wondering what sigmoid does, how to call it, or what its usage looks like in practice, the curated code examples below should help.


The following 15 code examples of the sigmoid function are shown, sorted by popularity by default.
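
Before the examples, here is a minimal sketch of calling math_ops.sigmoid directly (a sketch assuming TensorFlow 1.x graph mode; the input values are purely illustrative):

    import tensorflow as tf
    from tensorflow.python.ops import math_ops

    # sigmoid(x) = 1 / (1 + exp(-x)), applied elementwise
    x = tf.constant([-1.0, 0.0, 1.0])
    y = math_ops.sigmoid(x)

    with tf.Session() as sess:
        print(sess.run(y))  # approximately [0.2689, 0.5, 0.7311]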

Example 1: __call__

    def __call__(self, inputs, state, scope=None):
        """Long short-term memory cell (LSTM)."""
        with vs.variable_scope(scope or type(self).__name__):  # "BasicLSTMCell"
            # Parameters of gates are concatenated into one multiply for efficiency.
            if self._state_is_tuple:
                c, h = state
            else:
                c, h = array_ops.split(1, 2, state)

            i = linear_tt([inputs, h], self._num_units, self._mat_ranks, bias=True, scope="i")
            j = linear_tt([inputs, h], self._num_units, self._mat_ranks, bias=True, scope="j")
            f = linear_tt([inputs, h], self._num_units, self._mat_ranks, bias=True, scope="f")
            o = linear_tt([inputs, h], self._num_units, self._mat_ranks, bias=True, scope="o")

#             concat = _linear([inputs, h], 4 * self._num_units, True)
#             # i = input_gate, j = new_input, f = forget_gate, o = output_gate
#             i , j, f, o = array_ops.split(1, 4, concat)

            new_c = (c * sigmoid(f + self._forget_bias) + sigmoid(i) *
                     self._activation(j))
            new_h = self._activation(new_c) * sigmoid(o)

            if self._state_is_tuple:
                new_state = LSTMStateTuple(new_c, new_h)
            else:
                new_state = array_ops.concat(1, [new_c, new_h])
            return new_h, new_state
Developer: yuqirose, Project: tensor-compress, Lines: 27, Source: TensorBasicLSTMCell.py
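
For reference, the gate arithmetic in Example 1 above (and again in Examples 2, 5, and 13 below) is the standard LSTM update. Writing \sigma for sigmoid, \phi for the cell activation (tanh by default), \odot for the elementwise product, and b_f for the forget bias:

    c_t = \sigma(f_t + b_f) \odot c_{t-1} + \sigma(i_t) \odot \phi(j_t)
    h_t = \phi(c_t) \odot \sigma(o_t)

where i, j, f, and o are four linear projections of the concatenated [x_t, h_{t-1}]. (Example 5 omits the forget-bias shift and additionally clips c_t to [-50, 50].)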

Example 2: __call__

  def __call__(self, inputs, state, scope=None):
    """LSTM cell with layer normalization and recurrent dropout."""

    with vs.variable_scope(scope or type(self).__name__) as scope:  # LayerNormBasicLSTMCell  # pylint: disable=unused-variable
      c, h = state
      args = array_ops.concat(1, [inputs, h])
      concat = self._linear(args)

      i, j, f, o = array_ops.split(1, 4, concat)
      if self._layer_norm:
        i = self._norm(i, "input")
        j = self._norm(j, "transform")
        f = self._norm(f, "forget")
        o = self._norm(o, "output")

      g = self._activation(j)
      if (not isinstance(self._keep_prob, float)) or self._keep_prob < 1:
        g = nn_ops.dropout(g, self._keep_prob, seed=self._seed)

      new_c = (c * math_ops.sigmoid(f + self._forget_bias)
               + math_ops.sigmoid(i) * g)
      if self._layer_norm:
        new_c = self._norm(new_c, "state")
      new_h = self._activation(new_c) * math_ops.sigmoid(o)

      new_state = rnn_cell.LSTMStateTuple(new_c, new_h)
      return new_h, new_state
Developer: KalraA, Project: tensorflow, Lines: 27, Source: rnn_cell.py

Example 3: GetParams

 def GetParams(self):
   """Tests for scale & elementwise layers in TF-TRT."""
   input_name = "input"
   input_dims = [10, 24, 24, 20]
   output_name = "output"
   g = ops.Graph()
   with g.as_default():
     x = array_ops.placeholder(
         dtype=dtypes.float32, shape=input_dims, name=input_name)
     for weights_shape in [
         (1,),  # scale
         (24, 1, 1),  # scale
         (24, 24, 20),  # scale
         (20,),  # elementwise
         (1, 24, 1, 1),  # elementwise
         (1, 24, 24, 1),  # elementwise
         (1, 24, 24, 20),  # elementwise
         (24, 20),  # elementwise
     ]:
       a = self._ConstOp(weights_shape)
       f = x + a
       x = math_ops.sigmoid(f)
       a = self._ConstOp(weights_shape)
       f = a + x
       x = math_ops.sigmoid(f)
     gen_array_ops.reshape(x, [5, -1], name=output_name)
   return trt_test.TfTrtIntegrationTestParams(
       gdef=g.as_graph_def(),
       input_names=[input_name],
       input_dims=[input_dims],
       output_names=[output_name],
       expected_output_dims=[(5, 23040)])
Developer: Ajaycs99, Project: tensorflow, Lines: 32, Source: binary_tensor_weight_broadcast_test.py
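
The expected output shape here follows from simple arithmetic: the input [10, 24, 24, 20] holds 10 * 24 * 24 * 20 = 115200 elements, so reshaping with [5, -1] yields 115200 / 5 = 23040 columns, hence expected_output_dims of (5, 23040).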

Example 4: call

  def call(self, inputs, state):
    """
    """
    (c_prev, m_prev) = state
    self._batch_size = inputs.shape[0].value or array_ops.shape(inputs)[0]
    scope = vs.get_variable_scope()
    with vs.variable_scope(scope, initializer=self._initializer):
      x = array_ops.concat([inputs, m_prev], axis=1)
      with vs.variable_scope("first_gemm"):
        if self._linear1 is None:
          # no bias for bottleneck
          self._linear1 = _Linear(x, self._fact_size, False)
        R_fact = self._linear1(x)
      with vs.variable_scope("second_gemm"):
        if self._linear2 is None:
          self._linear2 = _Linear(R_fact, 4*self._num_units, True)
        R = self._linear2(R_fact)
      i, j, f, o = array_ops.split(R, 4, 1)

      c = (math_ops.sigmoid(f + self._forget_bias) * c_prev +
           math_ops.sigmoid(i) * math_ops.tanh(j))
      m = math_ops.sigmoid(o) * self._activation(c)

    if self._num_proj is not None:
      with vs.variable_scope("projection"):
        if self._linear3 is None:
          self._linear3 = _Linear(m, self._num_proj, False)
        m = self._linear3(m)

    new_state = rnn_cell_impl.LSTMStateTuple(c, m)
    return m, new_state
Developer: fotwo, Project: OpenSeq2Seq, Lines: 31, Source: flstm.py

Example 5: LSTMCell

 def LSTMCell(cls, x, mprev, cprev, weights):
   xm = array_ops.concat([x, mprev], 1)
   i_i, i_g, f_g, o_g = array_ops.split(
       value=math_ops.matmul(xm, weights), num_or_size_splits=4, axis=1)
   new_c = math_ops.sigmoid(f_g) * cprev + math_ops.sigmoid(
       i_g) * math_ops.tanh(i_i)
   new_c = clip_ops.clip_by_value(new_c, -50.0, 50.0)
   new_m = math_ops.sigmoid(o_g) * math_ops.tanh(new_c)
   return new_m, new_c
Developer: AbhinavJain13, Project: tensorflow, Lines: 9, Source: function_test.py

Example 6: _logits_to_prediction

 def _logits_to_prediction(self, logits=None):
   predictions = {PredictionKey.LOGITS: logits}
   if self.logits_dimension == 1:
     predictions[PredictionKey.LOGISTIC] = math_ops.sigmoid(logits)
     logits = array_ops.concat(1, [array_ops.zeros_like(logits), logits])
   predictions[PredictionKey.PROBABILITIES] = math_ops.sigmoid(logits)
   predictions[PredictionKey.CLASSES] = math_ops.to_int64(
       math_ops.greater(logits, 0))
   return predictions
Developer: caikehe, Project: tensorflow, Lines: 9, Source: head.py
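
A detail worth noting in Example 6 (and likewise in Examples 7 and 10 below): for a one-dimensional logit z, the LOGISTIC prediction is \sigma(z), and the CLASSES prediction thresholds the logit at zero, which is exactly equivalent to thresholding the probability at 0.5:

    \sigma(z) > 0.5 \iff z > 0, since \sigma(0) = 1 / (1 + e^{0}) = 0.5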

Example 7: _logits_to_prediction

 def _logits_to_prediction(self, logits=None):
   predictions = {PredictionKey.LOGITS: logits}
   if self.logits_dimension == 1:
     predictions[PredictionKey.LOGISTIC] = math_ops.sigmoid(logits)
     logits = array_ops.concat(1, [array_ops.zeros_like(logits), logits])
   predictions[PredictionKey.PROBABILITIES] = math_ops.sigmoid(logits)
   # Workaround for argmax dropping the second dimension.
   predictions[PredictionKey.CLASSES] = math_ops.to_int64(
       math_ops.greater(logits, 0))
   return predictions
Developer: MrCrumpets, Project: tensorflow, Lines: 10, Source: head.py

Example 8: __call__

 def __call__(self, inputs, state, scope=None):
     """Gated recurrent unit (GRU) with nunits cells."""
     with vs.variable_scope(scope or type(self).__name__):  # "GRUCell"
         with vs.variable_scope("Gates"):  # Reset gate and update gate.
             # We start with bias of 1.0 to not reset and not update.
             r, u = array_ops.split(1, 2, linear([inputs, state], 2 * self._num_units, True, 1.0))
             r, u = sigmoid(r), sigmoid(u)
         with vs.variable_scope("Candidate"):
             c = tanh(linear([inputs, r * state], self._num_units, True))
         new_h = u * state + (1 - u) * c
     return new_h, new_h
Developer: ExploreMailbot, Project: tensorflow, Lines: 11, Source: rnn_cell.py
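
Example 8 above and Examples 9 and 14 below all implement the same GRU update; with r the reset gate and u the update gate:

    r_t = \sigma(W_r [x_t, h_{t-1}] + b_r)
    u_t = \sigma(W_u [x_t, h_{t-1}] + b_u)
    \tilde{h}_t = \tanh(W_h [x_t, r_t \odot h_{t-1}] + b_h)
    h_t = u_t \odot h_{t-1} + (1 - u_t) \odot \tilde{h}_t

The gate biases start at 1.0 so that early in training r_t and u_t sit near \sigma(1) \approx 0.73, meaning the cell initially tends neither to reset nor to update. (Example 9 uses its configured activation in place of tanh, and Example 14 layer-normalizes the projections first.)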

Example 9: __call__

 def __call__(self, inputs, state, scope=None):
     """Gated recurrent unit (GRU) with nunits cells."""
     with vs.variable_scope(scope or "gru_cell"):
         with vs.variable_scope("gates"):  # Reset gate and update gate.
             # We start with bias of 1.0 to not reset and not update.
             r, u = array_ops.split(1, 2, _linear([inputs, state], 2 * self._num_units, True, 1.0, scope=scope))
             r, u = sigmoid(r), sigmoid(u)
         with vs.variable_scope("candidate"):
             c = self._activation(_linear([inputs, r * state], self._num_units, True, scope=scope))
         new_h = u * state + (1 - u) * c
     return new_h, new_h
Developer: yuikns, Project: tensorflow, Lines: 11, Source: rnn_cell.py

Example 10: _logits_to_predictions

 def _logits_to_predictions(self, logits):
   """See `_MultiClassHead`."""
   predictions = {prediction_key.PredictionKey.LOGITS: logits}
   if self.logits_dimension == 1:
     predictions[prediction_key.PredictionKey.LOGISTIC] = math_ops.sigmoid(
         logits)
     logits = array_ops.concat(1, [array_ops.zeros_like(logits), logits])
   predictions[prediction_key.PredictionKey.PROBABILITIES] = math_ops.sigmoid(
       logits)
   predictions[prediction_key.PredictionKey.CLASSES] = math_ops.to_int64(
       math_ops.greater(logits, 0))
   return predictions
Developer: HKUST-SING, Project: tensorflow, Lines: 12, Source: head.py

Example 11: __call__

  def __call__(self, inputs, state, scope=None):
    """Recurrent Highway Network cell (RHN)."""
    with vs.variable_scope(scope or type(self).__name__):  # "BasicRHNCell"
      # Parameters of gates are concatenated into one multiply for efficiency.
      if self._state_is_tuple:
        y = state
      else:
        y = array_ops.split(1, 1, state)[0]  # split returns a list; take the single tensor
      assert self._recurrence_depth > 0 and type(self._recurrence_depth) is int
      # h_transform = [None] * self._recurrence_depth
      # t = [None] * self._recurrence_depth
      # s = [None] * self._recurrence_depth
      # concat = [None] * self._recurrence_depth
      # for i in range(self._recurrence_depth):
      #   if i == 0:
      #     concat[i] = _linear([inputs, h], 2 * self._num_units, True)
      #     # h = nonlinear transform, t = transfer gate
      #     h_transform[i], t[i] = array_ops.split(1, 2, concat[i])
      #     t[i] = sigmoid(t[i] + self._transfer_bias)
      #     s[i] = self._activation(h_transform[i]) * t[i] + \
      #         (1.0 - t[i]) * _linear([inputs], 1 * self._num_units, False)
      #   if i > 0:
      #     concat[i] = _linear([h], 2 * self._num_units, True)
      #     # h = nonlinear transform, t = transfer gate
      #     h_transform[i], t[i] = array_ops.split(1, 2, concat[i])
      #     t[i] = sigmoid(t[i] + self._transfer_bias)
      #     s[i] = self._activation(h_transform[i]) * t[i] + \
      #         (1.0 - t[i]) * s[i-1]

      # ALTERNATIVE IMPLEMENTATION:
      for i in range(self._recurrence_depth):
        if i == 0:
          concat = _linear([inputs, y], 2 * self._num_units, True)
          # h = nonlinear transform, t = transfer gate
          h, t = array_ops.split(1, 2, concat)
          t = sigmoid(t + self._transfer_bias)
          s = self._activation(h) * t + \
              (1.0 - t) * _linear([inputs], 1 * self._num_units, False)
        if i > 0:
          concat = _linear([s], 2 * self._num_units, True)
          # h = nonlinear transform, t = transfer gate
          h, t = array_ops.split(1, 2, concat)
          t = sigmoid(t + self._transfer_bias)
          s = self._activation(h) * t + \
              (1.0 - t) * s
      new_y = s

      if self._state_is_tuple:
        new_state = RHNStateTuple(new_y)
      else:
        new_state = array_ops.concat(1, [new_y])
      return new_y, new_state
Developer: julian121266, Project: tensorflow, Lines: 52, Source: rnn_cell.py
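
As implemented in Example 11 above, each depth step l computes a transfer gate t (shifted by the transfer bias b_T) and blends a nonlinear transform with a carry term:

    t^{(l)} = \sigma(a^{(l)} + b_T)
    s^{(l)} = t^{(l)} \odot \phi(h^{(l)}) + (1 - t^{(l)}) \odot s^{(l-1)}    (l > 0)

with the quirk that at l = 0 the carried term is a separate linear projection of the inputs rather than the previous state.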

Example 12: __call__

  def __call__(self, inputs, state, scope=None):
    """Long short-term memory cell (LSTM) with hypernetworks and layer normalization."""
    with vs.variable_scope(scope or type(self).__name__):
      # Parameters of gates are concatenated into one multiply for efficiency.
      total_h, total_c = tf.split(1, 2, state)
      h = total_h[:, 0:self._num_units]
      c = total_c[:, 0:self._num_units]

      self.hyper_state = tf.concat(1, [total_h[:, self._num_units:], total_c[:, self._num_units:]])
      hyper_input = tf.concat(1, [inputs, h])
      hyper_output, hyper_new_state = self.hyper_cell(hyper_input, self.hyper_state)
      self.hyper_output = hyper_output
      self.hyper_state = hyper_new_state

      input_below_ = rnn_cell._linear([inputs],
                                      4 * self._num_units, False, scope="out_1")
      input_below_ = self.hyper_norm(input_below_, 4 * self._num_units, scope="hyper_x")
      state_below_ = rnn_cell._linear([h],
                                      4 * self._num_units, False, scope="out_2")
      state_below_ = self.hyper_norm(state_below_, 4 * self._num_units, scope="hyper_h")

      if self.is_layer_norm:
        s1 = vs.get_variable("s1", initializer=tf.ones([4 * self._num_units]), dtype=tf.float32)
        s2 = vs.get_variable("s2", initializer=tf.ones([4 * self._num_units]), dtype=tf.float32)
        s3 = vs.get_variable("s3", initializer=tf.ones([self._num_units]), dtype=tf.float32)

        b1 = vs.get_variable("b1", initializer=tf.zeros([4 * self._num_units]), dtype=tf.float32)
        b2 = vs.get_variable("b2", initializer=tf.zeros([4 * self._num_units]), dtype=tf.float32)
        b3 = vs.get_variable("b3", initializer=tf.zeros([self._num_units]), dtype=tf.float32)

        input_below_ = ln(input_below_, s1, b1)
        state_below_ = ln(state_below_, s2, b2)

      lstm_matrix = tf.add(input_below_, state_below_)
      i, j, f, o = array_ops.split(1, 4, lstm_matrix)
      new_c = (c * sigmoid(f) + sigmoid(i) *
               self._activation(j))

      # Currently, normalizing c causes a lot of NaNs in the model, so it is commented out for now.
      # new_c_ = ln(new_c, s3, b3)
      new_c_ = new_c
      new_h = self._activation(new_c_) * sigmoid(o)

      hyper_h, hyper_c = tf.split(1, 2, hyper_new_state)
      new_total_h = tf.concat(1, [new_h, hyper_h])
      new_total_c = tf.concat(1, [new_c, hyper_c])
      new_total_state = tf.concat(1, [new_total_h, new_total_c])
      return new_h, new_total_state
Developer: pbhatia243, Project: tf-layer-norm, Lines: 51, Source: layers.py
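
The ln(x, s, b) helper used in Example 12 above and Example 14 below is not defined in this article; assuming the usual layer-normalization form, with \mu and \sigma_x the per-sample mean and standard deviation over the feature dimension, it computes

    ln(x, s, b) = s \odot (x - \mu) / \sigma_x + b

so the s* variables act as learned gains and the b* variables as learned shifts.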

Example 13: __call__

  def __call__(self, inputs, state, scope=None):
    """Long short-term memory cell (LSTM)."""
    with vs.variable_scope(scope or type(self).__name__):  # "BasicLSTMCell"
      # Parameters of gates are concatenated into one multiply for efficiency.
      c, h = array_ops.split(1, 2, state)
      concat = linear([inputs, h], 4 * self._num_units, True)

      # i = input_gate, j = new_input, f = forget_gate, o = output_gate
      i, j, f, o = array_ops.split(1, 4, concat)

      new_c = c * sigmoid(f + self._forget_bias) + sigmoid(i) * tanh(j)
      new_h = tanh(new_c) * sigmoid(o)

      return new_h, array_ops.concat(1, [new_c, new_h])
Developer: AdvanceCodingTechnology, Project: tensorflow, Lines: 14, Source: rnn_cell.py

Example 14: __call__

  def __call__(self, inputs, state, scope=None):
    """Gated recurrent unit (GRU) with nunits cells."""
    dim = self._num_units
    with vs.variable_scope(scope or type(self).__name__):  # "GRUCell"
      with vs.variable_scope("Gates"):  # Reset gate and update gate.
        # We start with bias of 1.0 to not reset and not update.
        with vs.variable_scope( "Layer_Parameters"):

          s1 = vs.get_variable("s1", initializer=tf.ones([2*dim]), dtype=tf.float32)
          s2 = vs.get_variable("s2", initializer=tf.ones([2*dim]), dtype=tf.float32)
          s3 = vs.get_variable("s3", initializer=tf.ones([dim]), dtype=tf.float32)
          s4 = vs.get_variable("s4", initializer=tf.ones([dim]), dtype=tf.float32)
          b1 = vs.get_variable("b1", initializer=tf.zeros([2*dim]), dtype=tf.float32)
          b2 = vs.get_variable("b2", initializer=tf.zeros([2*dim]), dtype=tf.float32)
          b3 = vs.get_variable("b3", initializer=tf.zeros([dim]), dtype=tf.float32)
          b4 = vs.get_variable("b4", initializer=tf.zeros([dim]), dtype=tf.float32)


          # Code below initialized for all cells
          # s1 = tf.Variable(tf.ones([2 * dim]), name="s1")
          # s2 = tf.Variable(tf.ones([2 * dim]), name="s2")
          # s3 = tf.Variable(tf.ones([dim]), name="s3")
          # s4 = tf.Variable(tf.ones([dim]), name="s4")
          # b1 = tf.Variable(tf.zeros([2 * dim]), name="b1")
          # b2 = tf.Variable(tf.zeros([2 * dim]), name="b2")
          # b3 = tf.Variable(tf.zeros([dim]), name="b3")
          # b4 = tf.Variable(tf.zeros([dim]), name="b4")

        input_below_ = rnn_cell._linear([inputs],
                               2 * self._num_units, False, scope="out_1")
        input_below_ = ln(input_below_, s1, b1)
        state_below_ = rnn_cell._linear([state],
                               2 * self._num_units, False, scope="out_2")
        state_below_ = ln(state_below_, s2, b2)
        out = tf.add(input_below_, state_below_)
        r, u = array_ops.split(1, 2, out)
        r, u = sigmoid(r), sigmoid(u)

      with vs.variable_scope("Candidate"):
          input_below_x = rnn_cell._linear([inputs],
                                           self._num_units, False, scope="out_3")
          input_below_x = ln(input_below_x, s3, b3)
          state_below_x = rnn_cell._linear([state],
                                           self._num_units, False, scope="out_4")
          state_below_x = ln(state_below_x, s4, b4)
          c_pre = tf.add(input_below_x,r * state_below_x)
          c = self._activation(c_pre)
      new_h = u * state + (1 - u) * c
    return new_h, new_h
Developer: jessemzhang, Project: deep_learning_genomics_nlp, Lines: 49, Source: LN_rnn_cell.py

Example 15: __call__

    def __call__(self, inputs, state, scope=None):
        """Gated recurrent unit (GRU) with nunits cells."""
        dtype = inputs.dtype
        batch_size, feature_size = inputs.get_shape().as_list()
        if self._use_tgate:
            # Time gate
            feature_size = feature_size - 1
            tvscope = vs.get_variable_scope()
            with vs.variable_scope(tvscope, initializer=None) as unit_scope:
                with vs.variable_scope(unit_scope) as time_gate_scope:
                    w_t1 = vs.get_variable(
                        "w_t1", shape=[1, self._num_units], dtype=dtype)
                    bias_t1 = vs.get_variable(
                        "bias_t1", [self._num_units], dtype=dtype,
                        initializer=init_ops.constant_initializer(0.0, dtype=dtype))
                    w_tx1 = vs.get_variable(
                        "w_tx1", shape=[feature_size, self._num_units], dtype=dtype)
                seq = tf.slice(inputs, begin=[0, 0], size=[batch_size, feature_size])
                # The time-delta column is hard-coded at index 56 (dataset-specific);
                # the remaining columns are the actual input features.
                delta_t = tf.slice(inputs, begin=[0, 56], size=[batch_size, 1])

                t1_act = (self._activation(math_ops.matmul(delta_t, w_t1)) +
                          math_ops.matmul(seq, w_tx1) + bias_t1)
                t1 = sigmoid(t1_act)
                inputs = seq
        # Unpack the (state, decayed-state) pair from the previous step.
        (state, state_decay) = state
        with vs.variable_scope("gates"):  # Reset gate and update gate.
            # We start with bias of 1.0 to not reset and not update.
            value = sigmoid(_linear(
                [inputs, state], 2 * self._num_units, True, 1.0))
            r, u = array_ops.split(value=value,
                                   num_or_size_splits=2,
                                   axis=1)
        with vs.variable_scope("candidate"):
            c = self._activation(_linear([inputs, r * state],
                                         self._num_units, True))
        new_h = u * state + (1 - u) * c

        if self._use_tgate:
            new_h_decay = u * t1 * state_decay + (1 - u * t1) * c
            new_state = TGRUStateTuple(new_h, new_h_decay)
            new_h = tf.concat([new_h, new_h_decay], axis=1)
        else:
            new_state = TGRUStateTuple(new_h, new_h)

        return new_h, new_state
Developer: gjjg1331jggj, Project: Attention-GRU-3M, Lines: 49, Source: trnn.py
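
In Example 15 above, the time gate t_1 is a function of the time delta \Delta t and the remaining input features x (see t1_act in the code), and it modulates the update gate on the decayed-state path:

    t_1 = \sigma(\phi(\Delta t \, w_{t1}) + x \, w_{tx1} + b_{t1})
    h^{decay}_t = (u \odot t_1) \odot h^{decay}_{t-1} + (1 - u \odot t_1) \odot \tilde{h}_t

so as t_1 shrinks, the decayed path is pulled toward the fresh candidate \tilde{h}_t rather than the old state.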


Note: The tensorflow.python.ops.math_ops.sigmoid examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by various developers; copyright remains with the original authors, and any use or redistribution should follow the corresponding project's license. Please do not republish without permission.