

Python tensorflow.sigmoid Method Code Examples

This article collects typical usage examples of the tensorflow.sigmoid method in Python. If you are wondering what tensorflow.sigmoid does and how to use it in practice, the curated code examples below should help. You can also explore further usage examples from the tensorflow module that this method belongs to.


The following shows 15 code examples of the tensorflow.sigmoid method, sorted by popularity by default.
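
Before the examples, here is a minimal sketch of what tf.sigmoid itself computes (assuming TensorFlow 1.x in graph mode, which matches most of the snippets below):

import tensorflow as tf

logits = tf.constant([-2.0, 0.0, 2.0])
probs = tf.sigmoid(logits)  # elementwise 1 / (1 + exp(-x))

with tf.Session() as sess:
    print(sess.run(probs))  # approximately [0.119, 0.5, 0.881]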

Example 1: _build_score_converter

# Required import: import tensorflow as tf
# Or: from tensorflow import sigmoid
# In the original project: from object_detection.protos import post_processing_pb2
def _build_score_converter(score_converter_config):
  """Builds score converter based on the config.

  Builds one of [tf.identity, tf.sigmoid, tf.softmax] score converters based on
  the config.

  Args:
    score_converter_config: post_processing_pb2.PostProcessing.score_converter.

  Returns:
    Callable score converter op.

  Raises:
    ValueError: On unknown score converter.
  """
  if score_converter_config == post_processing_pb2.PostProcessing.IDENTITY:
    return tf.identity
  if score_converter_config == post_processing_pb2.PostProcessing.SIGMOID:
    return tf.sigmoid
  if score_converter_config == post_processing_pb2.PostProcessing.SOFTMAX:
    return tf.nn.softmax
  raise ValueError('Unknown score converter.') 
Author: ringringyi, Project: DOTA_models, Lines: 24, Source: post_processing_builder.py
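
A hypothetical usage sketch of the builder above (the tensor shape is illustrative, not from the source):

converter = _build_score_converter(post_processing_pb2.PostProcessing.SIGMOID)
class_logits = tf.zeros([8, 100, 90])   # [batch, boxes, classes], illustrative shape
scores = converter(class_logits)        # tf.sigmoid applied elementwise, values in (0, 1)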

Example 2: _Apply

# Required import: import tensorflow as tf
# Or: from tensorflow import sigmoid
def _Apply(self, *args):
    xtransform = self._TransformInputs(*args)
    depth_axis = len(self._output_shape) - 1

    if self.hidden is not None:
      htransform = self._TransformHidden(self.hidden)
      f, i, j, o = tf.split(
          value=htransform + xtransform, num_or_size_splits=4, axis=depth_axis)
    else:
      f, i, j, o = tf.split(
          value=xtransform, num_or_size_splits=4, axis=depth_axis)

    if self.cell is not None:
      self.cell = tf.sigmoid(f) * self.cell + tf.sigmoid(i) * tf.tanh(j)
    else:
      self.cell = tf.sigmoid(i) * tf.tanh(j)

    self.hidden = tf.sigmoid(o) * tf.tanh(self.cell)
    return self.hidden 
Author: ringringyi, Project: DOTA_models, Lines: 21, Source: blocks_lstm.py
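
For reference, the same gating arithmetic written out with plain tensors (a standalone sketch; shapes are assumed, not from the source):

xtransform = tf.zeros([2, 4 * 8])            # pre-activations for 4 gates of 8 units each
f, i, j, o = tf.split(xtransform, 4, axis=1)
cell = tf.sigmoid(i) * tf.tanh(j)            # first step: no previous cell state
hidden = tf.sigmoid(o) * tf.tanh(cell)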

Example 3: conv_lstm

# Required import: import tensorflow as tf
# Or: from tensorflow import sigmoid
# conv() and layer_norm() are helpers defined in the same file (common_layers.py)
def conv_lstm(x,
              kernel_size,
              filters,
              padding="SAME",
              dilation_rate=(1, 1),
              name=None,
              reuse=None):
  """Convolutional LSTM in 1 dimension."""
  with tf.variable_scope(
      name, default_name="conv_lstm", values=[x], reuse=reuse):
    gates = conv(
        x,
        4 * filters,
        kernel_size,
        padding=padding,
        dilation_rate=dilation_rate)
    g = tf.split(layer_norm(gates, 4 * filters), 4, axis=3)
    new_cell = tf.sigmoid(g[0]) * x + tf.sigmoid(g[1]) * tf.tanh(g[3])
    return tf.sigmoid(g[2]) * tf.tanh(new_cell) 
Author: akzaidi, Project: fine-lm, Lines: 21, Source: common_layers.py

Example 4: gated_linear_unit_layer

# Required import: import tensorflow as tf
# Or: from tensorflow import sigmoid
# shape_list() is a helper defined in the same file (common_layers.py)
def gated_linear_unit_layer(x, name=None):
  """Gated linear unit layer.

  Paper: Language Modeling with Gated Convolutional Networks.
  Link: https://arxiv.org/abs/1612.08083
  x = Wx * sigmoid(W'x).

  Args:
    x: A tensor
    name: A string

  Returns:
    A tensor of the same shape as x.
  """
  with tf.variable_scope(name, default_name="glu_layer", values=[x]):
    depth = shape_list(x)[-1]
    x = tf.layers.dense(x, depth * 2, activation=None)
    x, gating_x = tf.split(x, 2, axis=-1)
    return x * tf.nn.sigmoid(gating_x) 
Author: akzaidi, Project: fine-lm, Lines: 21, Source: common_layers.py
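
The same gated linear unit can be written without the project's helper functions (a sketch; the input shape is illustrative, and tf.layers.dense is the same TF 1.x API used in the snippet):

x = tf.zeros([2, 10, 16])                    # illustrative input with depth 16
proj = tf.layers.dense(x, 2 * 16)            # W and W' in a single projection
a, b = tf.split(proj, 2, axis=-1)
out = a * tf.nn.sigmoid(b)                   # same shape as x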

Example 5: yolo_boxes

# Required import: import tensorflow as tf
# Or: from tensorflow import sigmoid
def yolo_boxes(pred, anchors, num_classes, training=True):
    # pred: (batch_size, grid, grid, anchors, (x, y, w, h, obj, ...classes))
    grid_size = tf.shape(pred)[1:3][::-1]
    grid_y, grid_x = tf.shape(pred)[1], tf.shape(pred)[2]

    box_xy, box_wh, objectness, class_probs = tf.split(pred, (2, 2, 1, num_classes), axis=-1)
    box_xy = tf.sigmoid(box_xy)

    objectness = tf.sigmoid(objectness)
    class_probs = tf.nn.softmax(class_probs)
    pred_box = tf.concat((box_xy, box_wh), axis=-1)  # original xywh for loss

    # !!! grid[x][y] == (y, x)
    grid = tf.meshgrid(tf.range(grid_x), tf.range(grid_y))
    grid = tf.expand_dims(tf.stack(grid, axis=-1), axis=2)  # [gx, gy, 1, 2]

    box_xy = (box_xy + tf.cast(grid, tf.float32)) / tf.cast(grid_size, tf.float32)
    box_wh = tf.exp(box_wh) * anchors

    box_x1y1 = box_xy - box_wh / 2
    box_x2y2 = box_xy + box_wh / 2
    bbox = tf.concat([box_x1y1, box_x2y2], axis=-1)

    return bbox, objectness, class_probs, pred_box 
Author: akkaze, Project: tf2-yolo3, Lines: 26, Source: models.py
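
A hypothetical call to the function above (shapes and anchor values are illustrative only):

pred = tf.zeros([1, 13, 13, 3, 5 + 80])          # 13x13 grid, 3 anchors, 80 classes
anchors = tf.constant([[0.1, 0.1], [0.2, 0.3], [0.4, 0.5]], tf.float32)
bbox, objectness, class_probs, pred_box = yolo_boxes(pred, anchors, num_classes=80)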

Example 6: _build_model

# Required import: import tensorflow as tf
# Or: from tensorflow import sigmoid
def _build_model(self):
        # define initial relation features
        if self.use_context or (self.use_path and self.path_type == 'rnn'):
            self._build_relation_feature()

        self.scores = 0.0

        if self.use_context:
            edges_list, mask_list = self._get_neighbors_and_masks(self.labels, self.entity_pairs, self.train_edges)
            self.aggregators = self._get_neighbor_aggregators()  # define aggregators for each layer
            self.aggregated_neighbors = self._aggregate_neighbors(edges_list, mask_list)  # [batch_size, n_relations]
            self.scores += self.aggregated_neighbors

        if self.use_path:
            if self.path_type == 'embedding':
                self.W, self.b = self._get_weight_and_bias(self.n_paths, self.n_relations)  # [batch_size, n_relations]
                self.scores += tf.sparse_tensor_dense_matmul(self.path_features, self.W) + self.b

            elif self.path_type == 'rnn':
                rnn_output = self._rnn(self.path_ids)  # [batch_size, path_samples, n_relations]
                self.scores += self._aggregate_paths(rnn_output)

        # narrow the range of scores to [0, 1] for the ease of calculating ranking-based metrics
        self.scores_normalized = tf.sigmoid(self.scores) 
Author: hwwang55, Project: PathCon, Lines: 26, Source: model.py

Example 7: build_score_converter

# Required import: import tensorflow as tf
# Or: from tensorflow import sigmoid
# In the original project: from object_detection.protos import box_predictor_pb2
def build_score_converter(score_converter_config, is_training):
  """Builds score converter based on the config.

  Builds one of [tf.identity, tf.sigmoid] score converters based on the config
  and whether the BoxPredictor is for training or inference.

  Args:
    score_converter_config:
      box_predictor_pb2.WeightSharedConvolutionalBoxPredictor.score_converter.
    is_training: Indicates whether the BoxPredictor is in training mode.

  Returns:
    Callable score converter op.

  Raises:
    ValueError: On unknown score converter.
  """
  if score_converter_config == (
      box_predictor_pb2.WeightSharedConvolutionalBoxPredictor.IDENTITY):
    return tf.identity
  if score_converter_config == (
      box_predictor_pb2.WeightSharedConvolutionalBoxPredictor.SIGMOID):
    return tf.identity if is_training else tf.sigmoid
  raise ValueError('Unknown score converter.') 
Author: ahmetozlu, Project: vehicle_counting_tensorflow, Lines: 26, Source: box_predictor_builder.py
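
The training/inference switch above behaves as follows (illustrative sketch):

sigmoid_cfg = box_predictor_pb2.WeightSharedConvolutionalBoxPredictor.SIGMOID
train_fn = build_score_converter(sigmoid_cfg, is_training=True)    # returns tf.identity
eval_fn = build_score_converter(sigmoid_cfg, is_training=False)    # returns tf.sigmoid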

Example 8: _build_score_converter

# Required import: import tensorflow as tf
# Or: from tensorflow import sigmoid
# In the original project: from object_detection.protos import post_processing_pb2
# _score_converter_fn_with_logit_scale is a helper defined in the same file
def _build_score_converter(score_converter_config, logit_scale):
  """Builds score converter based on the config.

  Builds one of [tf.identity, tf.sigmoid, tf.softmax] score converters based on
  the config.

  Args:
    score_converter_config: post_processing_pb2.PostProcessing.score_converter.
    logit_scale: temperature to use for SOFTMAX score_converter.

  Returns:
    Callable score converter op.

  Raises:
    ValueError: On unknown score converter.
  """
  if score_converter_config == post_processing_pb2.PostProcessing.IDENTITY:
    return _score_converter_fn_with_logit_scale(tf.identity, logit_scale)
  if score_converter_config == post_processing_pb2.PostProcessing.SIGMOID:
    return _score_converter_fn_with_logit_scale(tf.sigmoid, logit_scale)
  if score_converter_config == post_processing_pb2.PostProcessing.SOFTMAX:
    return _score_converter_fn_with_logit_scale(tf.nn.softmax, logit_scale)
  raise ValueError('Unknown score converter.') 
Author: ahmetozlu, Project: vehicle_counting_tensorflow, Lines: 25, Source: post_processing_builder.py

Example 9: is_normalized

# Required import: import tensorflow as tf
# Or: from tensorflow import sigmoid
def is_normalized(self):
    """Returns true only if the associated loss is normalized.

    We call a classification loss "normalized" if there exists a random variable
    Z such that, for any values of the predictions and weights:

    > loss(predictions, weights) = E[zero-one-loss(predictions + Z, weights)]

    where the expectation is taken over Z.

    Intuitively, a normalized loss can be interpreted as a smoothed zero-one
    loss (e.g. a ramp or a sigmoid), while a non-normalized loss will typically
    be some unbounded relaxation (e.g. a hinge).

    Returns:
      True if the loss is normalized. False otherwise.
    """ 
Author: google-research, Project: tensorflow_constrained_optimization, Lines: 19, Source: loss.py

Example 10: __call__

# Required import: import tensorflow as tf
# Or: from tensorflow import sigmoid
def __call__(self, inputs, state, scope=None):
        num_proj = self._num_units if self._num_proj is None else self._num_proj

        c_prev = tf.slice(state, [0, 0], [-1, self._num_units])
        m_prev = tf.slice(state, [0, self._num_units], [-1, num_proj])

        input_size = inputs.get_shape().with_rank(2)[1]
        if input_size.value is None:
            raise ValueError("Could not infer input size from inputs.get_shape()[-1]")
        with tf.variable_scope(type(self).__name__,
                               initializer=self._initializer):  # "LSTMCell"
            # i = input_gate, j = new_input, f = forget_gate, o = output_gate
            cell_inputs = tf.concat(1, [inputs, m_prev])
            lstm_matrix = tf.nn.bias_add(tf.matmul(cell_inputs, self._concat_w), self._b)
            i, j, f, o = tf.split(1, 4, lstm_matrix)

            c = tf.sigmoid(f + 1.0) * c_prev + tf.sigmoid(i) * tf.tanh(j)
            m = tf.sigmoid(o) * tf.tanh(c)

            if self._num_proj is not None:
                m = tf.matmul(m, self._concat_w_proj)

        new_state = tf.concat(1, [c, m])
        return m, new_state 
Author: rafaljozefowicz, Project: lm, Lines: 26, Source: model_utils.py
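
Note that this snippet uses the pre-TF-1.0 argument order for tf.concat and tf.split. On TF 1.x or later, the equivalent calls would be (a sketch, not from the source):

cell_inputs = tf.concat([inputs, m_prev], axis=1)
i, j, f, o = tf.split(lstm_matrix, num_or_size_splits=4, axis=1)
new_state = tf.concat([c, m], axis=1)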

Example 11: call

# Required import: import tensorflow as tf
# Or: from tensorflow import sigmoid
def call(self, inputs, **kwargs):

        inputs_normed = self.bn(inputs)
        # tf.layers.batch_normalization(
        # inputs, axis=self.axis, epsilon=self.epsilon, center=False, scale=False)
        x_p = tf.sigmoid(inputs_normed)
        return self.alphas * (1.0 - x_p) * inputs + x_p * inputs 
Author: ShenDezhou, Project: icme2019, Lines: 9, Source: activation.py
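
The gating above amounts to an interpolation between alphas * x and x (a sketch with plain tensors, leaving out the BatchNormalization state):

x = tf.constant([[-1.0, 0.5]])
alphas = 0.25                          # illustrative value; learned in the real layer
p = tf.sigmoid(x)                      # stands in for sigmoid(bn(x))
out = alphas * (1.0 - p) * x + p * x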

Example 12: inference

# Required import: import tensorflow as tf
# Or: from tensorflow import sigmoid
def inference(input_data):
    with tf.variable_scope('hidden1'):
        # first hidden layer, 16 units
        weights = tf.get_variable("weight", [1, 16], tf.float32,
                                  initializer=tf.random_normal_initializer(0.0, 1))
        biases = tf.get_variable("bias", [1, 16], tf.float32,
                                 initializer=tf.random_normal_initializer(0.0, 1))
        hidden1 = tf.sigmoid(tf.multiply(input_data, weights) + biases)

    with tf.variable_scope('hidden2'):
        # second hidden layer, 16 units
        weights = tf.get_variable("weight", [16, 16], tf.float32,
                                  initializer=tf.random_normal_initializer(0.0, 1))
        biases = tf.get_variable("bias", [16], tf.float32,
                                 initializer=tf.random_normal_initializer(0.0, 1))
        hidden2 = tf.sigmoid(tf.matmul(hidden1, weights) + biases)

    with tf.variable_scope('hidden3'):
        # third hidden layer, 16 units
        weights = tf.get_variable("weight", [16, 16], tf.float32,
                                  initializer=tf.random_normal_initializer(0.0, 1))
        biases = tf.get_variable("bias", [16], tf.float32,
                                 initializer=tf.random_normal_initializer(0.0, 1))
        hidden3 = tf.sigmoid(tf.matmul(hidden2, weights) + biases)

    with tf.variable_scope('output_layer'):
        # output layer
        weights = tf.get_variable("weight", [16, 1], tf.float32,
                                  initializer=tf.random_normal_initializer(0.0, 1))
        biases = tf.get_variable("bias", [1], tf.float32,
                                 initializer=tf.random_normal_initializer(0.0, 1))
        output = tf.matmul(hidden3, weights) + biases
    return output


# training
Author: wdxtub, Project: deep-learning-note, Lines: 38, Source: 4_simulate_sin.py
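
A hypothetical way to wire the network up (the placeholder shape is an assumption based on the first layer's [1, 16] weights):

x = tf.placeholder(tf.float32, [None, 1])
y_pred = inference(x)                  # [batch, 1] raw output; no activation on the last layer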

Example 13: LogisticClassifier

# Required import: import tensorflow as tf
# Or: from tensorflow import sigmoid
# Also used below: slim = tf.contrib.slim
def LogisticClassifier(inputs, labels, scope=None, reuse=None):
  with tf.variable_scope(scope, 'LogisticClassifier', [inputs, labels],
                         reuse=reuse):
    predictions = slim.fully_connected(inputs, 1, activation_fn=tf.sigmoid,
                                       scope='fully_connected')
    slim.losses.log_loss(predictions, labels)
    return predictions 
Author: ringringyi, Project: DOTA_models, Lines: 9, Source: model_deploy_test.py

Example 14: BatchNormClassifier

# Required import: import tensorflow as tf
# Or: from tensorflow import sigmoid
# Also used below: slim = tf.contrib.slim
def BatchNormClassifier(inputs, labels, scope=None, reuse=None):
  with tf.variable_scope(scope, 'BatchNormClassifier', [inputs, labels],
                         reuse=reuse):
    inputs = slim.batch_norm(inputs, decay=0.1)
    predictions = slim.fully_connected(inputs, 1,
                                       activation_fn=tf.sigmoid,
                                       scope='fully_connected')
    slim.losses.log_loss(predictions, labels)
    return predictions 
Author: ringringyi, Project: DOTA_models, Lines: 11, Source: model_deploy_test.py

Example 15: LSTMCell

# Required import: import tensorflow as tf
# Or: from tensorflow import sigmoid
def LSTMCell(x, mprev, cprev, key, params):
  """Create an LSTM cell.

  Implements the equations in pg.2 from
  "Long Short-Term Memory Based Recurrent Neural Network Architectures
  For Large Vocabulary Speech Recognition",
  Hasim Sak, Andrew Senior, Francoise Beaufays.

  Args:
    x: Inputs to this cell.
    mprev: m_{t-1}, the recurrent activations (same as the output)
      from the previous cell.
    cprev: c_{t-1}, the cell activations from the previous cell.
    key: Prefix used to look up this cell's weights and biases in params.
    params: A dictionary of the weights and biases, keyed by key plus a
      per-gate suffix (e.g. "_ix", "_im", "_i").

  Returns:
    m: Outputs of this cell.
    c: Cell activations.
  """

  i = tf.matmul(x, params[key + "_ix"]) + tf.matmul(mprev, params[key + "_im"])
  i = tf.nn.bias_add(i, params[key + "_i"])
  f = tf.matmul(x, params[key + "_fx"]) + tf.matmul(mprev, params[key + "_fm"])
  f = tf.nn.bias_add(f, params[key + "_f"])
  c = tf.matmul(x, params[key + "_cx"]) + tf.matmul(mprev, params[key + "_cm"])
  c = tf.nn.bias_add(c, params[key + "_c"])
  o = tf.matmul(x, params[key + "_ox"]) + tf.matmul(mprev, params[key + "_om"])
  o = tf.nn.bias_add(o, params[key + "_o"])
  i = tf.sigmoid(i, name="i_gate")
  f = tf.sigmoid(f, name="f_gate")
  o = tf.sigmoid(o, name="o_gate")
  c = f * cprev + i * tf.tanh(c)
  m = o * c
  return m, c 
Author: ringringyi, Project: DOTA_models, Lines: 38, Source: nn_utils.py
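
The params dictionary is keyed by the cell's key plus per-gate suffixes. A hypothetical setup consistent with the lookups above (names and shapes are illustrative):

key, input_dim, num_units = "lstm1", 32, 64
params = {}
for gate in ("i", "f", "c", "o"):
    params[key + "_" + gate + "x"] = tf.get_variable(key + "_" + gate + "x", [input_dim, num_units])
    params[key + "_" + gate + "m"] = tf.get_variable(key + "_" + gate + "m", [num_units, num_units])
    params[key + "_" + gate] = tf.get_variable(key + "_" + gate, [num_units])

x = tf.zeros([8, input_dim])
mprev = tf.zeros([8, num_units])
cprev = tf.zeros([8, num_units])
m, c = LSTMCell(x, mprev, cprev, key, params)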


Note: The tensorflow.sigmoid method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. Please follow the corresponding project's license when distributing or using the code; do not reproduce this article without permission.