

Python tensorflow.not_equal Code Examples

This article collects typical usage examples of the tensorflow.not_equal method in Python. If you are wondering what tensorflow.not_equal does or how it is used in practice, the hand-picked examples below should help. You can also explore further usage examples from the tensorflow module.


The following section shows 15 code examples of tensorflow.not_equal, sorted by popularity by default.
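Before diving into the examples, here is a minimal standalone sketch (assuming TensorFlow 2.x with eager execution; the tensor values are made up for illustration) of what tf.not_equal returns and how it is typically turned into a mask:

import tensorflow as tf

ids = tf.constant([[3, 7, 0, 0],
                   [5, 0, 0, 0]])              # assume id 0 marks padding
mask = tf.not_equal(ids, 0)                    # boolean tensor, shape [2, 4]
float_mask = tf.cast(mask, tf.float32)         # [[1., 1., 0., 0.], [1., 0., 0., 0.]]
lengths = tf.reduce_sum(float_mask, axis=-1)   # [2., 1.]
print(mask.numpy(), lengths.numpy())

Most of the examples below follow this pattern: compare a tensor against a sentinel value (a padding id, an ignore label, a magic number), then cast the boolean result to float or feed it to tf.where/tf.boolean_mask to weight or filter the downstream computation.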

Example 1: bottom_simple

# Required module import: import tensorflow [as alias]
# Or: from tensorflow import not_equal [as alias]
def bottom_simple(self, x, name, reuse):
    with tf.variable_scope(name, reuse=reuse):
      # Ensure the inputs are 3-D
      if len(x.get_shape()) == 4:
        x = tf.squeeze(x, axis=3)
      while len(x.get_shape()) < 3:
        x = tf.expand_dims(x, axis=-1)

      var = self._get_weights()
      x = common_layers.dropout_no_scaling(
          x, 1.0 - self._model_hparams.symbol_dropout)
      ret = common_layers.gather(var, x)
      if self._model_hparams.multiply_embedding_mode == "sqrt_depth":
        ret *= self._body_input_depth**0.5
      ret *= tf.expand_dims(tf.to_float(tf.not_equal(x, 0)), -1)
      return ret 
Author: akzaidi | Project: fine-lm | Lines: 18 | Source file: modalities.py
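The final line of bottom_simple zeroes out the embedding vectors of padding positions (token id 0). A minimal sketch of that masking step alone, assuming TensorFlow 2.x and a toy embedding table (tf.to_float from the original is written as tf.cast here):

import tensorflow as tf

var = tf.random.uniform([10, 4])      # toy embedding table: vocab size 10, depth 4
x = tf.constant([[3, 7, 0, 0]])       # assume id 0 is the padding token
ret = tf.gather(var, x)               # embeddings, shape [1, 4, 4]
ret *= tf.expand_dims(tf.cast(tf.not_equal(x, 0), tf.float32), -1)
# the last two rows (the padding positions) are now all zeros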

Example 2: attention_bias_same_segment

# Required module import: import tensorflow [as alias]
# Or: from tensorflow import not_equal [as alias]
def attention_bias_same_segment(query_segment_id, memory_segment_id):
  """Create an bias tensor to be added to attention logits.

  Positions with the same segment_ids can see each other.

  Args:
    query_segment_id: a float `Tensor` with shape [batch, query_length].
    memory_segment_id: a float `Tensor` with shape [batch, memory_length].

  Returns:
    a `Tensor` with shape [batch, 1, query_length, memory_length].
  """
  ret = tf.to_float(
      tf.not_equal(
          tf.expand_dims(query_segment_id, 2),
          tf.expand_dims(memory_segment_id, 1))) * -1e9
  return tf.expand_dims(ret, axis=1) 
Author: akzaidi | Project: fine-lm | Lines: 19 | Source file: common_attention.py
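A quick sketch of the broadcasting involved, with hypothetical segment ids and TensorFlow 2.x (tf.to_float replaced by tf.cast): positions whose query and memory segments differ receive a large negative bias, so they are effectively masked out after the softmax.

import tensorflow as tf

query_segment_id = tf.constant([[1., 1., 2.]])         # [batch=1, query_length=3]
memory_segment_id = tf.constant([[1., 2., 2., 2.]])    # [batch=1, memory_length=4]
mismatch = tf.not_equal(tf.expand_dims(query_segment_id, 2),
                        tf.expand_dims(memory_segment_id, 1))       # [1, 3, 4]
bias = tf.expand_dims(tf.cast(mismatch, tf.float32) * -1e9, axis=1) # [1, 1, 3, 4]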

Example 3: _create_metrics_for_keras_eval_model

# Required module import: import tensorflow [as alias]
# Or: from tensorflow import not_equal [as alias]
def _create_metrics_for_keras_eval_model(self) -> Dict[str, List[Union[Callable, keras.metrics.Metric]]]:
        top_k_acc_metrics = []
        for k in range(1, self.config.TOP_K_WORDS_CONSIDERED_DURING_PREDICTION + 1):
            top_k_acc_metric = partial(
                sparse_top_k_categorical_accuracy, k=k)
            top_k_acc_metric.__name__ = 'top{k}_acc'.format(k=k)
            top_k_acc_metrics.append(top_k_acc_metric)
        predicted_words_filters = [
            lambda word_strings: tf.not_equal(word_strings, self.vocabs.target_vocab.special_words.OOV),
            lambda word_strings: tf.strings.regex_full_match(word_strings, r'^[a-zA-Z\|]+$')
        ]
        words_subtokens_metrics = [
            WordsSubtokenPrecisionMetric(predicted_words_filters=predicted_words_filters, name='subtoken_precision'),
            WordsSubtokenRecallMetric(predicted_words_filters=predicted_words_filters, name='subtoken_recall'),
            WordsSubtokenF1Metric(predicted_words_filters=predicted_words_filters, name='subtoken_f1')
        ]
        return {'target_index': top_k_acc_metrics, 'target_string': words_subtokens_metrics} 
Author: tech-srl | Project: code2vec | Lines: 19 | Source file: keras_model.py
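The two lambdas are filters over predicted word strings: a prediction counts only if it is not the OOV token and consists solely of letters and '|' separators. A rough standalone sketch of how such filters could be combined, assuming TensorFlow 2.x and a hypothetical '<OOV>' marker string:

import tensorflow as tf

word_strings = tf.constant(['<OOV>', 'get|value', 'foo bar'])
predicted_words_filters = [
    lambda w: tf.not_equal(w, '<OOV>'),
    lambda w: tf.strings.regex_full_match(w, r'^[a-zA-Z\|]+$'),
]
keep = tf.reduce_all(tf.stack([f(word_strings) for f in predicted_words_filters]), axis=0)
# keep == [False, True, False]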

Example 4: _filter_input_rows

# Required module import: import tensorflow [as alias]
# Or: from tensorflow import not_equal [as alias]
def _filter_input_rows(self, *row_parts) -> tf.bool:
        row_parts = self.model_input_tensors_former.from_model_input_form(row_parts)

        #assert all(tensor.shape == (self.config.MAX_CONTEXTS,) for tensor in
        #           {row_parts.path_source_token_indices, row_parts.path_indices,
        #            row_parts.path_target_token_indices, row_parts.context_valid_mask})

        # FIXME: Does "valid" here mean just "no padding" or "neither padding nor OOV"? I assumed just "no padding".
        any_word_valid_mask_per_context_part = [
            tf.not_equal(tf.reduce_max(row_parts.path_source_token_indices, axis=0),
                         self.vocabs.token_vocab.word_to_index[self.vocabs.token_vocab.special_words.PAD]),
            tf.not_equal(tf.reduce_max(row_parts.path_target_token_indices, axis=0),
                         self.vocabs.token_vocab.word_to_index[self.vocabs.token_vocab.special_words.PAD]),
            tf.not_equal(tf.reduce_max(row_parts.path_indices, axis=0),
                         self.vocabs.path_vocab.word_to_index[self.vocabs.path_vocab.special_words.PAD])]
        any_contexts_is_valid = reduce(tf.logical_or, any_word_valid_mask_per_context_part)  # scalar

        if self.estimator_action.is_evaluate:
            cond = any_contexts_is_valid  # scalar
        else:  # training
            word_is_valid = tf.greater(
                row_parts.target_index, self.vocabs.target_vocab.word_to_index[self.vocabs.target_vocab.special_words.OOV])  # scalar
            cond = tf.logical_and(word_is_valid, any_contexts_is_valid)  # scalar

        return cond  # scalar 
Author: tech-srl | Project: code2vec | Lines: 27 | Source file: path_context_reader.py

Example 5: magic_correction_term

# Required module import: import tensorflow [as alias]
# Or: from tensorflow import not_equal [as alias]
def magic_correction_term(y_true):
    """
    Calculate a correction term to prevent the loss being lowered by magic_num

    :param y_true: Ground Truth
    :type y_true: tf.Tensor
    :return: Correction Term
    :rtype: tf.Tensor
    :History:
        | 2018-Jan-30 - Written - Henry Leung (University of Toronto)
        | 2018-Feb-17 - Updated - Henry Leung (University of Toronto)
    """
    import tensorflow as tf
    from astroNN.config import MAGIC_NUMBER

    num_nonmagic = tf.reduce_sum(tf.cast(tf.not_equal(y_true, MAGIC_NUMBER), tf.float32), axis=-1)
    num_magic = tf.reduce_sum(tf.cast(tf.equal(y_true, MAGIC_NUMBER), tf.float32), axis=-1)

    # If there are no magic numbers, num_magic = 0, the whole expression is 1, and we get back the original loss
    # If num_nonmagic is 0, we have no information at all, so the correction term should effectively be ones
    return (num_nonmagic + num_magic) / num_nonmagic 
Author: henrysky | Project: astroNN | Lines: 23 | Source file: __init__.py
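A quick numeric check of the correction term, with a hypothetical MAGIC_NUMBER value and TensorFlow 2.x:

import tensorflow as tf

MAGIC_NUMBER = -9999.     # hypothetical placeholder for missing labels
y_true = tf.constant([[1.0, MAGIC_NUMBER, 2.0, MAGIC_NUMBER]])
num_nonmagic = tf.reduce_sum(tf.cast(tf.not_equal(y_true, MAGIC_NUMBER), tf.float32), axis=-1)  # [2.]
num_magic = tf.reduce_sum(tf.cast(tf.equal(y_true, MAGIC_NUMBER), tf.float32), axis=-1)         # [2.]
correction = (num_nonmagic + num_magic) / num_nonmagic                                          # [2.]
# a loss averaged over all 4 entries (with the 2 magic entries zeroed out) times 2.0
# equals the average over just the 2 valid entries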

Example 6: multiple_content_lookup

# Required module import: import tensorflow [as alias]
# Or: from tensorflow import not_equal [as alias]
def multiple_content_lookup(content, vocab_table, ids, name=None):
    """

    :param content: 1-D string `Tensor`; each element is a space-separated token sequence.
    :param vocab_table: lookup table mapping token strings to integer ids.
    :param ids: 1-D int `Tensor` of row indices selecting entries from `content`.
    :param name: optional name for the op scope.
    :return: 2-D [batch_size, max_length_in_batch] content id matrix,
             1-D [batch_size] content len vector
    """
    with tf.name_scope(name, 'multiple_content_lookup', [content, vocab_table, ids]):
        content_list = tf.nn.embedding_lookup(content, ids)

        extracted_sparse_content = tf.string_split(content_list, delimiter=' ')

        sparse_content = tf.SparseTensor(indices=extracted_sparse_content.indices,
                                         values=vocab_table.lookup(extracted_sparse_content.values),
                                         dense_shape=extracted_sparse_content.dense_shape)

        extracted_content_ids = tf.sparse_tensor_to_dense(sparse_content,
                                                          default_value=0, name='dense_content')
        extracted_content_len = tf.reduce_sum(tf.cast(tf.not_equal(extracted_content_ids, 0), tf.int32), axis=-1)

        return extracted_content_ids, extracted_content_len 
Author: bxshi | Project: ConMask | Lines: 26 | Source file: content.py

Example 7: _common

# Required module import: import tensorflow [as alias]
# Or: from tensorflow import not_equal [as alias]
def _common(cls, node, **kwargs):
    attrs = copy.deepcopy(node.attrs)
    tensor_dict = kwargs["tensor_dict"]
    x = tensor_dict[node.inputs[0]]
    condition = tensor_dict[node.inputs[1]]

    x = tf.reshape(x, [-1]) if node.attrs.get("axis") is None else x
    if condition.shape.is_fully_defined():
      condition_shape = condition.shape[0]
      indices = tf.constant(list(range(condition_shape)), dtype=tf.int64)
    else:
      condition_shape = tf.shape(condition, out_type=tf.int64)[0]
      indices = tf.range(condition_shape, dtype=tf.int64)
    not_zero = tf.not_equal(condition, tf.zeros_like(condition))
    attrs['indices'] = tf.boolean_mask(indices, not_zero)
    return [
        cls.make_tensor_from_onnx_node(node, inputs=[x], attrs=attrs, **kwargs)
    ] 
Author: onnx | Project: onnx-tensorflow | Lines: 20 | Source file: compress.py
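The core of this ONNX Compress handler is turning a condition tensor into the indices of its non-zero entries. A minimal sketch of just that step, assuming TensorFlow 2.x:

import tensorflow as tf

condition = tf.constant([0, 1, 1, 0, 1])
indices = tf.range(tf.shape(condition, out_type=tf.int64)[0], dtype=tf.int64)
kept = tf.boolean_mask(indices, tf.not_equal(condition, tf.zeros_like(condition)))
# kept == [1, 2, 4]; these indices are then used to gather the selected slices of x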

Example 8: summarize_features

# Required module import: import tensorflow [as alias]
# Or: from tensorflow import not_equal [as alias]
def summarize_features(features, num_shards=1):
  """Generate summaries for features."""
  if not common_layers.should_generate_summaries():
    return

  with tf.name_scope("input_stats"):
    for (k, v) in sorted(six.iteritems(features)):
      if (isinstance(v, tf.Tensor) and (v.get_shape().ndims > 1) and
          (v.dtype != tf.string)):
        tf.summary.scalar("%s_batch" % k, tf.shape(v)[0] // num_shards)
        tf.summary.scalar("%s_length" % k, tf.shape(v)[1])
        nonpadding = tf.to_float(tf.not_equal(v, 0))
        nonpadding_tokens = tf.reduce_sum(nonpadding)
        tf.summary.scalar("%s_nonpadding_tokens" % k, nonpadding_tokens)
        tf.summary.scalar("%s_nonpadding_fraction" % k,
                          tf.reduce_mean(nonpadding)) 
Author: yyht | Project: BERT | Lines: 18 | Source file: t2t_model.py

Example 9: weights_multi_problem

# Required module import: import tensorflow [as alias]
# Or: from tensorflow import not_equal [as alias]
def weights_multi_problem(labels, taskid=-1):
  """Assign weight 1.0 to only the "targets" portion of the labels.

  Weight 1.0 is assigned to all labels past the taskid.

  Args:
    labels: A Tensor of int32s.
    taskid: an int32 representing the task id for a problem.

  Returns:
    A Tensor of floats.

  Raises:
    ValueError: The Task ID must be valid.
  """
  taskid = check_nonnegative(taskid)
  past_taskid = tf.cumsum(to_float(tf.equal(labels, taskid)), axis=1)
  # Additionally zero out the task id location
  past_taskid *= to_float(tf.not_equal(labels, taskid))
  non_taskid = to_float(labels)
  return to_float(tf.not_equal(past_taskid * non_taskid, 0)) 
Author: yyht | Project: BERT | Lines: 23 | Source file: common_layers.py
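To see what weights_multi_problem computes, here is a toy trace with a hypothetical label sequence, a task id of 2, and 0 as padding (to_float is written as tf.cast for TensorFlow 2.x):

import tensorflow as tf

labels = tf.constant([[5, 6, 2, 7, 8, 0]])    # hypothetical label sequence
taskid = 2
past_taskid = tf.cumsum(tf.cast(tf.equal(labels, taskid), tf.float32), axis=1)  # [[0,0,1,1,1,1]]
past_taskid *= tf.cast(tf.not_equal(labels, taskid), tf.float32)                # [[0,0,0,1,1,1]]
non_taskid = tf.cast(labels, tf.float32)
weights = tf.cast(tf.not_equal(past_taskid * non_taskid, 0), tf.float32)        # [[0,0,0,1,1,0]]
# weight 1.0 goes only to positions after the task id that are not padding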

Example 10: focal_loss_

# Required module import: import tensorflow [as alias]
# Or: from tensorflow import not_equal [as alias]
def focal_loss_(labels, pred, anchor_state, alpha=0.25, gamma=2.0):

    # filter out "ignore" anchors
    indices = tf.reshape(tf.where(tf.not_equal(anchor_state, -1)), [-1, ])
    labels = tf.gather(labels, indices)
    pred = tf.gather(pred, indices)

    logits = tf.cast(pred, tf.float32)
    onehot_labels = tf.cast(labels, tf.float32)
    ce = tf.nn.sigmoid_cross_entropy_with_logits(labels=onehot_labels, logits=logits)
    predictions = tf.sigmoid(logits)
    predictions_pt = tf.where(tf.equal(onehot_labels, 1), predictions, 1.-predictions)
    alpha_t = tf.scalar_mul(alpha, tf.ones_like(onehot_labels, dtype=tf.float32))
    alpha_t = tf.where(tf.equal(onehot_labels, 1.0), alpha_t, 1-alpha_t)
    loss = ce * tf.pow(1-predictions_pt, gamma) * alpha_t
    positive_mask = tf.cast(tf.greater(labels, 0), tf.float32)
    return tf.reduce_sum(loss) / tf.maximum(tf.reduce_sum(positive_mask), 1) 
Author: Thinklab-SJTU | Project: R3Det_Tensorflow | Lines: 19 | Source file: losses.py
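The first three lines discard anchors whose state is -1 ("ignore") before the focal loss is computed. The filtering pattern in isolation, with made-up tensors and TensorFlow 2.x:

import tensorflow as tf

anchor_state = tf.constant([1, -1, 0, 1, -1])    # -1 marks anchors to ignore
labels = tf.constant([[1., 0.], [0., 0.], [0., 1.], [1., 0.], [0., 1.]])
indices = tf.reshape(tf.where(tf.not_equal(anchor_state, -1)), [-1])
kept_labels = tf.gather(labels, indices)         # rows 0, 2 and 3 survive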

Example 11: flatten_binary_scores

# Required module import: import tensorflow [as alias]
# Or: from tensorflow import not_equal [as alias]
def flatten_binary_scores(scores, labels, ignore=None):
    """
    Flattens predictions in the batch (binary case)
    Remove labels equal to 'ignore'
    """
    scores = tf.reshape(scores, (-1,))
    labels = tf.reshape(labels, (-1,))
    if ignore is None:
        return scores, labels
    valid = tf.not_equal(labels, ignore)
    vscores = tf.boolean_mask(scores, valid, name='valid_scores')
    vlabels = tf.boolean_mask(labels, valid, name='valid_labels')
    return vscores, vlabels


# --------------------------- MULTICLASS LOSSES --------------------------- 
Author: sercant | Project: mobile-segmentation | Lines: 18 | Source file: loss.py

Example 12: flatten_probas

# Required module import: import tensorflow [as alias]
# Or: from tensorflow import not_equal [as alias]
def flatten_probas(probas, labels, ignore=None, order='BHWC'):
    """
    Flattens predictions in the batch
    """
    if len(probas.shape) == 3:
        probas, order = tf.expand_dims(probas, 3), 'BHWC'
    if order == 'BCHW':
        probas = tf.transpose(probas, (0, 2, 3, 1), name="BCHW_to_BHWC")
        order = 'BHWC'
    if order != 'BHWC':
        raise NotImplementedError('Order {} unknown'.format(order))
    C = probas.shape[3]
    probas = tf.reshape(probas, (-1, C))
    labels = tf.reshape(labels, (-1,))
    if ignore is None:
        return probas, labels
    valid = tf.not_equal(labels, ignore)
    vprobas = tf.boolean_mask(probas, valid, name='valid_probas')
    vlabels = tf.boolean_mask(labels, valid, name='valid_labels')
    return vprobas, vlabels 
Author: sercant | Project: mobile-segmentation | Lines: 22 | Source file: loss.py

Example 13: _create_loss

# Required module import: import tensorflow [as alias]
# Or: from tensorflow import not_equal [as alias]
def _create_loss(self, loss_str, fraction, logits, targets):
    raw_ce = None
    n_valid_pixels_per_im = None
    if "ce" in loss_str:
      # we need to replace the void label to avoid nan
      no_void_label_mask = tf.not_equal(targets, VOID_LABEL)
      targets_no_void = tf.where(no_void_label_mask, targets, tf.zeros_like(targets))
      raw_ce = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=targets_no_void, name="ce")
      # set the loss to 0 for the void label pixels
      raw_ce *= tf.cast(no_void_label_mask, tf.float32)
      n_valid_pixels_per_im = tf.reduce_sum(tf.cast(no_void_label_mask, tf.int32), axis=[1, 2])

    if loss_str == "ce":
      ce_per_im = tf.reduce_sum(raw_ce, axis=[1, 2])
      ce_per_im /= tf.cast(tf.maximum(n_valid_pixels_per_im, 1), tf.float32)
      ce_total = tf.reduce_mean(ce_per_im, axis=0)
      loss = ce_total
    elif loss_str == "bootstrapped_ce":
      loss = bootstrapped_ce_loss(raw_ce, fraction, n_valid_pixels_per_im)
    elif loss_str == "class_balanced_ce":
      loss = class_balanced_ce_loss(raw_ce, targets, self.n_classes)
    else:
      assert False, ("unknown loss", loss_str)
    return loss 
Author: tobiasfshr | Project: MOTSFusion | Lines: 26 | Source file: SegmentationOutputLayers.py
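The "ce" branch swaps void labels for 0 so sparse_softmax_cross_entropy_with_logits does not produce NaN, then zeroes their loss with a float mask. A minimal sketch of that trick, with an assumed VOID_LABEL of 255 and random logits:

import tensorflow as tf

VOID_LABEL = 255                                  # assumed ignore label
targets = tf.constant([[0, 2, VOID_LABEL]])
logits = tf.random.normal([1, 3, 4])              # 4 classes
no_void = tf.not_equal(targets, VOID_LABEL)
safe_targets = tf.where(no_void, targets, tf.zeros_like(targets))
ce = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=safe_targets, logits=logits)
ce *= tf.cast(no_void, tf.float32)                # the void position contributes zero loss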

Example 14: _rpn_box_loss

# Required module import: import tensorflow [as alias]
# Or: from tensorflow import not_equal [as alias]
def _rpn_box_loss(box_outputs, box_targets, normalizer=1.0, delta=1./9):
  """Computes box regression loss."""
  # delta is typically around the mean value of regression target.
  # For instance, the regression targets of a 512x512 input with 6 anchors on
  # the P2-P6 pyramid are about [0.1, 0.1, 0.2, 0.2].
  with tf.name_scope('rpn_box_loss'):
    mask = tf.not_equal(box_targets, 0.0)
    # The loss is normalized by the sum of non-zero weights before additional
    # normalizer provided by the function caller.
    box_loss = tf.losses.huber_loss(
        box_targets,
        box_outputs,
        weights=mask,
        delta=delta,
        reduction=tf.losses.Reduction.SUM_BY_NONZERO_WEIGHTS)
    box_loss /= normalizer
    return box_loss 
Author: mlperf | Project: training_results_v0.5 | Lines: 19 | Source file: mask_rcnn_model.py

Example 15: char_accuracy

# Required module import: import tensorflow [as alias]
# Or: from tensorflow import not_equal [as alias]
def char_accuracy(predictions, targets, rej_char, streaming=False):
  """Computes character level accuracy.

  Both predictions and targets should have the same shape
  [batch_size x seq_length].

  Args:
    predictions: predicted characters ids.
    targets: ground truth character ids.
    rej_char: the character id used to mark an empty element (end of sequence).
    streaming: if True, uses the streaming mean from the slim.metric module.

  Returns:
    an update op for execution and a value tensor whose value on evaluation
    returns the total character accuracy.
  """
  with tf.variable_scope('CharAccuracy'):
    predictions.get_shape().assert_is_compatible_with(targets.get_shape())

    targets = tf.to_int32(targets)
    const_rej_char = tf.constant(rej_char, shape=targets.get_shape())
    weights = tf.to_float(tf.not_equal(targets, const_rej_char))
    correct_chars = tf.to_float(tf.equal(predictions, targets))
    accuracy_per_example = tf.div(
        tf.reduce_sum(tf.multiply(correct_chars, weights), 1),
        tf.reduce_sum(weights, 1))
    if streaming:
      return tf.contrib.metrics.streaming_mean(accuracy_per_example)
    else:
      return tf.reduce_mean(accuracy_per_example) 
Author: ringringyi | Project: DOTA_models | Lines: 32 | Source file: metrics.py
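In char_accuracy, positions equal to rej_char get weight 0, so padding and end-of-sequence characters do not count toward the accuracy. A toy trace of the weighting, with tf.to_float and tf.div written as tf.cast and plain division for TensorFlow 2.x:

import tensorflow as tf

rej_char = 0
targets = tf.constant([[3, 5, 0, 0]])
predictions = tf.constant([[3, 4, 0, 0]])
weights = tf.cast(tf.not_equal(targets, rej_char), tf.float32)                      # [[1., 1., 0., 0.]]
correct_chars = tf.cast(tf.equal(predictions, targets), tf.float32)                 # [[1., 0., 1., 1.]]
accuracy_per_example = tf.reduce_sum(correct_chars * weights, 1) / tf.reduce_sum(weights, 1)  # [0.5]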


Note: the tensorflow.not_equal examples in this article were collected by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors; copyright remains with the original authors, and redistribution or use should follow each project's License. Do not reproduce without permission.