

Python tensorflow.gather_nd Method Code Examples

This article collects typical usage examples of the tensorflow.gather_nd method in Python. If you are wondering what tensorflow.gather_nd does, how to call it, or what it looks like in real code, the curated snippets below should help. You can also explore further usage examples from the tensorflow module itself.


The following shows 15 code examples of the tensorflow.gather_nd method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
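Before diving into the examples, here is a minimal sketch of what tf.gather_nd does, assuming TensorFlow 2.x eager execution (many of the examples below target TF 1.x graph mode): each row of the indices argument names one element or one slice of params.

import tensorflow as tf

# 2-D params; each index row selects a single element by (row, col).
params = tf.constant([[1, 2, 3],
                      [4, 5, 6]])
print(tf.gather_nd(params, [[0, 2], [1, 0]]))  # -> [3 4]

# Index rows shorter than the rank of params select whole slices:
# a single row index returns that entire row.
print(tf.gather_nd(params, [[1]]))             # -> [[4 5 6]]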

Example 1: remove

# Required import: import tensorflow [as alias]
# Or: from tensorflow import gather_nd [as alias]
def remove(self, x):
    """Remove padding from the given tensor.

    Args:
      x (tf.Tensor): of shape [dim_origin,...]

    Returns:
      a tensor of shape [dim_compressed,...] with dim_compressed <= dim_origin
    """
    with tf.name_scope("pad_reduce/remove"):
      x_shape = x.get_shape().as_list()
      x = tf.gather_nd(
          x,
          indices=self.nonpad_ids,
      )
      if not tf.contrib.eager.in_eager_mode():
        # This is a hack, but for some reason gather_nd returns a tensor of
        # undefined shape, so the shape is set manually.
        x.set_shape([None] + x_shape[1:])
    return x 
Developer: akzaidi | Project: fine-lm | Lines of code: 22 | Source: expert_utils.py
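For context, here is a hypothetical standalone sketch of the same pattern, showing one way nonpad_ids might be built with tf.where before being passed to tf.gather_nd (TensorFlow 2.x eager assumed; in the original project nonpad_ids is precomputed by the surrounding class from a padding mask).

import tensorflow as tf

# Toy batch of 5 token embeddings; rows 1 and 3 are padding (all zeros).
x = tf.constant([[1., 1.], [0., 0.], [2., 2.], [0., 0.], [3., 3.]])
pad_mask = tf.cast(tf.equal(tf.reduce_sum(tf.abs(x), axis=-1), 0.0), tf.float32)

# nonpad_ids has shape [num_nonpad, 1], which is what gather_nd expects here.
nonpad_ids = tf.where(pad_mask < 1e-9)
compressed = tf.gather_nd(x, indices=nonpad_ids)   # keeps rows 0, 2 and 4 -> shape [3, 2]
print(compressed)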

Example 2: argmax_with_score

# Required import: import tensorflow [as alias]
# Or: from tensorflow import gather_nd [as alias]
def argmax_with_score(logits, axis=None):
  """Argmax along with the value."""
  axis = axis or len(logits.get_shape()) - 1
  predictions = tf.argmax(logits, axis=axis)

  logits_shape = shape_list(logits)
  prefix_shape, vocab_size = logits_shape[:-1], logits_shape[-1]
  prefix_size = 1
  for d in prefix_shape:
    prefix_size *= d

  # Flatten to extract scores
  flat_logits = tf.reshape(logits, [prefix_size, vocab_size])
  flat_predictions = tf.reshape(predictions, [prefix_size])
  flat_indices = tf.stack(
      [tf.range(tf.to_int64(prefix_size)),
       tf.to_int64(flat_predictions)],
      axis=1)
  flat_scores = tf.gather_nd(flat_logits, flat_indices)

  # Unflatten
  scores = tf.reshape(flat_scores, prefix_shape)

  return predictions, scores 
Developer: akzaidi | Project: fine-lm | Lines of code: 26 | Source: common_layers.py
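As a rough standalone illustration of the flatten-then-gather_nd trick used above (TensorFlow 2.x eager assumed; shapes are hardcoded for the toy case, so the original shape_list helper is not needed):

import tensorflow as tf

# Toy logits of shape [batch, length, vocab] = [2, 2, 3].
logits = tf.random.uniform([2, 2, 3])
predictions = tf.argmax(logits, axis=-1)                     # [2, 2], dtype int64

flat_logits = tf.reshape(logits, [-1, 3])                    # [4, 3]
flat_predictions = tf.reshape(predictions, [-1])             # [4]
flat_indices = tf.stack(
    [tf.range(4, dtype=tf.int64), flat_predictions], axis=1)
scores = tf.reshape(tf.gather_nd(flat_logits, flat_indices), [2, 2])
# scores[i, j] is the logit of the argmax token at position (i, j).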

Example 3: select_dim_value

# Required import: import tensorflow [as alias]
# Or: from tensorflow import gather_nd [as alias]
def select_dim_value(x, indices, name=None):
    with tf.name_scope(name, "select-dim-value", values=[x, indices]):
        # x.shape = (rest..., dims)
        rest = tf.shape(x)[:-1]
        dims = tf.shape(x)[-1]
        size = tf.size(indices, out_type=indices.dtype)

        # reshape to (size, dims)
        t = tf.reshape(x, shape=[-1, dims])
        # then index as ([1,2,3,...,size], indices.ravel())
        nd_indices = tf.stack([
            tf.range(0, size, dtype=indices.dtype),
            tf.reshape(indices, shape=[-1])
        ], axis=1)
        t = tf.gather_nd(t, indices=nd_indices)

        # reshape back to (rest...)
        t = tf.reshape(t, rest)
        t.set_shape(x.get_shape()[:-1])
        return t 
Developer: distillpub | Project: post--memorization-in-rnns | Lines of code: 22 | Source: select_dim_value.py
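A possible usage sketch, assuming TF 1.x graph mode and that the select_dim_value defined above is in scope:

import tensorflow as tf

x = tf.constant([[10., 20., 30.],
                 [40., 50., 60.]])
indices = tf.constant([2, 0])          # pick column 2 of row 0, column 0 of row 1
picked = select_dim_value(x, indices)  # -> [30., 40.]

with tf.Session() as sess:
    print(sess.run(picked))            # [30. 40.]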

Example 4: call

# Required import: import tensorflow [as alias]
# Or: from tensorflow import gather_nd [as alias]
def call(self, inputs):
    """Standard Keras call() method."""
    if inputs.dtype not in [tf.uint8, tf.int32, tf.int64]:
      inputs = tf.cast(inputs, dtype=tf.int32)

    if self.default_input_value is not None:
      default_input_value_tensor = tf.constant(
          int(self.default_input_value),
          dtype=inputs.dtype,
          name=DEFAULT_INPUT_VALUE_NAME)
      replacement = tf.zeros_like(inputs) + (self.num_buckets - 1)
      inputs = tf.where(
          tf.equal(inputs, default_input_value_tensor), replacement, inputs)

    # We can't use tf.gather_nd(self.kernel, inputs) as it doesn't support
    # constraints (constraint functions are not supported for IndexedSlices).
    # Instead we use matrix multiplication by one-hot encoding of the index.
    if self.units == 1:
      # This can be slightly faster as it uses matmul.
      return tf.matmul(
          tf.one_hot(tf.squeeze(inputs, axis=[-1]), depth=self.num_buckets),
          self.kernel)
    return tf.reduce_sum(
        tf.one_hot(inputs, axis=1, depth=self.num_buckets) * self.kernel,
        axis=1) 
Developer: tensorflow | Project: lattice | Lines of code: 27 | Source: categorical_calibration_layer.py
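The in-code comment above is the interesting part: the layer deliberately avoids tf.gather_nd so that kernel constraints keep working. A minimal sketch (not part of the layer, TensorFlow 2.x eager assumed) contrasting the direct lookup with the one-hot matmul that replaces it:

import tensorflow as tf

# Toy kernel: 4 buckets, 1 output unit.
kernel = tf.constant([[0.1], [0.2], [0.3], [0.4]])
inputs = tf.constant([[2], [0], [3]])               # bucket ids, shape [batch, 1]

lookup = tf.gather_nd(kernel, inputs)               # direct lookup: [[0.3], [0.1], [0.4]]
one_hot = tf.matmul(
    tf.one_hot(tf.squeeze(inputs, axis=[-1]), depth=4), kernel)
# one_hot equals lookup, but the matmul path keeps gradients dense,
# which is what makes kernel constraint functions applicable.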

Example 5: _get_prediction_from_topk

# Required import: import tensorflow [as alias]
# Or: from tensorflow import gather_nd [as alias]
def _get_prediction_from_topk(self, topk_predicted_words):
        # apply given filter
        masks = []
        if self.predicted_words_filters is not None:
            masks = [fltr(topk_predicted_words) for fltr in self.predicted_words_filters]
        if masks:
            # assert all(mask.shape.assert_is_compatible_with(top_k_pred_indices) for mask in masks)
            legal_predicted_target_words_mask = reduce(tf.logical_and, masks)
        else:
            legal_predicted_target_words_mask = tf.cast(tf.ones_like(topk_predicted_words), dtype=tf.bool)

        # the first legal predicted word is our prediction
        first_legal_predicted_target_word_mask = common.tf_get_first_true(legal_predicted_target_words_mask)
        first_legal_predicted_target_word_idx = tf.where(first_legal_predicted_target_word_mask)
        first_legal_predicted_word_string = tf.gather_nd(topk_predicted_words,
                                                         first_legal_predicted_target_word_idx)

        prediction = tf.reshape(first_legal_predicted_word_string, [-1])
        return prediction 
Developer: tech-srl | Project: code2vec | Lines of code: 21 | Source: keras_words_subtoken_metrics.py

Example 6: call

# Required import: import tensorflow [as alias]
# Or: from tensorflow import gather_nd [as alias]
def call(self, y_pred, **kwargs):
        y_pred.shape.assert_has_rank(2)
        top_k_pred_indices = tf.cast(tf.nn.top_k(y_pred, k=self.top_k).indices,
                                     dtype=self.index_to_word_table.key_dtype)
        predicted_target_words_strings = self.index_to_word_table.lookup(top_k_pred_indices)

        # apply given filter
        masks = []
        if self.predicted_words_filters is not None:
            masks = [fltr(top_k_pred_indices, predicted_target_words_strings) for fltr in self.predicted_words_filters]
        if masks:
            # assert all(mask.shape.assert_is_compatible_with(top_k_pred_indices) for mask in masks)
            legal_predicted_target_words_mask = reduce(tf.logical_and, masks)
        else:
            legal_predicted_target_words_mask = tf.cast(tf.ones_like(top_k_pred_indices), dtype=tf.bool)

        # the first legal predicted word is our prediction
        first_legal_predicted_target_word_mask = common.tf_get_first_true(legal_predicted_target_words_mask)
        first_legal_predicted_target_word_idx = tf.where(first_legal_predicted_target_word_mask)
        first_legal_predicted_word_string = tf.gather_nd(predicted_target_words_strings,
                                                         first_legal_predicted_target_word_idx)

        prediction = tf.reshape(first_legal_predicted_word_string, [-1])
        return prediction 
Developer: tech-srl | Project: code2vec | Lines of code: 26 | Source: keras_word_prediction_layer.py

Example 7: rpn_class_loss_graph

# Required import: import tensorflow [as alias]
# Or: from tensorflow import gather_nd [as alias]
def rpn_class_loss_graph(rpn_match, rpn_class_logits):
    """RPN anchor classifier loss.
    rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
               -1=negative, 0=neutral anchor.
    rpn_class_logits: [batch, anchors, 2]. RPN classifier logits for FG/BG.
    """
    # Squeeze last dim to simplify
    rpn_match = tf.squeeze(rpn_match, -1)
    # Get anchor classes. Convert the -1/+1 match to 0/1 values.
    anchor_class = K.cast(K.equal(rpn_match, 1), tf.int32)
    # Positive and Negative anchors contribute to the loss,
    # but neutral anchors (match value = 0) don't.
    indices = tf.where(K.not_equal(rpn_match, 0))
    # Pick rows that contribute to the loss and filter out the rest.
    rpn_class_logits = tf.gather_nd(rpn_class_logits, indices)
    anchor_class = tf.gather_nd(anchor_class, indices)
    # Cross entropy loss
    loss = K.sparse_categorical_crossentropy(target=anchor_class,
                                             output=rpn_class_logits,
                                             from_logits=True)
    loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))
    return loss 
Developer: dataiku | Project: dataiku-contrib | Lines of code: 24 | Source: model.py

Example 8: calculate_model_precision

# Required import: import tensorflow [as alias]
# Or: from tensorflow import gather_nd [as alias]
def calculate_model_precision(input_tensor, label_tensor):
    """
    calculate accuracy acc = correct_nums / ground_truth_nums
    :param input_tensor: binary segmentation logits
    :param label_tensor: binary segmentation label
    :return:
    """

    logits = tf.nn.softmax(logits=input_tensor)
    final_output = tf.expand_dims(tf.argmax(logits, axis=-1), axis=-1)

    idx = tf.where(tf.equal(final_output, 1))
    pix_cls_ret = tf.gather_nd(label_tensor, idx)
    accuracy = tf.count_nonzero(pix_cls_ret)
    accuracy = tf.divide(
        accuracy,
        tf.cast(tf.shape(tf.gather_nd(label_tensor, tf.where(tf.equal(label_tensor, 1))))[0], tf.int64))

    return accuracy 
Developer: MaybeShewill-CV | Project: lanenet-lane-detection | Lines of code: 21 | Source: evaluate_model_utils.py
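Examples 8 through 10 all rely on the same tf.where + tf.gather_nd idiom: take the coordinates where one tensor satisfies a condition and pull out the corresponding values of another tensor. A toy sketch of that idiom (TensorFlow 2.x eager assumed; this is not the project's code):

import tensorflow as tf

# Toy flattened predictions/labels for a binary segmentation task.
pred  = tf.constant([1, 0, 1, 1, 0])
label = tf.constant([1, 0, 0, 1, 1])

idx = tf.where(tf.equal(pred, 1))        # coordinates predicted positive: [[0], [2], [3]]
hits = tf.gather_nd(label, idx)          # their ground-truth values: [1, 0, 1]

gt_pos = tf.cast(tf.shape(tf.where(tf.equal(label, 1)))[0], tf.int64)
ratio = tf.math.count_nonzero(hits) / gt_pos   # 2 correct / 3 labelled positives
print(ratio)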

Example 9: calculate_model_fp

# Required import: import tensorflow [as alias]
# Or: from tensorflow import gather_nd [as alias]
def calculate_model_fp(input_tensor, label_tensor):
    """
    calculate fp figure
    :param input_tensor:
    :param label_tensor:
    :return:
    """
    logits = tf.nn.softmax(logits=input_tensor)
    final_output = tf.expand_dims(tf.argmax(logits, axis=-1), axis=-1)

    idx = tf.where(tf.equal(final_output, 1))
    pix_cls_ret = tf.gather_nd(final_output, idx)
    false_pred = tf.cast(tf.shape(pix_cls_ret)[0], tf.int64) - tf.count_nonzero(
        tf.gather_nd(label_tensor, idx)
    )

    return tf.divide(false_pred, tf.cast(tf.shape(pix_cls_ret)[0], tf.int64)) 
Developer: MaybeShewill-CV | Project: lanenet-lane-detection | Lines of code: 19 | Source: evaluate_model_utils.py

Example 10: calculate_model_fn

# Required import: import tensorflow [as alias]
# Or: from tensorflow import gather_nd [as alias]
def calculate_model_fn(input_tensor, label_tensor):
    """
    calculate fn figure
    :param input_tensor:
    :param label_tensor:
    :return:
    """
    logits = tf.nn.softmax(logits=input_tensor)
    final_output = tf.expand_dims(tf.argmax(logits, axis=-1), axis=-1)

    idx = tf.where(tf.equal(label_tensor, 1))
    pix_cls_ret = tf.gather_nd(final_output, idx)
    label_cls_ret = tf.gather_nd(label_tensor, tf.where(tf.equal(label_tensor, 1)))
    mis_pred = tf.cast(tf.shape(label_cls_ret)[0], tf.int64) - tf.count_nonzero(pix_cls_ret)

    return tf.divide(mis_pred, tf.cast(tf.shape(label_cls_ret)[0], tf.int64)) 
Developer: MaybeShewill-CV | Project: lanenet-lane-detection | Lines of code: 18 | Source: evaluate_model_utils.py

Example 11: remove

# Required import: import tensorflow [as alias]
# Or: from tensorflow import gather_nd [as alias]
def remove(self, x):
        """Remove padding from the given tensor.

        Args:
            x: A Tensor of shape [dim_origin,...]

        Returns:
            A tensor of shape [dim_compressed,...] with dim_compressed
            <= dim_origin
        """
        with tf.name_scope("pad_reduce/remove"):
            x_shape = x.get_shape().as_list()
            x = tf.gather_nd(
                x,
                indices=self.nonpad_ids,
            )
            #if not context.in_eager_mode():
            # This is a hack, but for some reason gather_nd returns a tensor of
            # undefined shape, so the shape is set manually.
            x.set_shape([None] + x_shape[1:])
        return x 
Developer: qkaren | Project: Counterfactual-StoryRW | Lines of code: 23 | Source: transformer_utils.py

Example 12: extract_dense_weights

# Required import: import tensorflow [as alias]
# Or: from tensorflow import gather_nd [as alias]
def extract_dense_weights(sess):
    for key in dense_layers.keys():
        layer = dense_layers[key]

        # sparse kernel
        dense_kernel = layer.kernel
        dense_kernel_shape = dense_kernel.get_shape().as_list()
        # dense_kernel = tf.reshape(dense_kernel, [dense_kernel_shape[0] * dense_kernel_shape[1] * dense_kernel_shape[2],
        #                                          dense_kernel_shape[3]])
        # dense_kernel = tf.transpose(dense_kernel)
        idx = tf.where(tf.not_equal(dense_kernel, 0))
        sparse_kernel = tf.SparseTensor(idx, tf.gather_nd(dense_kernel, idx), dense_kernel.get_shape())

        if layer.bias is not None:
            dk, k, b = sess.run([dense_kernel, sparse_kernel, layer.bias])
        else:
            dk, k = sess.run([dense_kernel, sparse_kernel])
            b = None
        dense_weights['%s/%s' % (key, 'kernel_dense')] = dk
        dense_weights['%s/%s' % (key, 'kernel')] = k
        dense_weights['%s/%s' % (key, 'kernel_shape')] = dense_kernel_shape
        dense_weights['%s/%s' % (key, 'bias')] = b 
Developer: ildoonet | Project: tf-lcnn | Lines of code: 24 | Source: LookupConvolution2d.py
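The dense-to-sparse conversion above boils down to one step: tf.where finds the non-zero coordinates and tf.gather_nd reads their values, which together define a tf.SparseTensor. A small sketch of just that step (TensorFlow 2.x eager assumed):

import tensorflow as tf

# Toy dense kernel with mostly-zero entries.
dense = tf.constant([[0., 1.5, 0.],
                     [0., 0., -2.0]])

idx = tf.where(tf.not_equal(dense, 0))            # [[0, 1], [1, 2]]
sparse = tf.SparseTensor(idx, tf.gather_nd(dense, idx), dense.shape)
print(tf.sparse.to_dense(sparse))                 # round-trips back to dense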

Example 13: index_each

# Required import: import tensorflow [as alias]
# Or: from tensorflow import gather_nd [as alias]
def index_each(a, ix):
    """Do a batched indexing operation: index row i of a by ix[i]

    In the simple case (a is >=2D and ix is 1D), returns [row[i] for row, i in zip(a, ix)].

    If ix has more dimensions, multiple lookups will be done at each batch index.
    For instance, if ix is 2D, returns [[row[i] for i in ix_row] for row, ix_row in zip(a, ix)].

    Always indexes into dimension 1 of a.
    """
    a = tf.convert_to_tensor(a, name='a')
    ix = tf.convert_to_tensor(ix, name='ix', dtype=tf.int32)
    with tf.name_scope('index_each', values=[a, ix]) as scope:
        a.shape[:1].assert_is_compatible_with(ix.shape[:1])
        i0 = tf.range(tf.shape(a)[0], dtype=ix.dtype)
        if ix.shape.rank > 1:
            i0 = tf.tile(tf.reshape(i0, (-1,) + (1,)*(ix.shape.rank - 1)), tf.concat([[1], tf.shape(ix)[1:]], axis=0))
        return tf.gather_nd(a, tf.stack([i0, ix], axis=-1), name=scope) 
Developer: openai | Project: lm-human-preferences | Lines of code: 20 | Source: core.py
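A possible usage sketch, assuming TF 1.x graph mode and the index_each defined above:

import tensorflow as tf

a = tf.constant([[10, 11, 12],
                 [20, 21, 22]])
ix = tf.constant([2, 0])
picked = index_each(a, ix)          # picks a[0, 2] and a[1, 0] -> [12, 20]

ix2 = tf.constant([[0, 2], [1, 1]])
picked2 = index_each(a, ix2)        # -> [[10, 12], [21, 21]]

with tf.Session() as sess:
    print(sess.run([picked, picked2]))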

Example 14: take_top_p_logits

# Required import: import tensorflow [as alias]
# Or: from tensorflow import gather_nd [as alias]
def take_top_p_logits(logits, p):
    """Nucleus sampling"""
    batch, sequence, _ = logits.shape.as_list()
    sorted_logits = tf.sort(logits, direction='DESCENDING', axis=-1)
    cumulative_probs = tf.cumsum(tf.nn.softmax(sorted_logits, axis=-1), axis=-1)
    indices = tf.stack([
        tf.range(0, batch)[:, tf.newaxis],
        tf.range(0, sequence)[tf.newaxis, :],
        # number of indices to include
        tf.maximum(tf.reduce_sum(tf.cast(cumulative_probs <= p, tf.int32), axis=-1) - 1, 0),
    ], axis=-1)
    min_values = tf.gather_nd(sorted_logits, indices)
    return tf.where(
        logits < min_values,
        tf.ones_like(logits) * -1e10,
        logits,
    ) 
Developer: openai | Project: lm-human-preferences | Lines of code: 19 | Source: core.py
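A possible usage sketch, assuming the take_top_p_logits above is in scope (it uses tf.sort, so TF 1.14+ or 2.x):

import tensorflow as tf

logits = tf.constant([[[2.0, 1.0, 0.5, 0.1]]])   # [batch=1, sequence=1, vocab=4]
filtered = take_top_p_logits(logits, p=0.9)
# Softmax of the row is roughly [0.57, 0.21, 0.13, 0.09], so the top-p (0.9)
# nucleus keeps the first two logits; 0.5 and 0.1 are pushed to -1e10 and a
# later softmax/sampling step effectively ignores them.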

Example 15: SampleRandomFrames

# Required import: import tensorflow [as alias]
# Or: from tensorflow import gather_nd [as alias]
def SampleRandomFrames(model_input, num_frames, num_samples):
  """Samples a random set of frames of size num_samples.

  Args:
    model_input: A tensor of size batch_size x max_frames x feature_size
    num_frames: A tensor of size batch_size x 1
    num_samples: A scalar

  Returns:
    `model_input`: A tensor of size batch_size x num_samples x feature_size
  """
  batch_size = tf.shape(model_input)[0]
  frame_index = tf.cast(
      tf.multiply(
          tf.random_uniform([batch_size, num_samples]),
          tf.tile(tf.cast(num_frames, tf.float32), [1, num_samples])), tf.int32)
  batch_index = tf.tile(
      tf.expand_dims(tf.range(batch_size), 1), [1, num_samples])
  index = tf.stack([batch_index, frame_index], 2)
  return tf.gather_nd(model_input, index) 
Developer: antoine77340 | Project: Youtube-8M-WILLOW | Lines of code: 22 | Source: model_utils.py
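A possible usage sketch, assuming TF 1.x graph mode (the snippet uses tf.random_uniform) and the SampleRandomFrames above:

import tensorflow as tf

model_input = tf.random_uniform([4, 300, 128])   # batch_size x max_frames x feature_size
num_frames = tf.constant([[250.], [300.], [120.], [90.]])
sampled = SampleRandomFrames(model_input, num_frames, num_samples=32)

with tf.Session() as sess:
    # Each of the 4 videos gets 32 frames drawn uniformly within its valid range.
    print(sess.run(tf.shape(sampled)))           # [  4  32 128]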


Note: The tensorflow.gather_nd examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors, and distribution and use are subject to each project's license. Do not reproduce without permission.