

Python tensorflow.argmin Method Code Examples

This article collects typical usage examples of the Python method tensorflow.argmin. If you are unsure how tensorflow.argmin works, or are looking for concrete examples of how to use it, the curated code samples below may help. You can also explore further usage examples from the tensorflow module.


The following presents 15 code examples of the tensorflow.argmin method, sorted by popularity by default.
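
Before the individual examples, here is a minimal sketch of what tf.argmin itself does (an illustrative snippet, assuming TensorFlow 2.x with eager execution; it is not taken from any of the projects cited below): tf.argmin returns the index of the smallest value along the given axis, as an int64 tensor by default.

import tensorflow as tf

# Minimal illustration of tf.argmin semantics (assumes TF 2.x, eager execution).
x = tf.constant([[3.0, 1.0, 2.0],
                 [0.5, 4.0, 6.0]])

row_min_idx = tf.argmin(x, axis=1)  # index of the minimum in each row
col_min_idx = tf.argmin(x, axis=0)  # index of the minimum in each column

print(row_min_idx.numpy())  # [1 0]
print(col_min_idx.numpy())  # [1 0 0]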

Example 1: get_pulling_indices

# Required import: import tensorflow [as alias]
# Or: from tensorflow import argmin [as alias]
def get_pulling_indices(self, weight):
    clst_num = self.cluster_centroids.shape[0]
    tiled_weights = tf.tile(tf.expand_dims(weight, 4), [1, 1, 1, 1, clst_num])

    # Do the ugly reshape to the clustering points
    tiled_cluster_centroids = tf.stack(
        [tf.tile(tf.stack(
            [tf.reshape(self.cluster_centroids, [1, 1, clst_num])] *
            weight.shape[-2], axis=2),
                 [weight.shape[0], weight.shape[1], 1, 1])] * weight.shape[-1],
        axis=3)

    # We find the nearest cluster centroids and store them so that ops can build
    # their kernels upon it
    pulling_indices = tf.argmin(
        tf.abs(tiled_weights - tiled_cluster_centroids), axis=4
    )

    return pulling_indices 
Developer: tensorflow, Project: model-optimization, Lines of code: 21, Source file: clustering_registry.py

Example 2: vq

# Required import: import tensorflow [as alias]
# Or: from tensorflow import argmin [as alias]
def vq(z_e):
    '''Vector Quantization.

    Args:
      z_e: encoded variable. [B, t, D].

    Returns:
      z_q: nearest embeddings. [B, t, D].
    '''
    with tf.variable_scope("vq"):
        lookup_table = tf.get_variable('lookup_table',
                                       dtype=tf.float32,
                                       shape=[hp.K, hp.D],
                                       initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.1))
        z = tf.expand_dims(z_e, -2) # (B, t, 1, D)
        lookup_table_ = tf.reshape(lookup_table, [1, 1, hp.K, hp.D]) # (1, 1, K, D)
        dist = tf.norm(z - lookup_table_, axis=-1) # Broadcasting -> (B, t, K)
        k = tf.argmin(dist, axis=-1) # (B, t)
        z_q = tf.gather(lookup_table, k) # (B, t, D)

    return z_q 
Developer: Kyubyong, Project: vq-vae, Lines of code: 23, Source file: networks.py
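
A hedged usage sketch for the vq function above, assuming TensorFlow 1.x graph mode (matching the tf.variable_scope / tf.get_variable API it uses) and assuming a hyperparameter object hp with fields K (codebook size) and D (embedding dimension) is available in the same module as vq; the class and shapes below are illustrative stand-ins, not the project's real configuration.

import numpy as np
import tensorflow as tf

class hp:  # stand-in for the project's hyperparameter module (assumption)
    K = 512  # number of codebook entries
    D = 64   # embedding dimension

z_e = tf.placeholder(tf.float32, shape=(8, 100, hp.D))  # [B, t, D]
z_q = vq(z_e)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    out = sess.run(z_q, feed_dict={z_e: np.random.randn(8, 100, hp.D)})
    print(out.shape)  # (8, 100, 64): each frame replaced by its nearest codebook vector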

Example 3: stepll_adversarial_images

# Required import: import tensorflow [as alias]
# Or: from tensorflow import argmin [as alias]
def stepll_adversarial_images(x, eps):
  """One step towards least likely class (Step L.L.) adversarial examples.

  This method is an alternative to FGSM that does not use the true classes.
  The method is described in the "Adversarial Machine Learning at Scale" paper,
  https://arxiv.org/abs/1611.01236

  Args:
    x: source images
    eps: size of adversarial perturbation

  Returns:
    adversarial images
  """
  logits, _ = create_model(x, reuse=True)
  least_likely_class = tf.argmin(logits, 1)
  one_hot_ll_class = tf.one_hot(least_likely_class, NUM_CLASSES)
  return step_target_class_adversarial_images(x, eps, one_hot_ll_class) 
Developer: rky0930, Project: yolo_v2, Lines of code: 20, Source file: eval_on_adversarial.py

Example 4: stepllnoise_adversarial_images

# Required import: import tensorflow [as alias]
# Or: from tensorflow import argmin [as alias]
def stepllnoise_adversarial_images(x, eps):
  """Step L.L. with noise method.

  This is an improvement of the Step L.L. method. It performs better against
  adversarially trained models, which learn to mask gradients.
  The method is described in the section "New randomized one shot attack" of
  the "Ensemble Adversarial Training: Attacks and Defenses" paper,
  https://arxiv.org/abs/1705.07204

  Args:
    x: source images
    eps: size of adversarial perturbation

  Returns:
    adversarial images
  """
  logits, _ = create_model(x, reuse=True)
  least_likely_class = tf.argmin(logits, 1)
  one_hot_ll_class = tf.one_hot(least_likely_class, NUM_CLASSES)
  x_noise = x + eps / 2 * tf.sign(tf.random_normal(x.shape))
  return step_target_class_adversarial_images(x_noise, eps / 2,
                                              one_hot_ll_class) 
Developer: rky0930, Project: yolo_v2, Lines of code: 24, Source file: eval_on_adversarial.py

Example 5: _compute_one_image_loss

# Required import: import tensorflow [as alias]
# Or: from tensorflow import argmin [as alias]
def _compute_one_image_loss(self, keypoints, offset, size, ground_truth, meshgrid_y, meshgrid_x,
                                stride, pshape):
        slice_index = tf.argmin(ground_truth, axis=0)[0]
        ground_truth = tf.gather(ground_truth, tf.range(0, slice_index, dtype=tf.int64))
        ngbbox_y = ground_truth[..., 0] / stride
        ngbbox_x = ground_truth[..., 1] / stride
        ngbbox_h = ground_truth[..., 2] / stride
        ngbbox_w = ground_truth[..., 3] / stride
        class_id = tf.cast(ground_truth[..., 4], dtype=tf.int32)
        ngbbox_yx = ground_truth[..., 0:2] / stride
        ngbbox_yx_round = tf.floor(ngbbox_yx)
        offset_gt = ngbbox_yx - ngbbox_yx_round
        size_gt = ground_truth[..., 2:4] / stride
        ngbbox_yx_round_int = tf.cast(ngbbox_yx_round, tf.int64)
        keypoints_loss = self._keypoints_loss(keypoints, ngbbox_yx_round_int, ngbbox_y, ngbbox_x, ngbbox_h,
                                              ngbbox_w, class_id, meshgrid_y, meshgrid_x, pshape)

        offset = tf.gather_nd(offset, ngbbox_yx_round_int)
        size = tf.gather_nd(size, ngbbox_yx_round_int)
        offset_loss = tf.reduce_mean(tf.abs(offset_gt - offset))
        size_loss = tf.reduce_mean(tf.abs(size_gt - size))
        total_loss = keypoints_loss + 0.1*size_loss + offset_loss
        return total_loss 
Developer: Stick-To, Project: CenterNet-tensorflow, Lines of code: 25, Source file: CenterNet.py

Example 6: nn_distance_cpu

# Required import: import tensorflow [as alias]
# Or: from tensorflow import argmin [as alias]
def nn_distance_cpu(pc1, pc2):
    '''
    Input:
        pc1: float TF tensor in shape (B,N,C) the first point cloud
        pc2: float TF tensor in shape (B,M,C) the second point cloud
    Output:
        dist1: float TF tensor in shape (B,N) distance from first to second
        idx1: int64 TF tensor in shape (B,N) nearest neighbor from first to second
        dist2: float TF tensor in shape (B,M) distance from second to first
        idx2: int64 TF tensor in shape (B,M) nearest neighbor from second to first
    '''
    N = pc1.get_shape()[1].value
    M = pc2.get_shape()[1].value
    pc1_expand_tile = tf.tile(tf.expand_dims(pc1,2), [1,1,M,1])
    pc2_expand_tile = tf.tile(tf.expand_dims(pc2,1), [1,N,1,1])
    pc_diff = pc1_expand_tile - pc2_expand_tile # B,N,M,C
    pc_dist = tf.reduce_sum(pc_diff ** 2, axis=-1) # B,N,M
    dist1 = tf.reduce_min(pc_dist, axis=2) # B,N
    idx1 = tf.argmin(pc_dist, axis=2) # B,N
    dist2 = tf.reduce_min(pc_dist, axis=1) # B,M
    idx2 = tf.argmin(pc_dist, axis=1) # B,M
    return dist1, idx1, dist2, idx2 
Developer: hehefan, Project: PointRNN, Lines of code: 24, Source file: tf_nndistance_cpu.py
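
A hedged usage sketch for nn_distance_cpu, assuming TensorFlow 1.x graph mode (the get_shape()[1].value calls rely on the TF1 Dimension API); the batch size and point counts below are arbitrary placeholders.

import numpy as np
import tensorflow as tf

pc1 = tf.placeholder(tf.float32, shape=(4, 128, 3))  # B=4, N=128, C=3
pc2 = tf.placeholder(tf.float32, shape=(4, 256, 3))  # B=4, M=256, C=3
dist1, idx1, dist2, idx2 = nn_distance_cpu(pc1, pc2)

with tf.Session() as sess:
    d1, i1, d2, i2 = sess.run(
        [dist1, idx1, dist2, idx2],
        feed_dict={pc1: np.random.rand(4, 128, 3),
                   pc2: np.random.rand(4, 256, 3)})
    print(d1.shape, i1.shape)  # (4, 128) (4, 128)
    print(d2.shape, i2.shape)  # (4, 256) (4, 256)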

Example 7: computeLoss

# Required import: import tensorflow [as alias]
# Or: from tensorflow import argmin [as alias]
def computeLoss(y_query, distances, class_ids, N_classes):#, N_query):

    logits = -1.0*distances

    local_class_ids = tf.argmin(distances, axis = 1)

    y_pred = tf.gather(class_ids, local_class_ids)

    labels = tf.zeros_like(y_query)
    for i,c in enumerate(tf.unstack(class_ids)):
        #print(i)
        mask = tf.expand_dims(tf.cast(tf.equal(y_query,c), tf.int64), axis = 1)
        mask = tf.reshape(mask, [-1])
        labels = labels + mask*(i % N_classes)

    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels = labels, logits = logits)
    loss = loss / (N_classes)

    loss = tf.reduce_mean(loss)

    return loss, y_pred 
Developer: stanislavfort, Project: gaussian-prototypical-networks, Lines of code: 23, Source file: cnn6.py

Example 8: k_means

# Required import: import tensorflow [as alias]
# Or: from tensorflow import argmin [as alias]
def k_means(image, clusters_num):
    image = tf.squeeze(image)
    print("k_means", image.shape)
    _points = tf.reshape(image, (-1, 1))
    centroids = tf.slice(tf.random_shuffle(_points), [0, 0], [clusters_num, -1])
    points_expanded = tf.expand_dims(_points, 0)

    for i in range(80):
        centroids_expanded = tf.expand_dims(centroids, 1)
        distances = tf.reduce_sum(tf.square(tf.subtract(points_expanded, centroids_expanded)), 2)
        assignments = tf.argmin(distances, 0)
        centroids = tf.concat(
            [tf.reduce_mean(tf.gather(_points, tf.reshape(tf.where(tf.equal(assignments, c)), [1, -1])), axis=1) for c
             in
             range(clusters_num)], 0)

    centroids = tf.squeeze(centroids)
    centroids = -tf.nn.top_k(-centroids, clusters_num)[0]  # sort
    return centroids 
Developer: JianqiangRen, Project: AAMS, Lines of code: 21, Source file: utils.py
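
A hedged usage sketch for k_means, assuming TensorFlow 1.x graph mode (to match tf.random_shuffle above) and a single-channel input image; the image shape and cluster count are arbitrary placeholders.

import numpy as np
import tensorflow as tf

image = tf.placeholder(tf.float32, shape=(1, 64, 64, 1))
centroids = k_means(image, clusters_num=4)  # unrolls 80 k-means update steps into the graph

with tf.Session() as sess:
    values = sess.run(centroids, feed_dict={image: np.random.rand(1, 64, 64, 1)})
    print(values)  # the 4 cluster centres of the pixel intensities, sorted ascending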

Example 9: __call__

# Required import: import tensorflow [as alias]
# Or: from tensorflow import argmin [as alias]
def __call__(self, codes):
    """Uses codebook to find nearest neighbor for each code.

    Args:
      codes: A `float`-like `Tensor` containing the latent
        vectors to be compared to the codebook. These are rank-3 with shape
        `[batch_size, latent_size, code_size]`.

    Returns:
      nearest_codebook_entries: The 1-nearest neighbor in Euclidean distance for
        each code in the batch.
      one_hot_assignments: The one-hot vectors corresponding to the matched
        codebook entry for each code in the batch.
    """
    distances = tf.norm(
        tensor=tf.expand_dims(codes, 2) -
        tf.reshape(self.codebook, [1, 1, self.num_codes, self.code_size]),
        axis=3)
    assignments = tf.argmin(input=distances, axis=2)
    one_hot_assignments = tf.one_hot(assignments, depth=self.num_codes)
    nearest_codebook_entries = tf.reduce_sum(
        input_tensor=tf.expand_dims(one_hot_assignments, -1) *
        tf.reshape(self.codebook, [1, 1, self.num_codes, self.code_size]),
        axis=2)
    return nearest_codebook_entries, one_hot_assignments 
Developer: GoogleCloudPlatform, Project: ml-on-gcp, Lines of code: 27, Source file: vq_vae.py

Example 10: get_items_to_encode

# Required import: import tensorflow [as alias]
# Or: from tensorflow import argmin [as alias]
def get_items_to_encode(self, end_points, data_batched):
    """Outputs a list with format (name, is_image, tensor)"""
    items_to_encode = []
    if 'source' in data_batched:
      items_to_encode.append(('sources', True, self._post_process_image(data_batched.get('source'))))
    generated_targets = end_points['generator_output']
    generated_target_prediction = end_points['discriminator_generated_prediction']
    real_target_prediction = end_points['discriminator_real_prediction']
    targets = data_batched.get('target')
    items_to_encode.append(('targets', True, self._post_process_image(targets)))
    items_to_encode.append(('generated_targets', True, self._post_process_image(generated_targets)))
    items_to_encode.append(('generated_target_prediction', False, generated_target_prediction))
    items_to_encode.append(('real_target_prediction', False, real_target_prediction))

    best_generated_target_i = tf.argmax(tf.squeeze(generated_target_prediction, axis=1))
    worst_real_target_i = tf.argmin(tf.squeeze(real_target_prediction, axis=1))

    items_to_encode.append(
      ('best_generated_target', True, self._post_process_image(generated_targets[best_generated_target_i])))
    items_to_encode.append(('worst_real_target', True, self._post_process_image(targets[worst_real_target_i])))
    return items_to_encode 
Developer: jerryli27, Project: TwinGAN, Lines of code: 23, Source file: image_generation.py

Example 11: add_summary_images

# Required import: import tensorflow [as alias]
# Or: from tensorflow import argmin [as alias]
def add_summary_images(self, num=9):
    """Visualize source images and nearest neighbors from target."""
    source_ims = self.source_gen.get_batch(bs=num, reuse=True)
    vis_images = self.add_summary_montage(source_ims, 'source_ims', num)

    target_ims = self.target_gen.get_batch()
    _ = self.add_summary_montage(target_ims, 'target_ims', num)

    c_xy = self.basedist(source_ims, target_ims)  # pairwise cost
    idx = tf.argmin(c_xy, axis=1)  # find nearest neighbors
    matches = tf.gather(target_ims, idx)
    vis_matches = self.add_summary_montage(matches, 'neighbors_ims', num)

    vis_both = tf.concat([vis_images, vis_matches], axis=1)
    tf.summary.image('matches_ims', vis_both)

    return 
Developer: google, Project: wasserstein-dist, Lines of code: 19, Source file: wasserstein.py

Example 12: cf_nn

# Required import: import tensorflow [as alias]
# Or: from tensorflow import argmin [as alias]
def cf_nn(x, t):
    It = tf.where(tf.equal(t, 1))[:, 0]
    Ic = tf.where(tf.equal(t, 0))[:, 0]

    x_c = tf.gather(x, Ic)
    x_t = tf.gather(x, It)

    D = pdist2(x_c, x_t)

    nn_t = tf.gather(Ic, tf.argmin(D, 0))
    nn_c = tf.gather(It, tf.argmin(D, 1))

    return tf.stop_gradient(nn_t), tf.stop_gradient(nn_c)


# SOURCE: https://github.com/clinicalml/cfrnet, MIT-License 
Developer: d909b, Project: perfect_match, Lines of code: 18, Source file: pehe_loss.py

Example 13: predict_labels

# Required import: import tensorflow [as alias]
# Or: from tensorflow import argmin [as alias]
def predict_labels(logits):
    """ Predict self labels
    logits: Logits from inference(). [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
    Return [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
    """
    with tf.variable_scope('PredictLabels') as scope:
        # Reshape logits for argmax and argmin
        logits = tf.reshape(logits, [-1, FLAGS.target_label_size]) # [FLAGS.batch_size*FLAGS.max_doc_length, FLAGS.target_label_size]
        # Get labels predicted using these logits
        logits_argmax = tf.argmax(logits, 1) # [FLAGS.batch_size*FLAGS.max_doc_length]
        logits_argmax = tf.reshape(logits_argmax, [-1, FLAGS.max_doc_length])  # [FLAGS.batch_size, FLAGS.max_doc_length]
        logits_argmax = tf.expand_dims(logits_argmax, 2) # [FLAGS.batch_size, FLAGS.max_doc_length, 1]
        
        logits_argmin = tf.argmin(logits, 1) # [FLAGS.batch_size*FLAGS.max_doc_length]
        logits_argmin = tf.reshape(logits_argmin, [-1, FLAGS.max_doc_length])  # [FLAGS.batch_size, FLAGS.max_doc_length]
        logits_argmin = tf.expand_dims(logits_argmin, 2) # [FLAGS.batch_size, FLAGS.max_doc_length, 1]
        
        # Convert argmin and argmax to labels, works only if FLAGS.target_label_size = 2
        labels = tf.concat([logits_argmin, logits_argmax], 2) # [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
        dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
        labels = tf.cast(labels, dtype)
        
        return labels 
Developer: EdinburghNLP, Project: Refresh, Lines of code: 25, Source file: model_docsum.py

Example 14: multilabel_image_to_class

# Required import: import tensorflow [as alias]
# Or: from tensorflow import argmin [as alias]
def multilabel_image_to_class(label_image: tf.Tensor, classes_file: str) -> tf.Tensor:
    """
    Combines image annotations with classes info of the txt file to create the input label for the training.

    :param label_image: annotated image [H,W,Ch] or [B,H,W,Ch] (Ch = color channels)
    :param classes_file: the filename of the txt file containing the class info
    :return: [H,W,Cl] or [B,H,W,Cl] (Cl = number of classes)
    """
    classes_color_values, colors_labels = get_classes_color_from_file_multilabel(classes_file)
    # Convert label_image [H,W,3] to the classes [H,W,C],int32 according to the classes [C,3]
    with tf.name_scope('LabelAssign'):
        if len(label_image.get_shape()) == 3:
            diff = tf.cast(label_image[:, :, None, :], tf.float32) - tf.constant(classes_color_values[None, None, :, :])  # [H,W,C,3]
        elif len(label_image.get_shape()) == 4:
            diff = tf.cast(label_image[:, :, :, None, :], tf.float32) - tf.constant(
                classes_color_values[None, None, None, :, :])  # [B,H,W,C,3]
        else:
            raise NotImplementedError('Length is : {}'.format(len(label_image.get_shape())))

        pixel_class_diff = tf.reduce_sum(tf.square(diff), axis=-1)  # [H,W,C] or [B,H,W,C]
        class_label = tf.argmin(pixel_class_diff, axis=-1)  # [H,W] or [B,H,W]

        return tf.gather(colors_labels, class_label) > 0 
Developer: dhlab-epfl, Project: dhSegment, Lines of code: 25, Source file: labels.py

Example 15: nn_distance_cpu

# Required import: import tensorflow [as alias]
# Or: from tensorflow import argmin [as alias]
def nn_distance_cpu(pc1, pc2):
    '''
    Input:
        pc1: float TF tensor in shape (B,N,C) the first point cloud
        pc2: float TF tensor in shape (B,M,C) the second point cloud
    Output:
        dist1: float TF tensor in shape (B,N) distance from first to second
        idx1: int64 TF tensor in shape (B,N) nearest neighbor from first to second
        dist2: float TF tensor in shape (B,M) distance from second to first
        idx2: int64 TF tensor in shape (B,M) nearest neighbor from second to first
    '''
    N = pc1.get_shape()[1].value
    M = pc2.get_shape()[1].value
    pc1_expand_tile = tf.tile(tf.expand_dims(pc1, 2), [1, 1, M, 1])
    pc2_expand_tile = tf.tile(tf.expand_dims(pc2, 1), [1, N, 1, 1])
    pc_diff = pc1_expand_tile - pc2_expand_tile  # B,N,M,C
    pc_dist = tf.reduce_sum(pc_diff ** 2, axis=-1)  # B,N,M
    dist1 = tf.reduce_min(pc_dist, axis=2)  # B,N
    idx1 = tf.argmin(pc_dist, axis=2)  # B,N
    dist2 = tf.reduce_min(pc_dist, axis=1)  # B,M
    idx2 = tf.argmin(pc_dist, axis=1)  # B,M
    return dist1, idx1, dist2, idx2 
Developer: kujason, Project: monopsr, Lines of code: 24, Source file: tf_nndistance_cpu.py


Note: The tensorflow.argmin examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets are selected from open-source projects contributed by their respective authors; the source code copyright belongs to the original authors, and distribution and use should follow each project's license. Do not reproduce without permission.