

Python tensorflow.arg_max Method Code Examples

This article collects typical usage examples of the Python tensorflow.arg_max method. If you are wondering how tensorflow.arg_max is used in practice, or are looking for concrete examples of it, the curated code samples below may help. You can also explore further usage examples from the tensorflow module in which this method is defined.


The following shows 15 code examples of the tensorflow.arg_max method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
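Before the project examples, here is a minimal standalone sketch of the call itself (assuming TensorFlow 1.x, where tf.arg_max is the legacy alias of tf.argmax; the alias was removed in TensorFlow 2):

import tensorflow as tf  # TensorFlow 1.x assumed

logits = tf.constant([[0.1, 2.0, 0.3],
                      [1.5, 0.2, 0.9]])
# tf.arg_max returns the int64 index of the largest value along the given
# dimension, just like tf.argmax; here dimension=1 picks the best class per row.
best_class = tf.arg_max(logits, 1)

with tf.Session() as sess:
    print(sess.run(best_class))  # [1 0]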

Example 1: build_output

# Required import: import tensorflow [as alias]
# Or: from tensorflow import arg_max [as alias]
def build_output(self, inputs, inferences):
    scores = tf.nn.softmax(inferences, name='scores')
    tf.add_to_collection('outputs', scores)

    with tf.name_scope('labels'):
      label_indices = tf.arg_max(inferences, 1, name='arg_max')
      labels = self.classification.output_labels(label_indices)
      tf.add_to_collection('outputs', labels)

    keys = self.classification.keys(inputs)
    if keys:
      # Key feature, if it exists, is a passthrough to the output.
      # The use of identity is to name the tensor and correspondingly the output field.
      keys = tf.identity(keys, name='key')
      tf.add_to_collection('outputs', keys)

    return {
      'label': labels,
      'score': scores
    } 
Developer: TensorLab, Project: tensorfx, Lines of code: 22, Source file: _ff.py

Example 2: max_sentence_similarity

# Required import: import tensorflow [as alias]
# Or: from tensorflow import arg_max [as alias]
def max_sentence_similarity(sentence_input, similarity_matrix):
    """
    Parameters
    ----------
    sentence_input: Tensor
        Tensor of shape (batch_size, num_sentence_words, rnn_hidden_dim).

    similarity_matrix: Tensor
        Tensor of shape (batch_size, num_sentence_words, num_sentence_words).
    """
    # Shape: (batch_size, passage_len)
    def single_instance(inputs):
        single_sentence = inputs[0]
        argmax_index = inputs[1]
        # Shape: (num_sentence_words, rnn_hidden_dim)
        return tf.gather(single_sentence, argmax_index)

    question_index = tf.arg_max(similarity_matrix, 2)
    elems = (sentence_input, question_index)
    # Shape: (batch_size, num_sentence_words, rnn_hidden_dim)
    return tf.map_fn(single_instance, elems, dtype="float") 
Developer: nelson-liu, Project: paraphrase-id-tensorflow, Lines of code: 23, Source file: matching.py

Example 3: initialize_neural_network_model

# Required import: import tensorflow [as alias]
# Or: from tensorflow import arg_max [as alias]
def initialize_neural_network_model(self, X_train):
        # Create placeholders
        self.features_pl = tf.placeholder(tf.float32, [None, len(X_train[0])], 'features')
        self.stances_pl = tf.placeholder(tf.int64, [None], 'stances')
        self.keep_prob_pl = tf.placeholder(tf.float32)

        # Infer batch size
        self.batch_size = tf.shape(self.features_pl)[0]

        # Define multi-layer perceptron
        self.hidden_layer = tf.nn.dropout(tf.nn.relu(tf.contrib.layers.linear(self.features_pl, self.hidden_size)), keep_prob=self.keep_prob_pl)
        self.logits_flat = tf.nn.dropout(tf.contrib.layers.linear(self.hidden_layer, self.target_size), keep_prob=self.keep_prob_pl)
        self.logits = tf.reshape(self.logits_flat, [self.batch_size, self.target_size])

        # Define L2 loss
        self.tf_vars = tf.trainable_variables()
        self.l2_loss = tf.add_n([tf.nn.l2_loss(v) for v in self.tf_vars if 'bias' not in v.name]) * self.l2_alpha

        # Define overall loss
        self.loss = tf.reduce_sum(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.logits, labels=self.stances_pl) + self.l2_loss)

        # Define prediction
        self.softmaxed_logits = tf.nn.softmax(self.logits)
        self.predict_value = tf.arg_max(self.softmaxed_logits, 1) 
Developer: UKPLab, Project: coling2018_fake-news-challenge, Lines of code: 26, Source file: riedel_mlp.py

Example 4: batch_iou_

# Required import: import tensorflow [as alias]
# Or: from tensorflow import arg_max [as alias]
def batch_iou_(anchors, bboxes):
  """ Compute iou of two batch of boxes. Box format '[y_min, x_min, y_max, x_max]'.
  Args:
    anchors: know shape
    bboxes: dynamic shape
  Return:
    ious: 2-D with shape '[num_bboxes, num_anchors]'
    indices: [num_bboxes, 1]
  """
  num_anchors = anchors.get_shape().as_list()[0]
  ious_list = []
  for i in range(num_anchors):
    anchor = anchors[i]
    _ious = batch_iou(bboxes, anchor)
    ious_list.append(_ious)
  ious = tf.stack(ious_list, axis=0)
  ious = tf.transpose(ious)

  indices = tf.arg_max(ious, dimension=1)

  return ious, indices 
Developer: Zehaos, Project: MobileNet, Lines of code: 23, Source file: mobilenetdet.py

Example 5: test_batch_iou

# Required import: import tensorflow [as alias]
# Or: from tensorflow import arg_max [as alias]
def test_batch_iou(self):
    with self.test_session() as sess:
      anchors = set_anchors(img_shape=[config.IMG_HEIGHT, config.IMG_WIDTH],
                            fea_shape=[config.FEA_HEIGHT, config.FEA_WIDTH])
      anchors_shape = anchors.get_shape().as_list()
      fea_h = anchors_shape[0]
      fea_w = anchors_shape[1]
      num_anchors = anchors_shape[2] * fea_h * fea_w
      anchors = tf.reshape(anchors, [num_anchors, 4])  # reshape anchors
      anchors = xywh_to_yxyx(anchors)
      bbox = tf.constant([0.75, 0.75, 0.2, 0.2], dtype=tf.float32)
      bbox = xywh_to_yxyx(bbox)
      iou = batch_iou(anchors, bbox)
      anchor_idx = tf.arg_max(iou, dimension=0)
      anchors, output, anchor_idx = sess.run([anchors, iou, anchor_idx])
      print(anchors)
      print(output)
      print(anchor_idx) 
Developer: Zehaos, Project: MobileNet, Lines of code: 20, Source file: mobilenetdet_test.py

Example 6: __call__

# Required import: import tensorflow [as alias]
# Or: from tensorflow import arg_max [as alias]
def __call__(self, sess, epoch, iteration, model, loss, processed):
        if iteration == 0 and epoch % self.at_every_epoch == 0:
            total = 0
            correct = 0
            for values in self.batcher:
                total += len(values[-1])
                feed_dict = {}
                for i in range(0, len(self.placeholders)):
                    feed_dict[self.placeholders[i]] = values[i]
                truth = np.argmax(values[-1], 1)
                predicted = sess.run(tf.arg_max(tf.nn.softmax(model), 1),
                                     feed_dict=feed_dict)
                correct += sum(truth == predicted)
            acc = float(correct) / total
            self.update_summary(sess, iteration, ACCURACY_TRACE_TAG, acc)
            print("Epoch " + str(epoch) +
                  "\tAcc " + str(acc) +
                  "\tCorrect " + str(correct) + "\tTotal " + str(total)) 
Developer: uclnlp, Project: pycodesuggest, Lines of code: 20, Source file: hooks.py

Example 7: __init__

# Required import: import tensorflow [as alias]
# Or: from tensorflow import arg_max [as alias]
def __init__(self, is_training=True):
        self.graph = tf.Graph()
        with self.graph.as_default():
            if is_training:
                self.x, self.y, self.num_batch = get_batch()
            else: # Evaluation
                self.x = tf.placeholder(tf.int32, shape=(None, hp.max_len,))
                self.y = tf.placeholder(tf.int32, shape=(None, hp.max_len,))
            
            # Character Embedding for x
            self.enc = embed(self.x, len(roma2idx), hp.embed_size, scope="emb_x")
                
            # Encoder
            self.memory = encode(self.enc, is_training=is_training)
            
            # Character Embedding for decoder_inputs
            self.decoder_inputs = shift_by_one(self.y)
            self.dec = embed(self.decoder_inputs, len(surf2idx), hp.embed_size, scope="emb_decoder_inputs")
            
            # Decoder
            self.outputs = decode(self.dec, self.memory, len(surf2idx), is_training=is_training) # (N, T', hp.n_mels*hp.r)
            self.logprobs = tf.log(tf.nn.softmax(self.outputs)+1e-10) 
            self.preds = tf.arg_max(self.outputs, dimension=-1)
                
            if is_training: 
                self.loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.y, logits=self.outputs) 
                self.istarget = tf.to_float(tf.not_equal(self.y, tf.zeros_like(self.y))) # masking
                self.mean_loss = tf.reduce_sum(self.loss * self.istarget) / (tf.reduce_sum(self.istarget))
               
                # Training Scheme
                self.global_step = tf.Variable(0, name='global_step', trainable=False)
                self.optimizer = tf.train.AdamOptimizer(learning_rate=hp.lr)
                self.train_op = self.optimizer.minimize(self.mean_loss, global_step=self.global_step)
                   
                # Summary 
                tf.summary.scalar('mean_loss', self.mean_loss)
                self.merged = tf.summary.merge_all() 
Developer: Kyubyong, Project: neural_japanese_transliterator, Lines of code: 39, Source file: train.py

Example 8: __init__

# Required import: import tensorflow [as alias]
# Or: from tensorflow import arg_max [as alias]
def __init__(self, is_training=True):
        self.graph = tf.Graph()
        self.is_training=is_training
        with self.graph.as_default():
            if is_training:
                self.x, self.y, self.num_batch = get_batch() 
            else: # Evaluation
                self.x = tf.placeholder(tf.float32, shape=(None, None, hp.n_mels*hp.r))
                self.y = tf.placeholder(tf.int32, shape=(None, hp.max_len))
            
            self.decoder_inputs = embed(shift_by_one(self.y), len(char2idx), hp.embed_size) # (N, T', E)
            
            with tf.variable_scope('net'):
                # Encoder
                self.memory = encode(self.x, is_training=is_training) # (N, T, hp.n_mels*hp.r)
                
                # Decoder
                self.outputs = decode(self.decoder_inputs, self.memory, is_training=is_training) # (N, T', E)
                self.logprobs = tf.log(tf.nn.softmax(self.outputs)+1e-10) 
                self.preds = tf.arg_max(self.outputs, dimension=-1)
                
            if is_training:  
                # Loss
                self.loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.y, logits=self.outputs)
                
                # Target masking
                self.istarget = tf.to_float(tf.not_equal(self.y, 0))
                self.mean_loss = tf.reduce_sum(self.loss*self.istarget) / (tf.reduce_sum(self.istarget) + 1e-7)
                
                # Training Scheme
                self.global_step = tf.Variable(0, name='global_step', trainable=False)
                self.optimizer = tf.train.AdamOptimizer(learning_rate=hp.lr)
                self.train_op = self.optimizer.minimize(self.mean_loss, global_step=self.global_step)
                   
                # Summary 
                tf.summary.scalar('mean_loss', self.mean_loss)
                self.merged = tf.summary.merge_all() 
Developer: Kyubyong, Project: tacotron_asr, Lines of code: 39, Source file: train.py

Example 9: predict

# Required import: import tensorflow [as alias]
# Or: from tensorflow import arg_max [as alias]
def predict(images, exp_config):
    '''
    Returns the prediction for an image given a network from the model zoo
    :param images: An input image tensor
    :param exp_config: An experiment configuration providing the model function (model_handle) and the number of labels
    :return: A prediction mask, and the corresponding softmax output
    '''

    logits = exp_config.model_handle(images, training=tf.constant(False, dtype=tf.bool), nlabels=exp_config.nlabels)
    softmax = tf.nn.softmax(logits)
    mask = tf.arg_max(softmax, dimension=-1)

    return mask, softmax 
Developer: baumgach, Project: acdc_segmenter, Lines of code: 15, Source file: model.py

Example 10: evaluation

# Required import: import tensorflow [as alias]
# Or: from tensorflow import arg_max [as alias]
def evaluation(logits, labels, images, nlabels, loss_type):
    '''
    A function for evaluating the performance of the network on a minibatch. This function returns the loss and the
    current foreground Dice score, and also writes example segmentations and images to tensorboard.
    :param logits: Output of network before softmax
    :param labels: Ground-truth label mask
    :param images: Input image mini batch
    :param nlabels: Number of labels in the dataset
    :param loss_type: Which loss should be evaluated
    :return: The loss without weight decay, the foreground dice of a minibatch
    '''

    mask = tf.arg_max(tf.nn.softmax(logits, dim=-1), dimension=-1)  # was 3
    mask_gt = labels

    tf.summary.image('example_gt', prepare_tensor_for_summary(mask_gt, mode='mask', nlabels=nlabels))
    tf.summary.image('example_pred', prepare_tensor_for_summary(mask, mode='mask', nlabels=nlabels))
    tf.summary.image('example_zimg', prepare_tensor_for_summary(images, mode='image'))

    total_loss, nowd_loss, weights_norm = loss(logits, labels, nlabels=nlabels, loss_type=loss_type)

    cdice_structures = losses.per_structure_dice(logits, tf.one_hot(labels, depth=nlabels))
    cdice_foreground = cdice_structures[:,1:]

    cdice = tf.reduce_mean(cdice_foreground)

    return nowd_loss, cdice 
Developer: baumgach, Project: acdc_segmenter, Lines of code: 29, Source file: model.py

Example 11: batch_iou_fast

# Required import: import tensorflow [as alias]
# Or: from tensorflow import arg_max [as alias]
def batch_iou_fast(anchors, bboxes):
  """ Compute iou of two batch of boxes. Box format '[y_min, x_min, y_max, x_max]'.
  Args:
    anchors: know shape
    bboxes: dynamic shape
  Return:
    ious: 2-D with shape '[num_bboxes, num_anchors]'
    indices: [num_bboxes, 1]
  """
  num_anchors = anchors.get_shape().as_list()[0]
  tensor_num_bboxes = tf.shape(bboxes)[0]
  indices = tf.reshape(tf.range(tensor_num_bboxes), shape=[-1, 1])
  indices = tf.reshape(tf.stack([indices]*num_anchors, axis=1), shape=[-1, 1])
  bboxes_m = tf.gather_nd(bboxes, indices)

  anchors_m = tf.tile(anchors, [tensor_num_bboxes, 1])

  lr = tf.maximum(
    tf.minimum(bboxes_m[:, 3], anchors_m[:, 3]) -
    tf.maximum(bboxes_m[:, 1], anchors_m[:, 1]),
    0
  )
  tb = tf.maximum(
    tf.minimum(bboxes_m[:, 2], anchors_m[:, 2]) -
    tf.maximum(bboxes_m[:, 0], anchors_m[:, 0]),
    0
  )
  intersection = tf.multiply(tb, lr)
  union = tf.subtract(
    tf.multiply((bboxes_m[:, 3] - bboxes_m[:, 1]), (bboxes_m[:, 2] - bboxes_m[:, 0])) +
    tf.multiply((anchors_m[:, 3] - anchors_m[:, 1]), (anchors_m[:, 2] - anchors_m[:, 0])),
    intersection
  )
  ious = tf.div(intersection, union)

  ious = tf.reshape(ious, shape=[tensor_num_bboxes, num_anchors])

  indices = tf.arg_max(ious, dimension=1)

  return ious, indices 
Developer: Zehaos, Project: MobileNet, Lines of code: 42, Source file: mobilenetdet.py

Example 12: calc_reward

# Required import: import tensorflow [as alias]
# Or: from tensorflow import arg_max [as alias]
def calc_reward(outputs):
    outputs = outputs[-1] # look at ONLY THE END of the sequence
    outputs = tf.reshape(outputs, (batch_size, cell_out_size))
    h_a_out = weight_variable((cell_out_size, n_classes))

    p_y = tf.nn.softmax(tf.matmul(outputs, h_a_out))
    max_p_y = tf.arg_max(p_y, 1)
    correct_y = tf.cast(labels_placeholder, tf.int64)

    R = tf.cast(tf.equal(max_p_y, correct_y), tf.float32) # reward per example

    reward = tf.reduce_mean(R) # overall reward
    
    p_loc = gaussian_pdf(mean_locs, sampled_locs)
    p_loc = tf.reshape(p_loc, (batch_size, glimpses * 2))

    R = tf.reshape(R, (batch_size, 1))
    J = tf.concat(1, [tf.log(p_y + 1e-5) * onehot_labels_placeholder, tf.log(p_loc + 1e-5) * R])
    J = tf.reduce_sum(J, 1)
    J = tf.reduce_mean(J, 0)
    cost = -J
    
    optimizer = tf.train.AdamOptimizer(lr)
    train_op = optimizer.minimize(cost)

    return cost, reward, max_p_y, correct_y, train_op 
Developer: seann999, Project: tensorflow_mnist_ram, Lines of code: 28, Source file: ram.py

Example 13: predictor

# Required import: import tensorflow [as alias]
# Or: from tensorflow import arg_max [as alias]
def predictor(inputs, targets, target_size):
    init = tf.contrib.layers.xavier_initializer(uniform=True) #uniform=False for truncated normal
    logits = tf.contrib.layers.fully_connected(inputs, target_size, weights_initializer=init, activation_fn=None)

    loss = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits,
            labels=targets), name='predictor_loss')
    predict = tf.arg_max(tf.nn.softmax(logits), 1, name='prediction')
    return [logits, loss, predict] 
Developer: JD-AI-Research-Silicon-Valley, Project: SACN, Lines of code: 11, Source file: tfmodels.py

Example 14: compute_iou_from_logits

# Required import: import tensorflow [as alias]
# Or: from tensorflow import arg_max [as alias]
def compute_iou_from_logits(preds, labels, num_labels):
  """
  Computes the intersection over union (IoU) score for given logit tensor and target labels
  :param preds: 3D tensor of shape [batch_size, height, width] with predicted class labels
  :param labels: 3D tensor of shape [batch_size, height, width] and type int32 or int64
  :param num_labels: number of labels (a Python int, since it is iterated over below)
  :return: 1D tensor of shape [num_classes] with intersection over union for each class, averaged over batch
  """
  with tf.variable_scope("IoU"):
    # compute predictions
    # probs = softmax(logits, axis=-1)
    # preds = tf.arg_max(probs, dimension=3)
    # num_labels = preds.get_shape().as_list()[-1];
    IoUs = []
    for label in range(num_labels):
      # find pixels with given label
      P = tf.equal(preds, label)
      L = tf.equal(labels, label)
      # Union
      U = tf.logical_or(P, L)
      U = tf.reduce_sum(tf.cast(U, tf.float32))
      # intersection
      I = tf.logical_and(P, L)
      I = tf.reduce_sum(tf.cast(I, tf.float32))

      IOU = tf.cast(I, tf.float32) / tf.cast(U, tf.float32)
      # U might be 0!
      IOU = tf.where(tf.equal(U, 0), tf.ones_like(IOU), IOU)  # use a float tensor so dtypes match
      IOU = tf.Print(IOU, [IOU], "iou" + repr(label))
      IoUs.append(IOU)
    return tf.reshape(tf.stack(IoUs), (num_labels,)) 
Developer: JonathonLuiten, Project: PReMVOS, Lines of code: 33, Source file: Measures.py

Example 15: iou_from_logits

# Required import: import tensorflow [as alias]
# Or: from tensorflow import arg_max [as alias]
def iou_from_logits(logits, labels):
  """
  Computes the intersection over union (IoU) score for given logit tensor and target labels
  :param logits: 4D tensor of shape [batch_size, height, width, num_classes]
  :param labels: 3D tensor of shape [batch_size, height, width] and type int32 or int64
  :return: 1D tensor of shape [num_classes] with intersection over union for each class, averaged over batch
  """

  with tf.variable_scope("IoU"):
    # compute predictions
    preds = tf.arg_max(logits, dimension=3)

    num_labels = logits.get_shape().as_list()[-1]
    IoUs = []
    for label in range(num_labels):
      # find pixels with given label
      P = tf.equal(preds, label)
      L = tf.equal(labels, label)

      # Union
      U = tf.logical_or(P, L)
      U = tf.reduce_sum(tf.cast(U, tf.float32))

      # intersection
      I = tf.logical_and(P, L)
      I = tf.reduce_sum(tf.cast(I, tf.float32))

      IoUs.append(I / U)

    return tf.reshape(tf.stack(IoUs), (num_labels,)) 
Developer: JonathonLuiten, Project: PReMVOS, Lines of code: 32, Source file: Util_Network.py


Note: The tensorflow.arg_max examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub/MSDocs. The code snippets were selected from open-source projects contributed by their respective developers; the source code copyright belongs to the original authors. For distribution and use, please refer to the License of the corresponding project. Do not republish without permission.