

Python layers.accuracy method code examples

This article collects typical usage examples of the Python layers.accuracy method. If you are wondering what layers.accuracy does, how to call it, or what real usage looks like, the curated code examples below may help. You can also explore further usage examples from the layers module.


The following presents 5 code examples of layers.accuracy, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
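From the call sites below, layers.accuracy(logits, labels, weights) evaluates to a scalar weighted accuracy. As a rough orientation before the examples, here is a minimal sketch of such a function, assuming argmax predictions and per-token weights; the actual implementations in the projects below may differ in details:

import tensorflow as tf

def accuracy(logits, targets, weights):
    """Weighted fraction of examples whose argmax prediction equals the target.

    Sketch only: the real layers.accuracy in these projects may add name
    scopes or handle shapes/dtypes differently.
    """
    predictions = tf.cast(tf.argmax(logits, axis=-1), targets.dtype)
    correct = tf.cast(tf.equal(predictions, targets), tf.float32)
    weights = tf.cast(weights, tf.float32)
    return tf.reduce_sum(weights * correct) / tf.reduce_sum(weights)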

Example 1: classifier_graph

# Required imports: import layers [as alias]
# Or: from layers import accuracy [as alias]
def classifier_graph(self):
    """Constructs classifier graph from inputs to classifier loss.

    * Caches the VatxtInput object in `self.cl_inputs`
    * Caches tensors: `cl_embedded`, `cl_logits`, `cl_loss`

    Returns:
      loss: scalar float.
    """
    inputs = _inputs('train', pretrain=False)
    self.cl_inputs = inputs
    embedded = self.layers['embedding'](inputs.tokens)
    self.tensors['cl_embedded'] = embedded

    _, next_state, logits, loss = self.cl_loss_from_embedding(
        embedded, return_intermediates=True)
    tf.summary.scalar('classification_loss', loss)
    self.tensors['cl_logits'] = logits
    self.tensors['cl_loss'] = loss

    acc = layers_lib.accuracy(logits, inputs.labels, inputs.weights)
    tf.summary.scalar('accuracy', acc)

    adv_loss = (self.adversarial_loss() * tf.constant(
        FLAGS.adv_reg_coeff, name='adv_reg_coeff'))
    tf.summary.scalar('adversarial_loss', adv_loss)

    total_loss = loss + adv_loss
    tf.summary.scalar('total_classification_loss', total_loss)

    with tf.control_dependencies([inputs.save_state(next_state)]):
      total_loss = tf.identity(total_loss)

    return total_loss 
Developer: ringringyi, Project: DOTA_models, Lines: 36, Source file: graphs.py
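The closing tf.control_dependencies([inputs.save_state(next_state)]) plus tf.identity pattern above is the usual TF1 idiom for forcing a side-effecting op to run whenever the returned tensor is fetched. A small, self-contained sketch of the same pattern (all names here are illustrative, not from the project):

import tensorflow as tf

state_var = tf.Variable(tf.zeros([2, 8]), trainable=False, name='rnn_state')
new_state = tf.random_normal([2, 8])
loss = tf.reduce_mean(tf.square(new_state))

save_op = tf.assign(state_var, new_state)       # side effect we want to force
with tf.control_dependencies([save_op]):
    # tf.identity creates a new node that depends on save_op, so fetching
    # loss_with_save always runs the state assignment first.
    loss_with_save = tf.identity(loss)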

Example 2: eval_graph

# Required imports: import layers [as alias]
# Or: from layers import accuracy [as alias]
def eval_graph(self, dataset='test'):
    """Constructs classifier evaluation graph.

    Args:
      dataset: the labeled dataset to evaluate, {'train', 'test', 'valid'}.

    Returns:
      eval_ops: dict<metric name, tuple(value, update_op)>
      var_restore_dict: dict mapping variable restoration names to variables.
        Trainable variables will be mapped to their moving average names.
    """
    inputs = _inputs(dataset, pretrain=False)
    embedded = self.layers['embedding'](inputs.tokens)
    _, next_state, logits, _ = self.cl_loss_from_embedding(
        embedded, inputs=inputs, return_intermediates=True)

    eval_ops = {
        'accuracy':
            tf.contrib.metrics.streaming_accuracy(
                layers_lib.predictions(logits), inputs.labels, inputs.weights)
    }

    with tf.control_dependencies([inputs.save_state(next_state)]):
      acc, acc_update = eval_ops['accuracy']
      acc_update = tf.identity(acc_update)
      eval_ops['accuracy'] = (acc, acc_update)

    var_restore_dict = make_restore_average_vars_dict()
    return eval_ops, var_restore_dict 
Developer: ringringyi, Project: DOTA_models, Lines: 31, Source file: graphs.py
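tf.contrib.metrics.streaming_accuracy returns a (value, update_op) pair: running update_op accumulates correct/total counters across batches, and value reads the current ratio, which is why eval_ops stores the tuple. A toy, self-contained TF1 sketch of driving such a metric (the data here is made up; in eval_graph the inputs come from layers_lib.predictions(logits) and inputs.labels):

import tensorflow as tf

preds = tf.placeholder(tf.int64, [None])
labels = tf.placeholder(tf.int64, [None])
acc_value, acc_update = tf.contrib.metrics.streaming_accuracy(preds, labels)

batches = [([0, 1, 1], [0, 1, 0]), ([1, 0], [1, 0])]
with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())   # metric counters are local variables
    for p, l in batches:
        sess.run(acc_update, {preds: p, labels: l})   # accumulate counts
    print(sess.run(acc_value))                   # 4 correct out of 5 -> 0.8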

Example 3: build_eval_graph

# Required imports: import layers [as alias]
# Or: from layers import accuracy [as alias]
def build_eval_graph(x, y, ul_x):
    losses = {}
    logit = vat.forward(x, is_training=False, update_batch_stats=False)
    nll_loss = L.ce_loss(logit, y)
    losses['NLL'] = nll_loss
    acc = L.accuracy(logit, y)
    losses['Acc'] = acc
    scope = tf.get_variable_scope()
    scope.reuse_variables()
    at_loss = vat.adversarial_loss(x, y, nll_loss, is_training=False)
    losses['AT_loss'] = at_loss
    ul_logit = vat.forward(ul_x, is_training=False, update_batch_stats=False)
    vat_loss = vat.virtual_adversarial_loss(ul_x, ul_logit, is_training=False)
    losses['VAT_loss'] = vat_loss
    return losses 
Developer: takerum, Project: vat_tf, Lines: 17, Source file: train_semisup.py
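Here L.accuracy takes only logits and labels, with no weights. A plausible sketch of such a two-argument helper, assuming one-hot labels as is common in this kind of semi-supervised setup (the actual L.accuracy in vat_tf may differ):

import tensorflow as tf

def accuracy(logit, y):
    """Fraction of examples whose argmax prediction matches the one-hot label."""
    pred = tf.argmax(logit, axis=1)
    true = tf.argmax(y, axis=1)
    return tf.reduce_mean(tf.cast(tf.equal(pred, true), tf.float32))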

Example 4: classifier_graph

# Required imports: import layers [as alias]
# Or: from layers import accuracy [as alias]
def classifier_graph(self):
    """Constructs classifier graph from inputs to classifier loss.

    * Caches the VatxtInput object in `self.cl_inputs`
    * Caches tensors: `cl_embedded`, `cl_logits`, `cl_loss`

    Returns:
      loss: scalar float.
    """
    inputs = _inputs('train', pretrain=False)
    self.cl_inputs = inputs
    embedded = self.layers['embedding'](inputs.tokens)
    self.tensors['cl_embedded'] = embedded

    _, next_state, logits, loss = self.cl_loss_from_embedding(
        embedded, return_intermediates=True)
    tf.summary.scalar('classification_loss', loss)
    self.tensors['cl_logits'] = logits
    self.tensors['cl_loss'] = loss

    if FLAGS.single_label:
      indices = tf.stack([tf.range(FLAGS.batch_size), inputs.length - 1], 1)
      labels = tf.expand_dims(tf.gather_nd(inputs.labels, indices), 1)
      weights = tf.expand_dims(tf.gather_nd(inputs.weights, indices), 1)
    else:
      labels = inputs.labels
      weights = inputs.weights
    acc = layers_lib.accuracy(logits, labels, weights)
    tf.summary.scalar('accuracy', acc)

    adv_loss = (self.adversarial_loss() * tf.constant(
        FLAGS.adv_reg_coeff, name='adv_reg_coeff'))
    tf.summary.scalar('adversarial_loss', adv_loss)

    total_loss = loss + adv_loss

    with tf.control_dependencies([inputs.save_state(next_state)]):
      total_loss = tf.identity(total_loss)
      tf.summary.scalar('total_classification_loss', total_loss)
    return total_loss 
Developer: itsamitgoel, Project: Gun-Detector, Lines: 42, Source file: graphs.py
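The FLAGS.single_label branch keeps only the label and weight at the last real timestep of each sequence. A small, self-contained illustration of the same tf.stack / tf.gather_nd indexing (the shapes and values are made up for the example):

import tensorflow as tf

batch_size = 2
labels = tf.constant([[7, 7, 7, 9],    # sequence 1, true length 4
                      [3, 3, 5, 0]])   # sequence 2, true length 3 (last entry is padding)
length = tf.constant([4, 3])

# One (row, column) pair per example, pointing at its last real timestep.
indices = tf.stack([tf.range(batch_size), length - 1], 1)        # [[0, 3], [1, 2]]
last_labels = tf.expand_dims(tf.gather_nd(labels, indices), 1)   # shape [batch_size, 1]

with tf.Session() as sess:
    print(sess.run(last_labels))   # [[9], [5]]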

Example 5: eval_graph

# Required imports: import layers [as alias]
# Or: from layers import accuracy [as alias]
def eval_graph(self, dataset='test'):
    """Constructs classifier evaluation graph.

    Args:
      dataset: the labeled dataset to evaluate, {'train', 'test', 'valid'}.

    Returns:
      eval_ops: dict<metric name, tuple(value, update_op)>
      var_restore_dict: dict mapping variable restoration names to variables.
        Trainable variables will be mapped to their moving average names.
    """
    inputs = _inputs(dataset, pretrain=False)
    embedded = self.layers['embedding'](inputs.tokens)
    _, next_state, logits, _ = self.cl_loss_from_embedding(
        embedded, inputs=inputs, return_intermediates=True)

    if FLAGS.single_label:
      indices = tf.stack([tf.range(FLAGS.batch_size), inputs.length - 1], 1)
      labels = tf.expand_dims(tf.gather_nd(inputs.labels, indices), 1)
      weights = tf.expand_dims(tf.gather_nd(inputs.weights, indices), 1)
    else:
      labels = inputs.labels
      weights = inputs.weights
    eval_ops = {
        'accuracy':
            tf.contrib.metrics.streaming_accuracy(
                layers_lib.predictions(logits), labels, weights)
    }

    with tf.control_dependencies([inputs.save_state(next_state)]):
      acc, acc_update = eval_ops['accuracy']
      acc_update = tf.identity(acc_update)
      eval_ops['accuracy'] = (acc, acc_update)

    var_restore_dict = make_restore_average_vars_dict()
    return eval_ops, var_restore_dict 
Developer: itsamitgoel, Project: Gun-Detector, Lines: 38, Source file: graphs.py


Note: The layers.accuracy method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers, and their copyright belongs to the original authors. Please consult the corresponding project's license before distributing or using the code; do not reproduce without permission.