

Python v1.variable_scope Method Code Examples

This article collects typical usage examples of the tensorflow.compat.v1.variable_scope method in Python. If you are wondering what exactly v1.variable_scope does, how to use it, or what it looks like in practice, the curated code examples below should help. You can also explore further usage examples from the enclosing tensorflow.compat.v1 module.


The sections below present 15 code examples of the v1.variable_scope method, sorted by popularity by default.
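
Before diving into the examples, here is a minimal standalone sketch (not taken from any of the projects below) of the core behaviour they all rely on: variables created with tf.get_variable inside a variable_scope are prefixed with the scope name, and re-entering the scope with reuse=True returns the same variable instead of creating a new one. It assumes TensorFlow 2.x's compat.v1 API (or TF 1.15+).

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()  # variable_scope/get_variable are graph-mode APIs

with tf.variable_scope('encoder'):
  w = tf.get_variable('w', shape=[3, 3], initializer=tf.zeros_initializer())

# Re-entering the same scope with reuse=True looks up the existing variable.
with tf.variable_scope('encoder', reuse=True):
  w_again = tf.get_variable('w')

print(w.name)        # encoder/w:0
print(w is w_again)  # True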

Example 1: _build_aux_head

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import variable_scope [as alias]
def _build_aux_head(net, end_points, num_classes, hparams, scope):
  """Auxiliary head used for all models across all datasets."""
  with tf.variable_scope(scope):
    aux_logits = tf.identity(net)
    with tf.variable_scope('aux_logits'):
      aux_logits = slim.avg_pool2d(
          aux_logits, [5, 5], stride=3, padding='VALID')
      aux_logits = slim.conv2d(aux_logits, 128, [1, 1], scope='proj')
      aux_logits = slim.batch_norm(aux_logits, scope='aux_bn0')
      aux_logits = tf.nn.relu(aux_logits)
      # Shape of feature map before the final layer.
      shape = aux_logits.shape
      if hparams.data_format == 'NHWC':
        shape = shape[1:3]
      else:
        shape = shape[2:4]
      aux_logits = slim.conv2d(aux_logits, 768, shape, padding='VALID')
      aux_logits = slim.batch_norm(aux_logits, scope='aux_bn1')
      aux_logits = tf.nn.relu(aux_logits)
      aux_logits = contrib_layers.flatten(aux_logits)
      aux_logits = slim.fully_connected(aux_logits, num_classes)
      end_points['AuxLogits'] = aux_logits 
Developer: tensorflow, Project: benchmarks, Lines of code: 24, Source file: nasnet_model.py

Example 2: __init__

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import variable_scope [as alias]
def __init__(self, tensors):
    tensors = list(tensors)
    with tf.variable_scope('averaged'):
      self._num_samples = tf.Variable(0, name='num_samples', trainable=False)
      with tf.variable_scope('avg'):
        self._averages = [
            tf.get_variable(
                tensor.name.replace('/', '-').replace(':', '-'),
                tensor.get_shape(), initializer=tf.zeros_initializer(),
                trainable=False)
            for tensor in tensors]
      with tf.variable_scope('save'):
        self._saves = [
            tf.get_variable(
                tensor.name.replace('/', '-').replace(':', '-'),
                tensor.get_shape(), initializer=tf.zeros_initializer(),
                trainable=False)
            for tensor in tensors]
    self._tensors = tensors
    self._take_sample = self._make_take_sample()
    self._switch = self._make_swith_to_average()
    self._restore = self._make_restore()
    self._reset = self._make_reset() 
Developer: deepmind, Project: lamb, Lines of code: 25, Source file: averaged.py
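
As a quick, hypothetical illustration of the nested scoping used in Example 2 (not part of the lamb project; the variable name below is made up to mirror the tensor.name.replace pattern), each enclosing variable_scope contributes a prefix to the variable names:

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

with tf.variable_scope('averaged'):
  num_samples = tf.Variable(0, name='num_samples', trainable=False)
  with tf.variable_scope('avg'):
    avg = tf.get_variable('some-tensor-0', shape=[4],
                          initializer=tf.zeros_initializer(),
                          trainable=False)

print(num_samples.name)  # averaged/num_samples:0
print(avg.name)          # averaged/avg/some-tensor-0:0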

Example 3: layer_norm

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import variable_scope [as alias]
def layer_norm(x, reduction_indices, epsilon=1e-9, gain=None, bias=None,
               per_element=True, scope=None):
  """DOC."""
  reduction_indices = ensure_list(reduction_indices)
  mean = tf.reduce_mean(x, reduction_indices, keep_dims=True)
  variance = tf.reduce_mean(tf.squared_difference(x, mean),
                            reduction_indices, keep_dims=True)
  normalized = (x - mean) / tf.sqrt(variance + epsilon)
  dtype = x.dtype
  shape = x.get_shape().as_list()
  for i in six.moves.range(len(shape)):
    if i not in reduction_indices or not per_element:
      shape[i] = 1
  with tf.variable_scope(scope or 'layer_norm'):
    if gain is None:
      gain = tf.get_variable('gain', shape=shape, dtype=dtype,
                             initializer=tf.ones_initializer())
    if bias is None:
      bias = tf.get_variable('bias', shape=shape, dtype=dtype,
                             initializer=tf.zeros_initializer())
  return gain*normalized+bias 
Developer: deepmind, Project: lamb, Lines of code: 23, Source file: utils.py
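
A hypothetical call to the layer_norm helper above might look like the following; it assumes layer_norm and its ensure_list/six dependencies are already defined in the module, and normalizes over the feature axis:

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

x = tf.random_normal([8, 16])             # [batch, features]
y = layer_norm(x, reduction_indices=[1],  # normalize over axis 1
               scope='layer_norm_demo')

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())  # initializes 'gain' and 'bias'
  print(sess.run(y).shape)  # (8, 16)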

Example 4: padded_accuracy_topk

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import variable_scope [as alias]
def padded_accuracy_topk(predictions,
                         labels,
                         k,
                         weights_fn=common_layers.weights_nonzero):
  """Percentage of times that top-k predictions matches labels on non-0s."""
  with tf.variable_scope("padded_accuracy_topk", values=[predictions, labels]):
    padded_predictions, padded_labels = common_layers.pad_with_zeros(
        predictions, labels)
    weights = weights_fn(padded_labels)
    effective_k = tf.minimum(k,
                             common_layers.shape_list(padded_predictions)[-1])
    _, outputs = tf.nn.top_k(padded_predictions, k=effective_k)
    outputs = tf.to_int32(outputs)
    padded_labels = tf.to_int32(padded_labels)
    padded_labels = tf.expand_dims(padded_labels, axis=-1)
    padded_labels += tf.zeros_like(outputs)  # Pad to same shape.
    same = tf.to_float(tf.equal(outputs, padded_labels))
    same_topk = tf.reduce_sum(same, axis=-1)
    return same_topk, weights 
Developer: tensorflow, Project: tensor2tensor, Lines of code: 21, Source file: metrics.py

Example 5: set_precision

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import variable_scope [as alias]
def set_precision(predictions, labels,
                  weights_fn=common_layers.weights_nonzero):
  """Precision of set predictions.

  Args:
    predictions: A Tensor of scores of shape [batch, nlabels].
    labels: A Tensor of int32s giving true set elements,
      of shape [batch, seq_length].
    weights_fn: A function to weight the elements.

  Returns:
    hits: A Tensor of shape [batch, nlabels].
    weights: A Tensor of shape [batch, nlabels].
  """
  with tf.variable_scope("set_precision", values=[predictions, labels]):
    labels = tf.squeeze(labels, [2, 3])
    weights = weights_fn(labels)
    labels = tf.one_hot(labels, predictions.shape[-1])
    labels = tf.reduce_max(labels, axis=1)
    labels = tf.cast(labels, tf.bool)
    return tf.to_float(tf.equal(labels, predictions)), weights 
Developer: tensorflow, Project: tensor2tensor, Lines of code: 23, Source file: metrics.py

Example 6: set_recall

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import variable_scope [as alias]
def set_recall(predictions, labels, weights_fn=common_layers.weights_nonzero):
  """Recall of set predictions.

  Args:
    predictions: A Tensor of scores of shape [batch, nlabels].
    labels: A Tensor of int32s giving true set elements,
      of shape [batch, seq_length].
    weights_fn: A function to weight the elements.

  Returns:
    hits: A Tensor of shape [batch, nlabels].
    weights: A Tensor of shape [batch, nlabels].
  """
  with tf.variable_scope("set_recall", values=[predictions, labels]):
    labels = tf.squeeze(labels, [2, 3])
    weights = weights_fn(labels)
    labels = tf.one_hot(labels, predictions.shape[-1])
    labels = tf.reduce_max(labels, axis=1)
    labels = tf.cast(labels, tf.bool)
    return tf.to_float(tf.equal(labels, predictions)), weights 
Developer: tensorflow, Project: tensor2tensor, Lines of code: 22, Source file: metrics.py

Example 7: softmax_cross_entropy_one_hot

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import variable_scope [as alias]
def softmax_cross_entropy_one_hot(logits, labels, weights_fn=None):
  """Calculate softmax cross entropy given one-hot labels and logits.

  Args:
    logits: Tensor of size [batch-size, o=1, p=1, num-classes]
    labels: Tensor of size [batch-size, o=1, p=1, num-classes]
    weights_fn: Function that takes in labels and weighs examples (unused)
  Returns:
    cross-entropy (scalar), weights
  """
  with tf.variable_scope("softmax_cross_entropy_one_hot",
                         values=[logits, labels]):
    del weights_fn
    cross_entropy = tf.losses.softmax_cross_entropy(
        onehot_labels=labels, logits=logits)
    return cross_entropy, tf.constant(1.0) 
Developer: tensorflow, Project: tensor2tensor, Lines of code: 18, Source file: metrics.py

Example 8: sigmoid_accuracy_one_hot

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import variable_scope [as alias]
def sigmoid_accuracy_one_hot(logits, labels, weights_fn=None):
  """Calculate accuracy for a set, given one-hot labels and logits.

  Args:
    logits: Tensor of size [batch-size, o=1, p=1, num-classes]
    labels: Tensor of size [batch-size, o=1, p=1, num-classes]
    weights_fn: Function that takes in labels and weighs examples (unused)
  Returns:
    accuracy (scalar), weights
  """
  with tf.variable_scope("sigmoid_accuracy_one_hot", values=[logits, labels]):
    del weights_fn
    predictions = tf.nn.sigmoid(logits)
    labels = tf.argmax(labels, -1)
    predictions = tf.argmax(predictions, -1)
    _, accuracy = tf.metrics.accuracy(labels=labels, predictions=predictions)
    return accuracy, tf.constant(1.0) 
Developer: tensorflow, Project: tensor2tensor, Lines of code: 19, Source file: metrics.py
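
As a hypothetical usage sketch for the tf.metrics-based helpers such as sigmoid_accuracy_one_hot in Example 8 (assuming that function is in scope), note that tf.metrics.* creates local variables that must be initialized before the returned op can be evaluated:

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

logits = tf.constant([[[[2.0, -1.0, 0.5]]]])  # [batch=1, o=1, p=1, num_classes=3]
labels = tf.constant([[[[1.0, 0.0, 0.0]]]])

accuracy, weight = sigmoid_accuracy_one_hot(logits, labels)

with tf.Session() as sess:
  sess.run(tf.local_variables_initializer())  # tf.metrics.accuracy uses local vars
  print(sess.run([accuracy, weight]))         # [1.0, 1.0]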

Example 9: sigmoid_precision_one_hot

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import variable_scope [as alias]
def sigmoid_precision_one_hot(logits, labels, weights_fn=None):
  """Calculate precision for a set, given one-hot labels and logits.

  Predictions are converted to one-hot,
  as predictions[example][arg-max(example)] = 1

  Args:
    logits: Tensor of size [batch-size, o=1, p=1, num-classes]
    labels: Tensor of size [batch-size, o=1, p=1, num-classes]
    weights_fn: Function that takes in labels and weighs examples (unused)
  Returns:
    precision (scalar), weights
  """
  with tf.variable_scope("sigmoid_precision_one_hot", values=[logits, labels]):
    del weights_fn
    num_classes = logits.shape[-1]
    predictions = tf.nn.sigmoid(logits)
    predictions = tf.argmax(predictions, -1)
    predictions = tf.one_hot(predictions, num_classes)
    _, precision = tf.metrics.precision(labels=labels, predictions=predictions)
    return precision, tf.constant(1.0) 
Developer: tensorflow, Project: tensor2tensor, Lines of code: 23, Source file: metrics.py

Example 10: sigmoid_recall_one_hot

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import variable_scope [as alias]
def sigmoid_recall_one_hot(logits, labels, weights_fn=None):
  """Calculate recall for a set, given one-hot labels and logits.

  Predictions are converted to one-hot,
  as predictions[example][arg-max(example)] = 1

  Args:
    logits: Tensor of size [batch-size, o=1, p=1, num-classes]
    labels: Tensor of size [batch-size, o=1, p=1, num-classes]
    weights_fn: Function that takes in labels and weighs examples (unused)
  Returns:
    recall (scalar), weights
  """
  with tf.variable_scope("sigmoid_recall_one_hot", values=[logits, labels]):
    del weights_fn
    num_classes = logits.shape[-1]
    predictions = tf.nn.sigmoid(logits)
    predictions = tf.argmax(predictions, -1)
    predictions = tf.one_hot(predictions, num_classes)
    _, recall = tf.metrics.recall(labels=labels, predictions=predictions)
    return recall, tf.constant(1.0) 
Developer: tensorflow, Project: tensor2tensor, Lines of code: 23, Source file: metrics.py

Example 11: sigmoid_cross_entropy_one_hot

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import variable_scope [as alias]
def sigmoid_cross_entropy_one_hot(logits, labels, weights_fn=None):
  """Calculate sigmoid cross entropy for one-hot lanels and logits.

  Args:
    logits: Tensor of size [batch-size, o=1, p=1, num-classes]
    labels: Tensor of size [batch-size, o=1, p=1, num-classes]
    weights_fn: Function that takes in labels and weighs examples (unused)
  Returns:
    cross_entropy (scalar), weights
  """
  with tf.variable_scope("sigmoid_cross_entropy_one_hot",
                         values=[logits, labels]):
    del weights_fn
    cross_entropy = tf.losses.sigmoid_cross_entropy(
        multi_class_labels=labels, logits=logits)
    return cross_entropy, tf.constant(1.0) 
Developer: tensorflow, Project: tensor2tensor, Lines of code: 18, Source file: metrics.py

Example 12: roc_auc

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import variable_scope [as alias]
def roc_auc(logits, labels, weights_fn=None):
  """Calculate ROC AUC.

  Requires binary classes.

  Args:
    logits: Tensor of size [batch_size, 1, 1, num_classes]
    labels: Tensor of size [batch_size, 1, 1, num_classes]
    weights_fn: Function that takes in labels and weighs examples (unused)
  Returns:
    ROC AUC (scalar), weights
  """
  del weights_fn
  with tf.variable_scope("roc_auc", values=[logits, labels]):
    predictions = tf.argmax(logits, axis=-1)
    _, auc = tf.metrics.auc(labels, predictions, curve="ROC")
    return auc, tf.constant(1.0) 
Developer: tensorflow, Project: tensor2tensor, Lines of code: 19, Source file: metrics.py

Example 13: loss_fn

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import variable_scope [as alias]
def loss_fn(self, targets, logits):
    """Constructs loss dict.

    Args:
      targets: [batch_size, seq_len]
      logits: [batch_size, seq_len, vocab_size]

    Returns:
      {str: Tensor of shape []}. Losses.
    """
    batch_size, seq_len, vocab_size = common_layers.shape_list(logits)
    targets = tf.reshape(targets, [batch_size, seq_len, 1, 1])
    logits = tf.reshape(logits, [batch_size, seq_len, 1, 1, vocab_size])
    features = copy.copy(self._features)
    features["targets"] = targets

    with tf.variable_scope(tf.get_variable_scope(), reuse=True):
      losses = {
          "training": self._t2tmodel.loss(logits, features),
      }

    return losses 
Developer: tensorflow, Project: tensor2tensor, Lines of code: 24, Source file: scheduled_sampling.py
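
Example 13 re-enters the current scope with reuse=True so that the loss computation reuses the already-built model variables. A minimal standalone sketch of that pattern (not from the tensor2tensor code):

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

with tf.variable_scope('model'):
  w = tf.get_variable('w', shape=[2, 2], initializer=tf.ones_initializer())
  # Re-enter the *current* scope with reuse=True to look up existing variables.
  with tf.variable_scope(tf.get_variable_scope(), reuse=True):
    w_reused = tf.get_variable('w')

print(w is w_reused)  # True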

Example 14: fprop

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import variable_scope [as alias]
def fprop(self, x):
    if x.name in self._logits_dict:
      return self._logits_dict[x.name]

    x = tf.map_fn(tf.image.per_image_standardization, x)
    self._additional_features['inputs'] = x

    if self._scope is None:
      scope = tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE)
    else:
      scope = tf.variable_scope(self._scope, reuse=tf.AUTO_REUSE)

    with scope:
      logits = self._model_fn(
          self._additional_features,
          None,
          'attack',
          params=self._params,
          config=self._config)
    self._logits_dict[x.name] = logits

    return {model.Model.O_LOGITS: tf.reshape(logits, [-1, logits.shape[-1]])} 
Developer: tensorflow, Project: tensor2tensor, Lines of code: 24, Source file: adv_attack_utils.py
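
Example 14 relies on reuse=tf.AUTO_REUSE, which creates a variable the first time a scope is entered and reuses it on subsequent entries. A minimal, hypothetical sketch of that behaviour (unrelated to the adversarial-attack code above):

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

def shared_dense(x):
  with tf.variable_scope('shared', reuse=tf.AUTO_REUSE):
    w = tf.get_variable('w', shape=[4, 2],
                        initializer=tf.glorot_uniform_initializer())
    return tf.matmul(x, w)

a = shared_dense(tf.ones([1, 4]))
b = shared_dense(tf.ones([1, 4]))  # second call reuses 'shared/w'

print(len(tf.trainable_variables()))  # 1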

Example 15: _rnn

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import variable_scope [as alias]
def _rnn(self, inputs, name, initial_state=None, sequence_length=None):
    """A helper method to build tf.nn.dynamic_rnn.

    Args:
      inputs: The inputs to the RNN. A tensor of shape
              [batch_size, max_seq_length, embedding_size]
      name: A namespace for the RNN.
      initial_state: An optional initial state for the RNN.
      sequence_length: An optional sequence length for the RNN.

    Returns:
      A tf.nn.dynamic_rnn operator.
    """
    layers = [self.cell(layer_size)
              for layer_size in self._hparams.controller_layer_sizes]
    with tf.variable_scope(name):
      return tf.nn.dynamic_rnn(
          contrib.rnn().MultiRNNCell(layers),
          inputs,
          initial_state=initial_state,
          sequence_length=sequence_length,
          dtype=tf.float32,
          time_major=False) 
Developer: tensorflow, Project: tensor2tensor, Lines of code: 25, Source file: neural_stack.py
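
Example 15 wraps tf.nn.dynamic_rnn in a named scope so each controller RNN gets its own variable namespace. A simplified, standalone sketch using the stock tf.nn.rnn_cell cells instead of the contrib wrapper (an assumption on my part, not the original project's cell choice):

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

inputs = tf.zeros([2, 5, 8])  # [batch_size, max_seq_length, embedding_size]
layers = [tf.nn.rnn_cell.LSTMCell(16) for _ in range(2)]

with tf.variable_scope('controller_rnn'):
  outputs, state = tf.nn.dynamic_rnn(
      tf.nn.rnn_cell.MultiRNNCell(layers),
      inputs,
      dtype=tf.float32,
      time_major=False)

print(outputs.shape)  # (2, 5, 16)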


Note: The tensorflow.compat.v1.variable_scope examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors. Please follow the corresponding project's license when distributing or using the code, and do not reproduce this article without permission.