

Python v1.get_variable_scope Method Code Examples

This article collects typical usage examples of the tensorflow.compat.v1.get_variable_scope method in Python. If you are unsure what v1.get_variable_scope does or how to call it, the curated code examples below should help. You can also explore further usage examples from the tensorflow.compat.v1 module to which this method belongs.


The following presents 15 code examples of the v1.get_variable_scope method, ordered by popularity by default.
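Before the examples, here is a minimal, self-contained sketch of the core pattern most of the snippets below rely on (written for illustration, not taken from any of the listed projects): tf.get_variable_scope() returns the current VariableScope object, and its reuse_variables(), set_initializer() and set_custom_getter() methods control how tf.get_variable behaves inside that scope.

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

with tf.variable_scope("model"):
  v = tf.get_variable("w", shape=[2, 2])
  # Mark the current scope as reusing: a second tf.get_variable call with
  # the same name now returns the existing variable instead of raising a
  # "variable already exists" error.
  tf.get_variable_scope().reuse_variables()
  v_again = tf.get_variable("w", shape=[2, 2])
assert v is v_again

This reuse pattern is exactly what Examples 5 and 6 below use to share weights between a training graph and an evaluation graph.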

Example 1: call

# Required module: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import get_variable_scope [as alias]
def call(self, inputs, **kwargs):
    del kwargs
    features = inputs
    set_custom_getter_compose(self._custom_getter)
    tf.get_variable_scope().set_initializer(
        optimize.get_variable_initializer(self.hparams))
    with self._eager_var_store.as_default():
      self._fill_problem_hparams_features(features)
      summarize_features(features, num_shards=self._num_datashards)
      sharded_features = self._shard_features(features)
      sharded_logits, losses = self.model_fn_sharded(sharded_features)
      if isinstance(sharded_logits, dict):
        concat_logits = {}
        for k, v in six.iteritems(sharded_logits):
          concat_logits[k] = tf.concat(v, 0)
        return concat_logits, losses
      else:
        return tf.concat(sharded_logits, 0), losses 
Developer ID: tensorflow, Project: tensor2tensor, Lines of code: 20, Source file: t2t_model.py

Example 2: body

# Required module: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import get_variable_scope [as alias]
def body(self, features):
    hparams = copy.copy(self._hparams)
    inputs = features["inputs"]
    targets = features["targets"]
    targets_shape = common_layers.shape_list(targets)
    if not (tf.get_variable_scope().reuse or
            hparams.mode == tf.estimator.ModeKeys.PREDICT):
      tf.summary.image("targets", targets, max_outputs=1)

    decoder_input, rows, cols = cia.prepare_decoder(
        targets, hparams)
    # Add class label to decoder input.
    if not hparams.unconditional:
      decoder_input += tf.reshape(inputs,
                                  [targets_shape[0], 1, 1, hparams.hidden_size])

    decoder_output = cia.transformer_decoder_layers(
        decoder_input, None,
        hparams.num_decoder_layers,
        hparams,
        attention_type=hparams.dec_attention_type,
        name="decoder")

    output = cia.create_output(decoder_output, rows, cols, targets, hparams)
    return output 
Developer ID: tensorflow, Project: tensor2tensor, Lines of code: 27, Source file: image_transformer_2d.py

Example 3: image_top

# Required module: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import get_variable_scope [as alias]
def image_top(body_output, targets, model_hparams, vocab_size):
  """Top transformation for images."""
  del targets  # unused arg
  # TODO(lukaszkaiser): is this a universal enough way to get channels?
  num_channels = model_hparams.problem.num_channels
  with tf.variable_scope("rgb_softmax"):
    body_output_shape = common_layers.shape_list(body_output)
    reshape_shape = body_output_shape[:3]
    reshape_shape.extend([num_channels, vocab_size])
    res = tf.layers.dense(body_output, vocab_size * num_channels)
    res = tf.reshape(res, reshape_shape)
    if not tf.get_variable_scope().reuse:
      res_argmax = tf.argmax(res, axis=-1)
      tf.summary.image(
          "result",
          common_layers.tpu_safe_image_summary(res_argmax),
          max_outputs=1)
    return res 
Developer ID: tensorflow, Project: tensor2tensor, Lines of code: 20, Source file: modalities.py

Example 4: video_l1_top

# Required module: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import get_variable_scope [as alias]
def video_l1_top(body_output, targets, model_hparams, vocab_size):
  """Top transformation for video."""
  del targets, vocab_size  # unused arg
  num_channels = model_hparams.problem.num_channels
  num_frames = model_hparams.video_num_target_frames
  with tf.variable_scope("rgb"):
    body_output_shape = common_layers.shape_list(body_output)
    res = tf.layers.dense(body_output, num_channels * num_frames, name="cast")
    res = tf.reshape(res, body_output_shape[:3] + [num_channels, num_frames])
    res = tf.transpose(res, [0, 4, 1, 2, 3])  # Move frames next to batch.
    if not tf.get_variable_scope().reuse:
      res_argmax = res[:, -1, :, :, :]
      tf.summary.image(
          "result",
          common_layers.tpu_safe_image_summary(res_argmax),
          max_outputs=1)
    return tf.expand_dims(res, axis=-1)  # Add an axis like in perplexity. 
Developer ID: tensorflow, Project: tensor2tensor, Lines of code: 19, Source file: modalities.py

Example 5: testTrainEvalWithReuse

# Required module: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import get_variable_scope [as alias]
def testTrainEvalWithReuse(self):
    train_batch_size = 2
    eval_batch_size = 1
    train_height, train_width = 231, 231
    eval_height, eval_width = 281, 281
    num_classes = 1000
    with self.test_session():
      train_inputs = tf.random.uniform(
          (train_batch_size, train_height, train_width, 3))
      logits, _ = overfeat.overfeat(train_inputs)
      self.assertListEqual(logits.get_shape().as_list(),
                           [train_batch_size, num_classes])
      tf.get_variable_scope().reuse_variables()
      eval_inputs = tf.random.uniform(
          (eval_batch_size, eval_height, eval_width, 3))
      logits, _ = overfeat.overfeat(eval_inputs, is_training=False,
                                    spatial_squeeze=False)
      self.assertListEqual(logits.get_shape().as_list(),
                           [eval_batch_size, 2, 2, num_classes])
      logits = tf.reduce_mean(input_tensor=logits, axis=[1, 2])
      predictions = tf.argmax(input=logits, axis=1)
      self.assertEqual(predictions.get_shape().as_list(), [eval_batch_size])
Developer ID: tensorflow, Project: models, Lines of code: 24, Source file: overfeat_test.py

Example 6: testTrainEvalWithReuse

# Required module: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import get_variable_scope [as alias]
def testTrainEvalWithReuse(self):
    train_batch_size = 2
    eval_batch_size = 1
    train_height, train_width = 224, 224
    eval_height, eval_width = 256, 256
    num_classes = 1000
    with self.test_session():
      train_inputs = tf.random.uniform(
          (train_batch_size, train_height, train_width, 3))
      logits, _ = vgg.vgg_a(train_inputs)
      self.assertListEqual(logits.get_shape().as_list(),
                           [train_batch_size, num_classes])
      tf.get_variable_scope().reuse_variables()
      eval_inputs = tf.random.uniform(
          (eval_batch_size, eval_height, eval_width, 3))
      logits, _ = vgg.vgg_a(eval_inputs, is_training=False,
                            spatial_squeeze=False)
      self.assertListEqual(logits.get_shape().as_list(),
                           [eval_batch_size, 2, 2, num_classes])
      logits = tf.reduce_mean(input_tensor=logits, axis=[1, 2])
      predictions = tf.argmax(input=logits, axis=1)
      self.assertEqual(predictions.get_shape().as_list(), [eval_batch_size])
Developer ID: tensorflow, Project: models, Lines of code: 24, Source file: vgg_test.py

Example 7: conv

# Required module: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import get_variable_scope [as alias]
def conv(x, conv_size, depth, stride, padding='SAME', params=None):
  """A block that performs convolution."""
  params_keys, params_vars = [], []
  scope_name = tf.get_variable_scope().name
  input_depth = x.get_shape().as_list()[-1]
  if params is None:
    w_conv = weight_variable([conv_size[0], conv_size[1], input_depth, depth])
  else:
    w_conv = params[scope_name + '/kernel']

  params_keys += [scope_name + '/kernel']
  params_vars += [w_conv]

  x = conv2d(x, w_conv, stride=stride, padding=padding)
  params = collections.OrderedDict(zip(params_keys, params_vars))

  return x, params 
Developer ID: google-research, Project: meta-dataset, Lines of code: 19, Source file: learner.py

Example 8: _get_variable

# Required module: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import get_variable_scope [as alias]
def _get_variable(self, name, shape,
                    default_initializer=None, default_partitioner=None,
                    default_regularizer=None):
    if len(shape) != 2:
      return super(OverlaidTiledLinear, self)._get_variable(
          name, shape, default_initializer=default_initializer,
          default_partitioner=default_partitioner,
          default_regularizer=default_regularizer)
    else:
      rank = self._find_var_init_param(name, 'overlay_rank', 0)
      sharing_key = self._find_var_init_param(name, 'overlay_sharing_key',
                                              ':name:')
      if sharing_key == ':name:':
        sharing_key = name
      if sharing_key == ':shape:':
        sharing_key = shape
      if (sharing_key in self._matrix_cache and
          not tf.get_variable_scope().reuse):
        scaler = super(OverlaidTiledLinear, self)._get_variable(
            's_'+name, [shape[1]], default_initializer=tf.ones_initializer())
        base = scaler*self._matrix_cache[sharing_key]
      else:
        base = super(OverlaidTiledLinear, self)._get_variable(
            sharing_key, shape, default_initializer=default_initializer,
            default_partitioner=default_partitioner,
            default_regularizer=default_regularizer)
        self._matrix_cache[sharing_key] = base
      if rank == 0:
        return base
      else:
        overlay = self._low_rank_matrix(name, rank=rank, shape=shape)
        return base+overlay 
Developer ID: deepmind, Project: lamb, Lines of code: 34, Source file: tiled_linear.py

Example 9: model_fn

# Required module: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import get_variable_scope [as alias]
def model_fn(self, features):
    with tf.variable_scope(tf.get_variable_scope(), use_resource=True) as vs:
      self._add_variable_scope("model_fn", vs)
      transformed_features = self.bottom(features)

      if self.hparams.activation_dtype == "bfloat16":
        for k, v in sorted(six.iteritems(transformed_features)):
          if v.dtype == tf.float32:
            transformed_features[k] = tf.cast(v, tf.bfloat16)

      with tf.variable_scope("body") as body_vs:
        self._add_variable_scope("body", body_vs)
        log_info("Building model body")
        body_out = self.body(transformed_features)
      output, losses = self._normalize_body_output(body_out)

      if "training" in losses:
        log_info("Skipping T2TModel top and loss because training loss "
                 "returned from body")
        logits = output
      else:
        logits = self.top(output, features)
        losses["training"] = 0.0
        if (self._hparams.mode != tf.estimator.ModeKeys.PREDICT and
            self._hparams.mode != "attack"):
          losses["training"] = self.loss(logits, features)

      return logits, losses 
Developer ID: tensorflow, Project: tensor2tensor, Lines of code: 30, Source file: t2t_model.py

Example 10: _compose_custom_getters

# Required module: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import get_variable_scope [as alias]
def _compose_custom_getters(getter_a, getter_b):
  """Compose two custom getters.

  Example use:
  tf.get_variable_scope().set_custom_getter(
    compose_custom_getters(tf.get_variable_scope().custom_getter, new_getter))

  This composes getters in the same way as creating a new variable scope with
  the new_getter, but it does not actually create a new variable scope.

  Args:
    getter_a: a custom getter - generally from the existing variable scope.
    getter_b: a custom getter

  Returns:
    a custom getter
  """
  if not getter_a:
    return getter_b
  if not getter_b:
    return getter_a

  def getter_fn(getter, *args, **kwargs):
    return getter_b(functools.partial(getter_a, getter), *args, **kwargs)

  return getter_fn 
Developer ID: tensorflow, Project: tensor2tensor, Lines of code: 28, Source file: t2t_model.py

Example 11: set_custom_getter_compose

# Required module: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import get_variable_scope [as alias]
def set_custom_getter_compose(custom_getter):
  """Set a custom getter in the current variable scope.

  Do not overwrite the existing custom getter - rather compose with it.

  Args:
    custom_getter: a custom getter.
  """
  tf.get_variable_scope().set_custom_getter(
      _compose_custom_getters(tf.get_variable_scope().custom_getter,
                              custom_getter)) 
Developer ID: tensorflow, Project: tensor2tensor, Lines of code: 13, Source file: t2t_model.py
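A short usage sketch (the logging_getter below is hypothetical, not part of tensor2tensor): a custom getter receives the underlying getter as its first argument, so composing one into the current scope via set_custom_getter_compose intercepts every variable created there without discarding getters that were installed earlier.

import tensorflow.compat.v1 as tf

def logging_getter(getter, name, *args, **kwargs):
  # Log each variable request, then delegate to the wrapped getter.
  tf.logging.info("Creating variable: %s", name)
  return getter(name, *args, **kwargs)

set_custom_getter_compose(logging_getter)
w = tf.get_variable("w", shape=[4])  # passes through logging_getter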

Example 12: infer

# Required module: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import get_variable_scope [as alias]
def infer(self,
            features,
            *args,
            **kwargs):
    """Produce predictions from the model."""
    del args, kwargs
    inputs_old = None
    if "inputs" in features and len(features["inputs"].shape) < 4:
      inputs_old = features["inputs"]
      features["inputs"] = tf.expand_dims(features["inputs"], 2)
    features["targets"] = tf.identity(features["inputs"])

    # logits, _ = self(features)
    t2t_model.set_custom_getter_compose(self._custom_getter)
    tf.get_variable_scope().set_initializer(
        optimize.get_variable_initializer(self.hparams))
    with self._eager_var_store.as_default():
      self._fill_problem_hparams_features(features)
      # intentionally disable sharding during inference (in multi GPU)
      with tf.variable_scope(self.name):
        logits, _, _, targets_mask = self.model_fn(features)

    samples = tf.argmax(logits, axis=-1)
    samples = tf.where(
        tf.cast(targets_mask[..., tf.newaxis, tf.newaxis], tf.bool),
        samples, tf.ones_like(samples))
    if inputs_old is not None:  # Restore to not confuse Estimator.
      features["inputs"] = inputs_old
    return samples 
Developer ID: tensorflow, Project: tensor2tensor, Lines of code: 31, Source file: transformer_vae_flow_prior.py

Example 13: body

# Required module: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import get_variable_scope [as alias]
def body(self, features):
    hparams = copy.copy(self._hparams)
    targets = features["targets"]
    if (hparams.likelihood == cia.DistributionType.DMOL and
        hparams.num_channels != 1):
      raise ValueError("When using DMOL for the likelihood, bottom function "
                       " must be identity and num_channels must be 1.")
    if (not tf.get_variable_scope().reuse and
        hparams.mode != tf.estimator.ModeKeys.PREDICT):
      tf.summary.image("targets", tf.to_float(targets), max_outputs=1)

    # Extra losses list if we want to use moe.
    losses = []
    # Prepare decoder inputs and bias.
    decoder_input, rows, cols = cia.prepare_decoder(targets, hparams)
    # Add class label to decoder input.
    if not hparams.unconditional:
      inputs = features["inputs"]
      decoder_input += tf.reshape(
          inputs,
          [common_layers.shape_list(targets)[0], 1, 1, hparams.hidden_size])
    decoder_output = cia.transformer_decoder_layers(
        decoder_input,
        None,
        hparams.num_decoder_layers or hparams.num_hidden_layers,
        hparams,
        attention_type=hparams.dec_attention_type,
        losses=losses,
        name="decoder")
    output = cia.create_output(decoder_output, rows, cols, targets, hparams)

    if losses:
      return output, {"extra_loss": tf.add_n(losses)}
    else:
      return output 
Developer ID: tensorflow, Project: tensor2tensor, Lines of code: 37, Source file: image_transformer.py

Example 14: dense

# Required module: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import get_variable_scope [as alias]
def dense(x, units, **kwargs):
  """Identical to layers.dense."""
  layer_collection = kwargs.pop("layer_collection", None)
  activations = layers().Dense(units, **kwargs)(x)
  if layer_collection:
    # We need to find the layer parameters using scope name for the layer, so
    # check that the layer is named. Otherwise parameters for different layers
    # may get mixed up.
    layer_name = tf.get_variable_scope().name
    if (not layer_name) or ("name" not in kwargs):
      raise ValueError(
          "Variable scope and layer name cannot be empty. Actual: "
          "variable_scope={}, layer name={}".format(
              layer_name, kwargs.get("name", None)))

    layer_name += "/" + kwargs["name"]
    layer_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                     scope=layer_name)
    assert layer_params
    if len(layer_params) == 1:
      layer_params = layer_params[0]

    tf.logging.info(
        "Registering dense layer to collection for tensor: {}".format(
            layer_params))

    x_shape = x.shape.as_list()
    if len(x_shape) == 3:
      # Handle [batch, time, depth] inputs by folding batch and time into
      # one dimension: reshaping inputs to [batchxtime, depth].
      x_2d = tf.reshape(x, [-1, x_shape[2]])
      activations_shape = activations.shape.as_list()
      activations_2d = tf.reshape(activations, [-1, activations_shape[2]])
      layer_collection.register_fully_connected_multi(
          layer_params, x_2d, activations_2d, num_uses=x_shape[1])
      activations = tf.reshape(activations_2d, activations_shape)
    else:
      layer_collection.register_fully_connected(layer_params, x, activations)
  return activations 
Developer ID: tensorflow, Project: tensor2tensor, Lines of code: 41, Source file: common_layers.py

Example 15: should_generate_summaries

# Required module: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import get_variable_scope [as alias]
def should_generate_summaries():
  """Is this an appropriate context to generate summaries.

  Returns:
    a boolean
  """
  name_scope = contrib.framework().get_name_scope()
  if name_scope and "while/" in name_scope:
    # Summaries don't work well within tf.while_loop()
    return False
  if tf.get_variable_scope().reuse:
    # Avoid generating separate summaries for different data shards
    return False
  return True 
Developer ID: tensorflow, Project: tensor2tensor, Lines of code: 16, Source file: common_layers.py
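A hedged usage sketch (maybe_scalar_summary is a hypothetical helper, not from tensor2tensor): this predicate is typically used to guard summary ops so they are silently skipped inside tf.while_loop bodies and in reused variable scopes, e.g. on secondary data shards.

def maybe_scalar_summary(name, tensor):
  # Emit the summary only in contexts where summaries work correctly.
  if should_generate_summaries():
    tf.summary.scalar(name, tf.reduce_mean(tensor))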


Note: The tensorflow.compat.v1.get_variable_scope method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers, and copyright remains with the original authors. Refer to each project's License before distributing or using the code; do not reproduce without permission.