

Python tensorflow.VariableScope Code Examples

This article collects typical usage examples of Python's tensorflow.VariableScope, drawn from open-source projects. If you are wondering what tensorflow.VariableScope does or how to use it, the curated examples below may help; you can also explore further usage examples from the tensorflow package, where this method is defined.


Six code examples of the tensorflow.VariableScope method are shown below, sorted by popularity by default. You can upvote the examples you find useful; your ratings help the system recommend better Python code examples.

Example 1: build

# Required import: import tensorflow [as alias]
# Or: from tensorflow import VariableScope [as alias]
def build(self, hparams, is_training=True):
    self._total_length = hparams.max_seq_len
    if self._total_length != np.prod(self._level_lengths):
      raise ValueError(
          'The product of the HierarchicalLstmEncoder level lengths (%d) must '
          'equal the padded input sequence length (%d).' % (
              np.prod(self._level_lengths), self._total_length))
    tf.logging.info('\nHierarchical Encoder:\n'
                    '  input length: %d\n'
                    '  level lengths: %s\n',
                    self._total_length,
                    self._level_lengths)
    self._hierarchical_encoders = []
    num_splits = np.prod(self._level_lengths)
    for i, l in enumerate(self._level_lengths):
      num_splits //= l
      tf.logging.info('Level %d splits: %d', i, num_splits)
      h_encoder = self._core_encoder_cls()
      h_encoder.build(
          hparams, is_training,
          name_or_scope=tf.VariableScope(
              tf.AUTO_REUSE, 'encoder/hierarchical_level_%d' % i))
      self._hierarchical_encoders.append((num_splits, h_encoder)) 
Author: personads, Project: synvae, Lines: 25, Source: lstm_models.py
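
A quick aside on the pattern above: constructing a tf.VariableScope directly with tf.AUTO_REUSE lets each hierarchy level create its variables on the first build and silently reuse them on later builds. The following minimal sketch (not taken from synvae; the scope name is illustrative) shows the mechanism in isolation:

import tensorflow as tf

# Entering the same tf.VariableScope object twice with tf.AUTO_REUSE
# returns the existing variable instead of raising an "already exists" error.
scope = tf.VariableScope(tf.AUTO_REUSE, 'encoder/hierarchical_level_0')

with tf.variable_scope(scope):
  w1 = tf.get_variable('w', shape=[3, 3])
with tf.variable_scope(scope):
  w2 = tf.get_variable('w', shape=[3, 3])

assert w1 is w2  # both names resolve to 'encoder/hierarchical_level_0/w'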

Example 2: test_define_model

# Required import: import tensorflow [as alias]
# Or: from tensorflow import VariableScope [as alias]
def test_define_model(self):
    FLAGS.batch_size = 2
    images_shape = [FLAGS.batch_size, 4, 4, 3]
    images_np = np.zeros(shape=images_shape)
    images = tf.constant(images_np, dtype=tf.float32)
    labels = tf.one_hot([0] * FLAGS.batch_size, 2)

    model = train._define_model(images, labels)
    self.assertIsInstance(model, tfgan.StarGANModel)
    self.assertShapeEqual(images_np, model.generated_data)
    self.assertShapeEqual(images_np, model.reconstructed_data)
    self.assertIsInstance(model.discriminator_variables, list)
    self.assertIsInstance(model.generator_variables, list)
    self.assertIsInstance(model.discriminator_scope, tf.VariableScope)
    self.assertIsInstance(model.generator_scope, tf.VariableScope)
    self.assertTrue(callable(model.discriminator_fn))
    self.assertTrue(callable(model.generator_fn)) 
Author: generalized-iou, Project: g-tensorflow-models, Lines: 19, Source: train_test.py
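
The discriminator_scope and generator_scope fields asserted on above are tf.VariableScope objects. As a hedged sketch of where such objects typically come from (simplified; build_generator and the 'generator' scope name are invented for illustration), a model-building function can capture the scope that is active while the network is constructed:

import tensorflow as tf

# Capture the tf.VariableScope active while the network is built, so
# callers (or tests) can inspect it or collect its variables later.
def build_generator(inputs):
  with tf.variable_scope('generator') as gen_scope:
    outputs = tf.layers.dense(inputs, 4)
  return outputs, gen_scope

out, scope = build_generator(tf.zeros([2, 4]))
assert isinstance(scope, tf.VariableScope)
print(scope.name)  # 'generator'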

Example 3: __init__

# Required import: import tensorflow [as alias]
# Or: from tensorflow import VariableScope [as alias]
def __init__(self, subnet, name=None, scope=None):
    """Create the Shared operator.

    Use this as:

        f = Shared(Cr(100, 3))
        g = f | f | f

    Ordinarily, you do not need to provide either a name or a scope.
    Providing a name is useful if you want a well-defined namespace
    for the variables (e.g., for saving a subnet).

    Args:
        subnet: Definition of the shared network.
        name: Optional name for the shared context.
        scope: Optional shared scope (must be a Scope, not a string).

    Raises:
        ValueError: scope is not of type tf.VariableScope, name is not
        of type string, or both scope and name are given together.
    """
    if scope is not None and not isinstance(scope, tf.VariableScope):
      raise ValueError("scope must be None or a VariableScope")
    if name is not None and not isinstance(name, str):
      raise ValueError("name must be None or a string")
    if scope is not None and name is not None:
      raise ValueError("cannot provide both a name and a scope")
    if name is None:
      name = "Shared_%d" % Shared.shared_number
      Shared.shared_number += 1
    self.subnet = subnet
    self.name = name
    self.scope = scope 
Author: tobegit3hub, Project: deep_image_model, Lines: 35, Source: specs_ops.py
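
The constructor above only records the subnet, name, and scope; the actual sharing happens when the operator is applied. Below is a minimal sketch of that pattern, under the assumption that application creates the scope on first call and re-enters it with reuse=True afterwards (the real logic lives elsewhere in specs_ops.py; SharedSketch is not the project's class):

import tensorflow as tf

class SharedSketch(object):
  """Applies `subnet` under one variable scope so its weights are shared."""
  shared_number = 1

  def __init__(self, subnet, name=None):
    self.subnet = subnet
    self.name = name or 'Shared_%d' % SharedSketch.shared_number
    SharedSketch.shared_number += 1
    self.scope = None

  def __call__(self, x):
    if self.scope is None:
      # First application: create the scope and remember it.
      with tf.variable_scope(self.name) as scope:
        self.scope = scope
        return self.subnet(x)
    # Later applications: re-enter the saved scope with reuse=True.
    with tf.variable_scope(self.scope, reuse=True):
      return self.subnet(x)

# Usage: f = SharedSketch(lambda x: tf.layers.dense(x, 4))
#        y1, y2 = f(tf.zeros([2, 4])), f(tf.zeros([2, 4]))  # one weight matrix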

Example 4: pair_vars_between_scope

# Required import: import tensorflow [as alias]
# Or: from tensorflow import VariableScope [as alias]
def pair_vars_between_scope(src, dst, src_vars=None, dst_vars=None):
    def canonicalize_scope_name(s):
        if isinstance(s, tf.VariableScope):
            s = s.name
        return s + "/"

    def canonicalize_vars(var_list, scope_path):
        if var_list is None:
            var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
        vd = dict()
        prefix_len = len(scope_path)
        for v in var_list:
            if v.name.startswith(scope_path):
                vd[v.name[prefix_len:]] = v
        return vd

    src = canonicalize_scope_name(src)
    dst = canonicalize_scope_name(dst)

    src_vars = canonicalize_vars(src_vars, src)
    dst_vars = canonicalize_vars(dst_vars, dst)

    assert len(dst_vars) == len(src_vars) and all(k in dst_vars for k in src_vars), \
        "variable sets under the two scopes do not match"

    pair_list = []
    for k, src_v in src_vars.items():
        pair_list.append((src_v, dst_vars[k]))    # (src, dst)
    return pair_list 
Author: YutingZhang, Project: lmdis-rep, Lines: 31, Source: tf_graph_utils.py
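
A hedged usage sketch (the 'online'/'target' scope names are placeholders, not from lmdis-rep): the (src, dst) pairs returned above are convenient for building assign ops, for example to sync a target network from an online network:

import tensorflow as tf

# Build one assign op per matched pair and group them into a single sync
# op; running it copies every 'online/...' variable to 'target/...'.
pairs = pair_vars_between_scope('online', 'target')
sync_op = tf.group(*[tf.assign(dst_v, src_v) for src_v, dst_v in pairs])

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  sess.run(sync_op)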

Example 5: __init__

# Required import: import tensorflow [as alias]
# Or: from tensorflow import VariableScope [as alias]
def __init__(self, input_type=None, output_type=None, name_or_scope=None):
    """Creates the layer.

    Args:
      input_type: A type.
      output_type: A type.
      name_or_scope: A string or variable scope. If a string, a new variable
        scope will be created by calling
        [`create_variable_scope`](#create_variable_scope), with defaults
        inherited from the current variable scope. If no caching device is set,
        it will be set to `lambda op: op.device`. This is because `tf.while` can
        be very inefficient if the variables it uses are not cached locally.
    """
    if name_or_scope is None: name_or_scope = type(self).__name__
    if isinstance(name_or_scope, tf.VariableScope):
      self._vscope = name_or_scope
      name = str(self._vscope.name)
    elif isinstance(name_or_scope, six.string_types):
      self._vscope = create_variable_scope(name_or_scope)
      name = name_or_scope
    else:
      raise TypeError('name_or_scope must be a tf.VariableScope or a string: '
                      '%s' % (name_or_scope,))
    if self._vscope.caching_device is None:
      self._vscope.set_caching_device(lambda op: op.device)
    super(Layer, self).__init__(input_type, output_type, name)

    if not hasattr(self, '_constructor_name'):
      self._constructor_name = '__.%s' % self.__class__.__name__
    if not hasattr(self, '_constructor_args'):
      self._constructor_args = None
    if not hasattr(self, '_constructor_kwargs'):
      self._constructor_kwargs = None 
Author: tensorflow, Project: fold, Lines: 35, Source: layers.py
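
The docstring above refers to create_variable_scope. As a rough sketch of what such a helper could look like (the real one is defined elsewhere in tensorflow/fold), passing name_or_scope=None together with default_name makes TF uniquify the scope name while still handing back the VariableScope object for later reuse:

import tensorflow as tf

def create_variable_scope(name):
  # `None` plus `default_name` asks TF to uniquify the name ('layer',
  # 'layer_1', ...) and returns the resulting VariableScope object.
  with tf.variable_scope(None, default_name=name) as scope:
    return scope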

Example 6: _hierarchical_decode

# Required import: import tensorflow [as alias]
# Or: from tensorflow import VariableScope [as alias]
def _hierarchical_decode(self, z, base_decode_fn):
    """Depth first decoding from `z`, passing final embeddings to base fn."""
    # Subtract 1 for the core decoder level.
    num_levels = len(self._level_lengths) - 1

    hparams = self.hparams
    batch_size = hparams.batch_size

    def recursive_decode(initial_input, path=None):
      """Recursive hierarchical decode function."""
      path = path or []
      level = len(path)

      if level == num_levels:
        with tf.variable_scope('core_decoder', reuse=tf.AUTO_REUSE):
          return base_decode_fn(initial_input, path)

      scope = tf.VariableScope(
          tf.AUTO_REUSE, 'decoder/hierarchical_level_%d' % level)
      num_steps = self._level_lengths[level]
      with tf.variable_scope(scope):
        state = lstm_utils.initial_cell_state_from_embedding(
            self._hier_cells[level], initial_input, name='initial_state')
      if level not in self._disable_autoregression:
        # The initial input should be the same size as the tensors returned by
        # next level.
        if self._hierarchical_encoder:
          input_size = self._hierarchical_encoder.level(0).output_depth
        elif level == num_levels - 1:
          input_size = sum(nest.flatten(self._core_decoder.state_size))
        else:
          input_size = sum(nest.flatten(self._hier_cells[level + 1].state_size))
        next_input = tf.zeros([batch_size, input_size])
      lower_level_embeddings = []
      for i in range(num_steps):
        if level in self._disable_autoregression:
          next_input = tf.zeros([batch_size, 1])
        else:
          next_input = tf.concat([next_input, initial_input], axis=1)
        with tf.variable_scope(scope):
          output, state = self._hier_cells[level](next_input, state, scope)
        next_input = recursive_decode(output, path + [i])
        lower_level_embeddings.append(next_input)
      if self._hierarchical_encoder:
        # Return the encoding of the outputs using the appropriate level of the
        # hierarchical encoder.
        enc_level = num_levels - level
        return self._hierarchical_encoder.level(enc_level).encode(
            sequence=tf.stack(lower_level_embeddings, axis=1),
            sequence_length=tf.fill([batch_size], num_steps))
      else:
        # Return the final state.
        return tf.concat(nest.flatten(state), axis=-1)

    return recursive_decode(z) 
Author: personads, Project: synvae, Lines: 58, Source: lstm_models.py
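
One more hedged sketch to make the scoping in recursive_decode concrete (level_linear and the shapes are invented for illustration): every call at the same hierarchy level re-enters one AUTO_REUSE scope and therefore shares weights, while different levels get distinct variables:

import tensorflow as tf

def level_linear(x, level):
  # All calls with the same `level` share 'decoder/hierarchical_level_%d/w'.
  scope = tf.VariableScope(
      tf.AUTO_REUSE, 'decoder/hierarchical_level_%d' % level)
  with tf.variable_scope(scope):
    w = tf.get_variable('w', shape=[int(x.shape[-1]), 8])
  return tf.matmul(x, w)

x = tf.zeros([2, 8])
a = level_linear(x, 0)
b = level_linear(x, 0)  # sibling call: reuses level-0 weights
c = level_linear(x, 1)  # creates separate level-1 weights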


Note: the tensorflow.VariableScope examples in this article were compiled by 纯净天空 from open-source code and documentation hosted on GitHub/MSDocs and similar platforms, with snippets selected from projects contributed by various developers. Copyright in the source code remains with the original authors; distribution and use are subject to each project's license. Please do not reproduce without permission.