

Python v1.AUTO_REUSE Attribute Code Examples

This article collects typical usage examples of the tensorflow.compat.v1.AUTO_REUSE attribute in Python. If you are unsure what v1.AUTO_REUSE does or how to use it in practice, the curated examples below should help. You can also explore the tensorflow.compat.v1 module for further usage examples.


The 15 code examples of the v1.AUTO_REUSE attribute below are ordered by popularity.
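
Before the examples, here is a minimal, self-contained sketch of what tf.AUTO_REUSE does: the first call into a variable scope creates its variables, and any later call with the same scope name silently reuses them instead of raising a "variable already exists" error. The scope and function names below are illustrative only.

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

def dense_layer(x):
  # The first call creates "shared/w"; subsequent calls reuse it.
  with tf.variable_scope("shared", reuse=tf.AUTO_REUSE):
    w = tf.get_variable("w", shape=[x.shape[-1], 4])
    return tf.matmul(x, w)

a = dense_layer(tf.ones([2, 3]))
b = dense_layer(tf.zeros([5, 3]))  # no error; same weights as the first call
assert len(tf.trainable_variables()) == 1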

Example 1: fprop

# Required imports: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import AUTO_REUSE [as alias]
def fprop(self, x):
    if x.name in self._logits_dict:
      return self._logits_dict[x.name]

    x = tf.map_fn(tf.image.per_image_standardization, x)
    self._additional_features['inputs'] = x

    if self._scope is None:
      scope = tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE)
    else:
      scope = tf.variable_scope(self._scope, reuse=tf.AUTO_REUSE)

    with scope:
      logits = self._model_fn(
          self._additional_features,
          None,
          'attack',
          params=self._params,
          config=self._config)
    self._logits_dict[x.name] = logits

    return {model.Model.O_LOGITS: tf.reshape(logits, [-1, logits.shape[-1]])} 
Developer: tensorflow | Project: tensor2tensor | Lines: 24 | Source: adv_attack_utils.py

Example 2: actnorm_3d

# Required imports: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import AUTO_REUSE [as alias]
def actnorm_3d(name, x, logscale_factor=3.):
  """Applies actnorm to each time-step independently.

  There are a total of 2*n_channels*n_steps parameters learnt.

  Args:
    name: variable scope.
    x: 5-D Tensor, (NTHWC)
    logscale_factor: Increases the learning rate of the scale by
                     logscale_factor.
  Returns:
    x: 5-D Tensor, (NTHWC) with the per-timestep, per-channel normalization.
  """
  with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
    x = tf.unstack(x, axis=1)
    x_normed = []
    for ind, x_step in enumerate(x):
      x_step, _ = actnorm("actnorm_%d" % ind, x_step,
                          logscale_factor=logscale_factor)
      x_normed.append(x_step)
    return tf.stack(x_normed, axis=1), None 
Developer: tensorflow | Project: tensor2tensor | Lines: 23 | Source: glow_ops.py
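
Because the scope is opened with tf.AUTO_REUSE, a second call to actnorm_3d under the same name shares all 2*n_channels*n_steps parameters with the first, which is what flow models rely on across their forward and inverse passes. A hypothetical usage sketch (assuming the surrounding glow_ops module is in scope):

# x is a 5-D video tensor (N, T, H, W, C); here T=4 and C=3.
x = tf.random_normal([8, 4, 32, 32, 3])
y, _ = actnorm_3d("actnorm_video", x)
y2, _ = actnorm_3d("actnorm_video", x)  # reuses the same 2*3*4 parameters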

Example 3: single_conv_dist

# Required imports: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import AUTO_REUSE [as alias]
def single_conv_dist(name, x, output_channels=None):
  """A 3x3 convolution mapping x to a standard normal distribution at init.

  Args:
    name: variable scope.
    x: 4-D Tensor.
    output_channels: number of channels of the mean and std.

  Returns:
    a tf.distributions.Normal parameterized by the predicted mean and scale.
  """
  with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
    x_shape = common_layers.shape_list(x)
    if output_channels is None:
      output_channels = x_shape[-1]
    mean_log_scale = conv("conv2d", x, output_channels=2*output_channels,
                          conv_init="zeros", apply_actnorm=False)
    mean = mean_log_scale[:, :, :, 0::2]
    log_scale = mean_log_scale[:, :, :, 1::2]
    return tf.distributions.Normal(mean, tf.exp(log_scale)) 
Developer: tensorflow | Project: tensor2tensor | Lines: 19 | Source: glow_ops.py
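
The 0::2 and 1::2 strides split the 2*output_channels convolution output into interleaved mean and log-scale channels. A small NumPy sketch of the same slicing:

import numpy as np

out = np.arange(8).reshape(1, 1, 1, 8)  # stand-in for the conv output
mean = out[..., 0::2]                   # channels 0, 2, 4, 6
log_scale = out[..., 1::2]              # channels 1, 3, 5, 7
assert mean.shape[-1] == log_scale.shape[-1] == 4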

Example 4: revnet

# Required imports: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import AUTO_REUSE [as alias]
def revnet(name, x, hparams, reverse=True):
  """'hparams.depth' steps of generative flow.

  Args:
    name: variable scope for the revnet block.
    x: 4-D Tensor, shape=(NHWC).
    hparams: HParams.
    reverse: bool, forward or backward pass.
  Returns:
    x: 4-D Tensor, shape=(NHWC).
    objective: float.
  """
  with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
    steps = np.arange(hparams.depth)
    if reverse:
      steps = steps[::-1]

    objective = 0.0
    for step in steps:
      x, curr_obj = revnet_step(
          "revnet_step_%d" % step, x, hparams, reverse=reverse)
      objective += curr_obj
    return x, objective 
Developer: tensorflow | Project: tensor2tensor | Lines: 25 | Source: glow_ops.py
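
Here tf.AUTO_REUSE is what lets one scope serve both directions of the flow: the first call creates the variables and the later call in the opposite direction reuses them, with only the step order flipped. A hypothetical round trip (x and hparams as in the surrounding glow_ops module):

z, objective = revnet("flow", x, hparams, reverse=False)
x_recovered, inv_objective = revnet("flow", z, hparams, reverse=True)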

Example 5: scale_gaussian_prior

# Required imports: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import AUTO_REUSE [as alias]
def scale_gaussian_prior(name, z, logscale_factor=3.0, trainable=True):
  """Returns N(s^i * z^i, std^i) where s^i and std^i are pre-component.

  s^i is a learnable parameter with identity initialization.
  std^i is optionally learnable with identity initialization.

  Args:
    name: variable scope.
    z: input_tensor
    logscale_factor: equivalent to scaling up the learning_rate by a factor
                     of logscale_factor.
    trainable: Whether or not std^i is learnt.
  """
  with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
    z_shape = common_layers.shape_list(z)
    latent_multiplier = tf.get_variable(
        "latent_multiplier", shape=z_shape, dtype=tf.float32,
        initializer=tf.ones_initializer())
    log_scale = tf.get_variable(
        "log_scale_latent", shape=z_shape, dtype=tf.float32,
        initializer=tf.zeros_initializer(), trainable=trainable)
    log_scale = log_scale * logscale_factor
    return tfp.distributions.Normal(
        loc=latent_multiplier * z, scale=tf.exp(log_scale)) 
Developer: tensorflow | Project: tensor2tensor | Lines: 26 | Source: glow_ops.py

Example 6: residual_shuffle_network

# Required imports: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import AUTO_REUSE [as alias]
def residual_shuffle_network(inputs, hparams):
  """Residual Shuffle-Exchange network with weight sharing.

  Args:
    inputs: inputs to the Shuffle-Exchange network; the length should be a
      power of 2.
    hparams: Model configuration.

  Returns:
    tf.Tensor: Outputs of the Shuffle-Exchange last layer
  """
  input_shape = tf.shape(inputs)
  n_bits = tf.log(tf.cast(input_shape[1] - 1, tf.float32)) / tf.log(2.0)
  n_bits = tf.cast(n_bits, tf.int32) + 1

  block_out = inputs

  for k in range(hparams.num_hidden_layers):
    with tf.variable_scope("benes_block_" + str(k), reuse=tf.AUTO_REUSE):
      forward_output = forward_part(block_out, hparams, n_bits)
      block_out = reverse_part(forward_output, hparams, n_bits)

  return RSU("last_layer", hparams.dropout, hparams.mode)(block_out) 
Developer: tensorflow | Project: tensor2tensor | Lines: 25 | Source: residual_shuffle_exchange.py
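
The n_bits expression computes ceil(log2(length)) for the power-of-two lengths the network expects: tf.cast to int32 truncates, and the +1 restores the ceiling. The same arithmetic in plain Python:

import math

length = 1024                              # must be a power of two
n_bits = int(math.log(length - 1, 2)) + 1  # floor(log2(length - 1)) + 1
assert n_bits == 10 == math.ceil(math.log2(length))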

Example 7: iterative_encoder_decoder

# Required imports: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import AUTO_REUSE [as alias]
def iterative_encoder_decoder(encoder_input,
                              encoder_self_attention_bias,
                              encoder_decoder_attention_bias,
                              query,
                              hparams):
  """Iterative encoder decoder."""
  for _ in range(hparams.num_rec_steps):
    with tf.variable_scope("step", reuse=tf.AUTO_REUSE):
      encoder_output = image_question_encoder(
          encoder_input,
          encoder_self_attention_bias,
          hparams,
          query)

      decoder_output = decoder(
          query,
          encoder_output,
          None,
          encoder_decoder_attention_bias,
          hparams)

      encoder_input = encoder_output
      query = decoder_output

  return decoder_output
Developer: tensorflow | Project: tensor2tensor | Lines: 27 | Source: vqa_self_attention.py

Example 8: transformer_decoder_layers

# Required imports: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import AUTO_REUSE [as alias]
def transformer_decoder_layers(name,
                               n_layers,
                               decoder_input,
                               **kwargs):
  """A transformation block composed of transformer decoder layers."""
  with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
    hparams = kwargs["hparams"]
    outputs = decoder_input
    with tf.variable_scope("decoder", reuse=tf.AUTO_REUSE):
      for layer_idx in range(n_layers):
        outputs = transformer_decoder_layer(
            decoder_input=outputs,
            layer_idx=layer_idx,
            **kwargs)
      outputs = common_layers.layer_preprocess(outputs, hparams)
    return outputs 
Developer: tensorflow | Project: tensor2tensor | Lines: 18 | Source: transformer_vae_flow_prior_ops.py

Example 9: posterior

# Required imports: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import AUTO_REUSE [as alias]
def posterior(
    name, hparams, targets, targets_mask, decoder_self_attention_bias,
    **kwargs):
  """Compute mu and sigma for diagonal normal posterior q(z|x,y)."""
  with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
    decoder_input = drop_2d(targets, hparams.mode, hparams.posterior_2d_dropout)
    decoder_input = common_attention.add_timing_signal_1d(decoder_input)
    decoder_input = tf.nn.dropout(decoder_input,
                                  rate=hparams.layer_prepostprocess_dropout)
    decoder_output = transformer_decoder_layers(
        "block",
        n_layers=hparams.n_posterior_layers,
        decoder_input=decoder_input,
        hparams=hparams,
        decoder_self_attention_bias=decoder_self_attention_bias,
        **kwargs)
    decoder_output = gops.dense_weightnorm(
        "h2o_out", decoder_output, hparams.latent_size * 2, targets_mask,
        init_scale=0.0, init=False)
    return decoder_output 
Developer: tensorflow | Project: tensor2tensor | Lines: 22 | Source: transformer_vae_flow_prior_ops.py

Example 10: decoder

# Required imports: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import AUTO_REUSE [as alias]
def decoder(name, latents, hparams, decoder_self_attention_bias, **kwargs):
  """Compute final hidden states for p(y|z,x)."""
  with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
    decoder_input = drop_2d(latents, hparams.mode, hparams.decoder_2d_dropout)
    if hparams.pos_attn:
      decoder_input = gops.positional_attention(
          "pos_attn", decoder_input, decoder_self_attention_bias, hparams)
    else:
      decoder_input = common_attention.add_timing_signal_1d(decoder_input)
    if common_layers.shape_list(latents)[-1] != hparams.hidden_size:
      decoder_input = gops.dense("lat2hid", latents, hparams.hidden_size)
    decoder_output = transformer_decoder_layers(
        "block",
        n_layers=hparams.n_decoder_layers,
        decoder_input=decoder_input,
        hparams=hparams,
        decoder_self_attention_bias=decoder_self_attention_bias,
        **kwargs)
    batch_size, targets_length = common_layers.shape_list(decoder_output)[:2]
    # Expand since t2t expects 4-D tensors.
    decoder_output = tf.reshape(
        decoder_output, [batch_size, targets_length, 1, hparams.hidden_size])
    return decoder_output
Developer: tensorflow | Project: tensor2tensor | Lines: 25 | Source: transformer_vae_flow_prior_ops.py

Example 11: flow_step_glow

# Required imports: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import AUTO_REUSE [as alias]
def flow_step_glow(name, x, x_mask, split_dims, inverse, init, dtype, **kwargs):
  """One step of flow."""
  conv_fn = multihead_invertible_1x1_conv_np
  with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
    reversible_ops = []
    for _, split_dim in enumerate(split_dims):
      identity_first = True
      reversible_ops += [functools.partial(actnorm, name="actnorm", init=init)]
      if split_dim in "ca":
        multihead_split = "a" if split_dim == "c" else "c"
        reversible_ops += [functools.partial(
            conv_fn, name="conv_{}".format(multihead_split),
            multihead_split=multihead_split, dtype=dtype)]
      reversible_ops += [functools.partial(
          coupling, name="coupling_{}".format(split_dim),
          split_dim=split_dim, identity_first=identity_first, init=init,
          **kwargs)]
    if inverse:
      reversible_ops = reversible_ops[::-1]

    logabsdets = tf.constant(0.0, dtype=dtype)
    for reversible_op in reversible_ops:
      x, logabsdet = reversible_op(x=x, x_mask=x_mask, inverse=inverse)
      logabsdets += logabsdet
    return x, logabsdets 
Developer: tensorflow | Project: tensor2tensor | Lines: 27 | Source: transformer_glow_layers.py
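
Inverting the flow step amounts to composing the same reversible ops in the opposite order, which is why the inverse branch only reverses the list. A schematic of that pattern with plain Python functions (each op here has an explicit inverse; in the code above, each reversible op instead takes an inverse flag):

ops = [lambda v: v + 1.0, lambda v: v * 2.0]  # forward: (x + 1) * 2
inv = [lambda v: v / 2.0, lambda v: v - 1.0]  # reversed order, each op undone

z = 3.0
for op in ops:
  z = op(z)
for inv_op in inv:
  z = inv_op(z)
assert z == 3.0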

Example 12: flow_level

# Required imports: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import AUTO_REUSE [as alias]
def flow_level(
    name, x, x_mask, depth, split_dims, prior, inverse, init, dtype, **kwargs):
  """One level of flow."""
  flow_step_fn = flow_step_glow
  with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
    reversible_ops = []
    for step in np.arange(depth):
      reversible_ops += [functools.partial(
          flow_step_fn, name="{}_step".format(step), split_dims=split_dims,
          init=init, dtype=dtype, **kwargs)]
    if prior:
      reversible_ops += [functools.partial(
          coupling, name="{}_prior".format(depth), split_dim="c",
          identity_first=True, init=init, **kwargs)]
    if inverse:
      reversible_ops = reversible_ops[::-1]

    logabsdets = tf.constant(0.0, dtype=dtype)
    for reversible_op in reversible_ops:
      x, logabsdet = reversible_op(x=x, x_mask=x_mask, inverse=inverse)
      logabsdets += logabsdet
    return x, logabsdets 
Developer: tensorflow | Project: tensor2tensor | Lines: 24 | Source: transformer_glow_layers.py

Example 13: video_bitwise_targets_bottom

# Required imports: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import AUTO_REUSE [as alias]
def video_bitwise_targets_bottom(x, model_hparams, vocab_size):
  """Bottom transformation for embedding target video bitwise."""
  pixel_embedding_size = 64
  inputs = x
  with tf.variable_scope("video_modality_bitwise", reuse=tf.AUTO_REUSE):
    common_layers.summarize_video(inputs, "targets_bottom")
    # Embed bitwise.
    assert vocab_size == 256
    embedded = discretization.int_to_bit_embed(inputs, 8,
                                               pixel_embedding_size)
    # Transpose and project.
    transposed = common_layers.time_to_channels(embedded)
    return tf.layers.dense(
        transposed,
        model_hparams.hidden_size,
        name="merge_pixel_embedded_frames") 
Developer: tensorflow | Project: tensor2tensor | Lines: 18 | Source: modalities.py
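
The bitwise bottom works because the 256-symbol pixel vocabulary fits exactly in 8 bits, so every pixel value can be embedded bit by bit. A NumPy sketch of one such decomposition (the exact bit order inside discretization.int_to_bit_embed may differ):

import numpy as np

pixels = np.array([0, 5, 255])
bits = (pixels[:, None] >> np.arange(8)) & 1  # least-significant bit first
assert bits.shape == (3, 8)
assert (bits[2] == 1).all()                   # 255 sets all eight bits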

Example 14: dense_weightnorm

# Required imports: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import AUTO_REUSE [as alias]
def dense_weightnorm(
    name, x, n_out, x_mask, init_scale, init, dtype=tf.float32):
  """Dense layer with weight normalization."""
  n_in = common_layers.shape_list(x)[2]
  eps = tf.keras.backend.epsilon()
  with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
    v = tf.get_variable(
        "v", [n_in, n_out], dtype,
        initializer=tf.random_normal_initializer(0, 0.05), trainable=True)
    v = v / tf.norm(v, axis=0, keepdims=True)
    t = tf.matmul(x, v)  # [B, L, n_out]
    mean, var = moments_over_bl(t, x_mask)
    g_init = init_scale / (tf.sqrt(var) + eps)
    g = get_variable_ddi(
        "g", [n_out], g_init, init,
        initializer=tf.zeros_initializer, dtype=dtype, trainable=True)
    b = get_variable_ddi(
        "b", [n_out], -mean*g_init, init,
        initializer=tf.zeros_initializer, dtype=dtype, trainable=True)
    w = g * v
    y = tf.matmul(x, w) + b
    tf.summary.histogram("_g", g)
    return y 
Developer: tensorflow | Project: tensor2tensor | Lines: 25 | Source: transformer_glow_layers_ops.py
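
The layer implements the weight-normalization reparameterization w = g * v / ||v||, with g and b initialized from data statistics so the initial outputs have roughly zero mean and init_scale scale. A NumPy check of the reparameterization itself:

import numpy as np

rng = np.random.RandomState(0)
v = rng.normal(0, 0.05, size=(16, 8))
v = v / np.linalg.norm(v, axis=0, keepdims=True)  # unit-norm directions
g = np.full(8, 1.5)                               # per-output scale
w = g * v
# Each column of w has norm |g|, regardless of the raw v.
assert np.allclose(np.linalg.norm(w, axis=0), np.abs(g))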

Example 15: post_attention

# Required imports: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import AUTO_REUSE [as alias]
def post_attention(self, token, x):
    """Called after self-attention. The memory can be updated here.

    Args:
      token: Data returned by pre_attention, which can be used to carry over
        state related to the current memory operation.
      x: a Tensor of data after self-attention and feed-forward.
    Returns:
      a (possibly modified) version of the input x.
    """
    with tf.variable_scope(self.name + "/post_attention", reuse=tf.AUTO_REUSE):
      depth = common_layers.shape_list(x)[-1]
      actual_batch_size = common_layers.shape_list(x)[0]
      memory_output = tf.gather(token["retrieved_mem"],
                                tf.range(actual_batch_size))
      output = tf.add(tf.layers.dense(x, depth, use_bias=False),
                      tf.layers.dense(memory_output, depth))
      with tf.control_dependencies([output]):
        with tf.control_dependencies([
            self.write(token["x"], token["access_logits"])]):
          return tf.identity(output) 
Developer: tensorflow | Project: tensor2tensor | Lines: 23 | Source: transformer_memory.py
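
The nested tf.control_dependencies blocks guarantee that the memory write executes before the returned tensor is produced, even though the write's result is never consumed. A minimal graph-mode sketch of that pattern:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

counter = tf.get_variable("counter", initializer=0)
write_op = counter.assign_add(1)  # side effect whose value is unused
with tf.control_dependencies([write_op]):
  output = tf.identity(tf.constant(42.0))  # forces write_op to run first

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  sess.run(output)
  assert sess.run(counter) == 1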


Note: The tensorflow.compat.v1.AUTO_REUSE examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from community-contributed open-source projects; copyright remains with the original authors, and any use or redistribution must follow the corresponding project licenses. Please do not repost without permission.