

Python v1.concat Method Code Examples

This article collects typical usage examples of the tensorflow.compat.v1.concat method in Python. If you are wondering what v1.concat does, how to use it, or where to find examples of it, the curated code samples below may help. You can also explore other usage examples from the tensorflow.compat.v1 module.


Fifteen code examples of the v1.concat method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
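Before the examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of what tf.compat.v1.concat does: it joins a list of tensors along one existing axis.

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

a = tf.constant([[1, 2], [3, 4]])
b = tf.constant([[5, 6], [7, 8]])
rows = tf.concat([a, b], axis=0)  # join along rows -> shape [4, 2]
cols = tf.concat([a, b], axis=1)  # join along columns -> shape [2, 4]

with tf.Session() as sess:
  print(sess.run(rows))
  print(sess.run(cols))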

Example 1: collective_group_key

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import concat [as alias]
def collective_group_key(devices):
  """Returns a group key for the set of devices.

  Args:
    devices: list of strings naming devices in a collective group.

  Returns:
    int key uniquely identifying the set of device names.
  """
  global _group_key
  global _group_key_table
  parsed = [pydev.DeviceSpec.from_string(d) for d in devices]
  names = sorted(['%s:%d' % (d.device_type, d.device_index) for d in parsed])
  concat = ','.join(names)
  if concat not in _group_key_table:
    new_key = _group_key
    _group_key += 1
    _group_key_table[concat] = new_key
  rv = _group_key_table[concat]
  return rv 
Developer: tensorflow, Project: benchmarks, Lines: 22, Source: allreduce.py
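For illustration, a minimal standalone sketch of the keying idea above; `_group_key` and `_group_key_table` are module-level globals in the original allreduce.py, and `group_key_for` is a hypothetical helper name:

_group_key = 1
_group_key_table = {}

def group_key_for(names):
  """Hypothetical helper: maps a set of device names to a stable int key."""
  global _group_key
  concat = ','.join(sorted(names))  # canonical string for the device set
  if concat not in _group_key_table:
    _group_key_table[concat] = _group_key
    _group_key += 1
  return _group_key_table[concat]

print(group_key_for(['GPU:0', 'GPU:1']))  # 1
print(group_key_for(['GPU:1', 'GPU:0']))  # same set, same key: 1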

Example 2: _build_tiled_linear

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import concat [as alias]
def _build_tiled_linear(self, inputs, input_name_and_sizes,
                          output_name_and_sizes, add_bias):
    # pylint: disable=missing-docstring
    def split_output(output):
      if len(output_name_and_sizes) == 1:
        return output
      elif len(set([size for _, size in output_name_and_sizes])) == 1:
        # This is a bit faster than several tf.slice calls.
        return tf.split(output, len(output_name_and_sizes), axis=1)
      else:
        outputs = []
        offset = 0
        for _, output_size in output_name_and_sizes:
          outputs.append(tf.slice(output, [0, offset], [-1, output_size]))
          offset += output_size
        return outputs

    weights = self._ensure_weights()
    if len(inputs) > 1:
      inputs = tf.concat(inputs, 1)
    if add_bias:
      biases = self._ensure_biases()
      return split_output(tf.nn.xw_plus_b(inputs, weights, biases))
    else:
      return split_output(tf.matmul(inputs, weights)) 
Developer: deepmind, Project: lamb, Lines: 27, Source: tiled_linear.py
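The fast path in split_output above relies on tf.split and repeated tf.slice calls being interchangeable when all output sizes are equal. A small sketch, assuming equal sizes:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

x = tf.reshape(tf.range(12), [2, 6])
via_split = tf.split(x, 3, axis=1)  # three [2, 2] tensors in one op
via_slice = [tf.slice(x, [0, 2 * i], [-1, 2]) for i in range(3)]

with tf.Session() as sess:
  split_vals, slice_vals = sess.run([via_split, via_slice])
  for a, b in zip(split_vals, slice_vals):
    assert (a == b).all()  # identical pieces, fewer ops via tf.split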

Example 3: simulate

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import concat [as alias]
def simulate(self, action):
    reward, done = self._batch_env.simulate(action)
    with tf.control_dependencies([reward, done]):
      new_observ = tf.expand_dims(self._batch_env.observ, axis=1)

      # If we shouldn't stack, i.e. self.history == 1, then just assign
      # new_observ to self._observ and return from here.
      if self.history == 1:
        with tf.control_dependencies([self._observ.assign(new_observ)]):
          return tf.identity(reward), tf.identity(done)

      # If we should stack, then do the required work.
      old_observ = tf.gather(
          self._observ.read_value(),
          list(range(1, self.history)),
          axis=1)
      with tf.control_dependencies([new_observ, old_observ]):
        with tf.control_dependencies([self._observ.assign(
            tf.concat([old_observ, new_observ], axis=1))]):
          return tf.identity(reward), tf.identity(done) 
Developer: tensorflow, Project: tensor2tensor, Lines: 22, Source: tf_atari_wrappers.py
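The stacking branch above implements a rolling window: drop the oldest frame, append the newest. A minimal sketch with made-up shapes:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

history = 3
observ = tf.constant([[[1.], [2.], [3.]]])  # [batch, history, ...]
new_frame = tf.constant([[[4.]]])           # [batch, 1, ...]
old = tf.gather(observ, list(range(1, history)), axis=1)  # drop frame 0
stacked = tf.concat([old, new_frame], axis=1)

with tf.Session() as sess:
  print(sess.run(stacked))  # [[[2.], [3.], [4.]]]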

Example 4: restore

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import concat [as alias]
def restore(self, x):
    """Add padding back to the given tensor.

    Args:
      x (tf.Tensor): of shape [dim_compressed,...]

    Returns:
      a tensor of shape [dim_origin, ...] with dim_origin >= dim_compressed.
      The leading dim is restored from the original reference tensor.
    """
    with tf.name_scope("pad_reduce/restore"):
      x = tf.scatter_nd(
          indices=self.nonpad_ids,
          updates=x,
          shape=tf.concat([self.dim_origin, tf.shape(x)[1:]], axis=0),
      )
    return x 
Developer: tensorflow, Project: tensor2tensor, Lines: 19, Source: expert_utils.py
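A small sketch of the restore idea, with hypothetical values for nonpad_ids and dim_origin: tf.scatter_nd writes the compressed rows back to their original positions and leaves the padding rows as zeros.

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

nonpad_ids = tf.constant([[0], [2]])   # rows 0 and 2 were the non-pad rows
x = tf.constant([[1., 1.], [2., 2.]])  # [dim_compressed, ...]
dim_origin = tf.constant([4])          # original leading dimension
restored = tf.scatter_nd(
    indices=nonpad_ids,
    updates=x,
    shape=tf.concat([dim_origin, tf.shape(x)[1:]], axis=0))

with tf.Session() as sess:
  print(sess.run(restored))  # rows 1 and 3 come back as zero padding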

Example 5: combine

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import concat [as alias]
def combine(self, expert_out, multiply_by_gates=True):
    """Sum together the expert output, weighted by the gates.

    The slice corresponding to a particular batch element `b` is computed
    as the sum over all experts `i` of the expert output, weighted by the
    corresponding gate values.  If `multiply_by_gates` is set to False, the
    gate values are ignored.

    Args:
      expert_out: a list of `num_experts` `Tensor`s, each with shape
        `[expert_batch_size_i, <extra_output_dims>]`.
      multiply_by_gates: a boolean

    Returns:
      a `Tensor` with shape `[batch_size, <extra_output_dims>]`.
    """
    # see comments on convert_gradient_to_tensor
    stitched = common_layers.convert_gradient_to_tensor(
        tf.concat(expert_out, 0))
    if multiply_by_gates:
      stitched *= tf.expand_dims(self._nonzero_gates, 1)
    combined = tf.unsorted_segment_sum(stitched, self._batch_index,
                                       tf.shape(self._gates)[0])
    return combined 
Developer: tensorflow, Project: tensor2tensor, Lines: 26, Source: expert_utils.py
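The core of combine is a concat followed by tf.unsorted_segment_sum. A minimal sketch with two hypothetical experts and a batch of two:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

expert_out = [tf.constant([[1., 1.]]),            # expert 0: one slice
              tf.constant([[2., 2.], [3., 3.]])]  # expert 1: two slices
batch_index = tf.constant([0, 0, 1])  # source batch row of each slice
stitched = tf.concat(expert_out, 0)   # [3, 2]
combined = tf.unsorted_segment_sum(stitched, batch_index, 2)

with tf.Session() as sess:
  print(sess.run(combined))  # [[3., 3.], [3., 3.]]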

Example 6: call

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import concat [as alias]
def call(self, inputs, **kwargs):
    del kwargs
    features = inputs
    set_custom_getter_compose(self._custom_getter)
    tf.get_variable_scope().set_initializer(
        optimize.get_variable_initializer(self.hparams))
    with self._eager_var_store.as_default():
      self._fill_problem_hparams_features(features)
      summarize_features(features, num_shards=self._num_datashards)
      sharded_features = self._shard_features(features)
      sharded_logits, losses = self.model_fn_sharded(sharded_features)
      if isinstance(sharded_logits, dict):
        concat_logits = {}
        for k, v in six.iteritems(sharded_logits):
          concat_logits[k] = tf.concat(v, 0)
        return concat_logits, losses
      else:
        return tf.concat(sharded_logits, 0), losses 
Developer: tensorflow, Project: tensor2tensor, Lines: 20, Source: t2t_model.py

Example 7: initialize_write_strengths

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import concat [as alias]
def initialize_write_strengths(self, batch_size):
    """Initialize write strengths which write in both directions.

    Unlike in Grefenstette et al., writing starts from the center of the
    memory, so the entire memory does not need to be shifted forward at each
    step.

    Args:
      batch_size: The size of the current batch.

    Returns:
      A tf.float32 tensor of shape [batch_size, num_write_heads, memory_size, 1].
    """
    memory_center = self._memory_size // 2
    return tf.expand_dims(
        tf.concat([
            # The write strength for the deque bottom.
            # Should be shifted back at each timestep.
            tf.one_hot([[memory_center - 1]] * batch_size,
                       depth=self._memory_size, dtype=tf.float32),
            # The write strength for the deque top.
            # Should be shifted forward at each timestep.
            tf.one_hot([[memory_center]] * batch_size,
                       depth=self._memory_size, dtype=tf.float32)
        ], axis=1), axis=3) 
Developer: tensorflow, Project: tensor2tensor, Lines: 27, Source: neural_stack.py
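A shape check on the construction above, with small hypothetical sizes; the two one-hot write positions sit on either side of the memory center:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

batch_size, memory_size = 2, 6
center = memory_size // 2
strengths = tf.expand_dims(
    tf.concat([
        tf.one_hot([[center - 1]] * batch_size, depth=memory_size),
        tf.one_hot([[center]] * batch_size, depth=memory_size),
    ], axis=1), axis=3)

with tf.Session() as sess:
  print(sess.run(tf.shape(strengths)))  # [2, 2, 6, 1]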

Example 8: ae_latent_sample

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import concat [as alias]
def ae_latent_sample(latents_dense, inputs, ed, embed, iters, hparams):
  """Sample from the latent space in the autoencoder."""
  if hparams.num_decode_blocks < 2 and hparams.sampling_temp == 0.0:
    # TODO(lukaszkaiser): beam-search only works in non-blocked mode for now.
    tf.logging.info("Running beam-search for latents with beam size 1.")
    return ae_latent_sample_beam(latents_dense, inputs, ed, embed, hparams)
  latents_pred = decode_transformer(inputs, ed, latents_dense, hparams, "extra")
  latents_discrete, _ = ae_latent_softmax(latents_pred, None, hparams)

  def next_bit(latents_discrete, i):
    latents_discrete_prev = latents_discrete
    with tf.variable_scope(tf.get_variable_scope(), reuse=True):
      latents_dense = embed(latents_discrete)
      latents_pred = decode_transformer(
          inputs, ed, latents_dense, hparams, "extra")
      latents_discrete, _ = ae_latent_softmax(latents_pred, None, hparams)
      return tf.concat([latents_discrete_prev[:, :(i+1), :],
                        latents_discrete[:, (i+1):, :]], axis=1)

  for i in range(iters):
    latents_discrete = next_bit(latents_discrete, i)
  return latents_discrete 
Developer: tensorflow, Project: tensor2tensor, Lines: 24, Source: transformer_vae.py
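The concat inside next_bit keeps the already-fixed prefix and resamples only the suffix. A sketch of just that slicing step, reduced to 2-D for clarity:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

prev = tf.constant([[0, 0, 0, 0]])  # latents kept from the last iteration
new = tf.constant([[1, 2, 3, 4]])   # freshly predicted latents
i = 1
merged = tf.concat([prev[:, :(i + 1)], new[:, (i + 1):]], axis=1)

with tf.Session() as sess:
  print(sess.run(merged))  # [[0, 0, 3, 4]]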

Example 9: add_edge_bias

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import concat [as alias]
def add_edge_bias(x, filter_size):
  """Pad x and concatenates an edge bias across the depth of x.

  The edge bias can be thought of as a binary feature which is unity when
  the filter is being convolved over an edge and zero otherwise.

  Args:
    x: Input tensor, shape (NHWC)
    filter_size: filter size, used to determine the padding amounts.
  Returns:
    x_pad: Output tensor, shape (NHW(C+1))
  """
  x_shape = common_layers.shape_list(x)
  if filter_size[0] == 1 and filter_size[1] == 1:
    return x
  a = (filter_size[0] - 1) // 2  # vertical padding size
  b = (filter_size[1] - 1) // 2  # horizontal padding size
  padding = [[0, 0], [a, a], [b, b], [0, 0]]
  x_bias = tf.zeros(x_shape[:-1] + [1])

  x = tf.pad(x, padding)
  x_pad = tf.pad(x_bias, padding, constant_values=1)
  return tf.concat([x, x_pad], axis=3) 
Developer: tensorflow, Project: tensor2tensor, Lines: 25, Source: glow_ops.py
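A quick sketch of the edge-bias channel for a hypothetical 3x3 filter: the appended channel is 1 exactly on the padded border and 0 in the interior.

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

x = tf.ones([1, 2, 2, 3])                   # NHWC input
padding = [[0, 0], [1, 1], [1, 1], [0, 0]]  # (3 - 1) // 2 = 1 on each side
x_padded = tf.pad(x, padding)
bias = tf.pad(tf.zeros([1, 2, 2, 1]), padding, constant_values=1)
out = tf.concat([x_padded, bias], axis=3)   # NHW(C+1)

with tf.Session() as sess:
  print(sess.run(tf.shape(out)))  # [1, 4, 4, 4]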

Example 10: sample

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import concat [as alias]
def sample(self, features=None, shape=None):
    del features
    hp = self.hparams
    div_x = 2**hp.num_hidden_layers
    div_y = 1 if self.is1d else 2**hp.num_hidden_layers
    size = [
        hp.batch_size, hp.sample_height // div_x, hp.sample_width // div_y,
        hp.bottleneck_bits
    ]
    size = size if shape is None else shape
    rand = tf.random_uniform(size)
    res = 2.0 * tf.to_float(tf.less(0.5, rand)) - 1.0
    # If you want to set some first bits to a fixed value, do this:
    # fixed = tf.zeros_like(rand) - 1.0
    # nbits = 3
    # res = tf.concat([fixed[:, :, :, :nbits], res[:, :, :, nbits:]], axis=-1)
    return res 
Developer: tensorflow, Project: tensor2tensor, Lines: 19, Source: autoencoders.py

Example 11: shake_shake_skip_connection

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import concat [as alias]
def shake_shake_skip_connection(x, output_filters, stride, is_training):
  """Adds a residual connection to the filter x for the shake-shake model."""
  curr_filters = common_layers.shape_list(x)[-1]
  if curr_filters == output_filters:
    return x
  stride_spec = [1, stride, stride, 1]
  # Skip path 1.
  path1 = tf.nn.avg_pool(x, [1, 1, 1, 1], stride_spec, "VALID")
  path1 = tf.layers.conv2d(
      path1, int(output_filters / 2), (1, 1), padding="SAME", name="path1_conv")

  # Skip path 2.
  pad_arr = [[0, 0], [0, 1], [0, 1], [0, 0]]  # First pad with 0's then crop.
  path2 = tf.pad(x, pad_arr)[:, 1:, 1:, :]
  path2 = tf.nn.avg_pool(path2, [1, 1, 1, 1], stride_spec, "VALID")
  path2 = tf.layers.conv2d(
      path2, int(output_filters / 2), (1, 1), padding="SAME", name="path2_conv")

  # Concat and apply BN.
  final_path = tf.concat(values=[path1, path2], axis=-1)
  final_path = tf.layers.batch_normalization(
      final_path, training=is_training, name="final_path_bn")
  return final_path 
Developer: tensorflow, Project: tensor2tensor, Lines: 25, Source: shake_shake.py

Example 12: final_block

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import concat [as alias]
def final_block(x1, x2, dim='2d', training=True, scope='final_block'):
  """Converts activations from last RevNet block to pre-logits.

  Args:
    x1: [NxHxWxC] tensor of network activations.
    x2: [NxHxWxC] tensor of network activations.
    dim: '2d' if 2-dimensional, '3d' if 3-dimensional.
    training: True for train phase, False for eval phase.
    scope: Optional variable scope for the final block.

  Returns:
    [N, hidden_dim] pre-logits tensor from activations x1 and x2.
  """

  # Final batch norm and relu
  with tf.variable_scope(scope):
    y = tf.concat([x1, x2], axis=CONFIG[dim]['split_axis'])
    y = tf.layers.batch_normalization(y, training=training)
    y = tf.nn.relu(y)

    # Global average pooling
    net = tf.reduce_mean(y, CONFIG[dim]['reduction_dimensions'],
                         name='final_pool', keep_dims=True)

    return net 
Developer: tensorflow, Project: tensor2tensor, Lines: 27, Source: revnet.py

Example 13: inject_latent

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import concat [as alias]
def inject_latent(self, layer, inputs, target, action):
    """Inject a VAE-style latent."""
    del action
    # Latent for stochastic model
    filters = 128
    full_video = tf.stack(inputs + [target], axis=1)
    latent_mean, latent_std = self.construct_latent_tower(
        full_video, time_axis=1)
    latent = common_video.get_gaussian_tensor(latent_mean, latent_std)
    latent = tfl.flatten(latent)
    latent = tf.expand_dims(latent, axis=1)
    latent = tf.expand_dims(latent, axis=1)
    latent_mask = tfl.dense(latent, filters, name="latent_mask")
    zeros_mask = tf.zeros(
        common_layers.shape_list(layer)[:-1] + [filters], dtype=tf.float32)
    layer = tf.concat([layer, latent_mask + zeros_mask], axis=-1)
    extra_loss = self.get_kl_loss([latent_mean], [latent_std])
    return layer, extra_loss 
Developer: tensorflow, Project: tensor2tensor, Lines: 20, Source: basic_stochastic.py
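The `latent_mask + zeros_mask` trick above broadcasts a [batch, 1, 1, filters] latent across the spatial dimensions before the channel-wise concat. A minimal sketch with hypothetical sizes:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

layer = tf.ones([1, 4, 4, 8])
latent_mask = tf.ones([1, 1, 1, 3])  # per-batch latent, no spatial extent
zeros_mask = tf.zeros([1, 4, 4, 3])  # carries the spatial shape
out = tf.concat([layer, latent_mask + zeros_mask], axis=-1)

with tf.Session() as sess:
  print(sess.run(tf.shape(out)))  # [1, 4, 4, 11]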

Example 14: update_internal_states_early

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import concat [as alias]
def update_internal_states_early(self, internal_states, frames):
    """Update the internal states early in the network in GRU-like way."""
    batch_size = common_layers.shape_list(frames[0])[0]
    internal_state = internal_states[0][0][:batch_size, :, :, :]
    state_activation = tf.concat([internal_state, frames[0]], axis=-1)
    state_gate_candidate = tf.layers.conv2d(
        state_activation, 2 * self.hparams.recurrent_state_size,
        (3, 3), padding="SAME", name="state_conv")
    state_gate, state_candidate = tf.split(state_gate_candidate, 2, axis=-1)
    state_gate = tf.nn.sigmoid(state_gate)
    state_candidate = tf.tanh(state_candidate)
    internal_state = internal_state * state_gate
    internal_state += state_candidate * (1.0 - state_gate)
    max_batch_size = max(_MAX_BATCH, self.hparams.batch_size)
    diff_batch_size = max_batch_size - batch_size
    internal_state = tf.pad(
        internal_state, [[0, diff_batch_size], [0, 0], [0, 0], [0, 0]])
    return [[internal_state]] 
Developer: tensorflow, Project: tensor2tensor, Lines: 20, Source: basic_stochastic.py
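The gating arithmetic above is the standard GRU-style convex blend of old state and candidate. A sketch of just that step on tiny made-up tensors:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

state = tf.constant([[1.0, -1.0]])
gate_candidate = tf.constant([[0.0, 2.0, 0.5, -0.5]])
gate, candidate = tf.split(gate_candidate, 2, axis=-1)
gate = tf.nn.sigmoid(gate)        # in (0, 1): how much old state to keep
candidate = tf.tanh(candidate)    # proposed new state
new_state = state * gate + candidate * (1.0 - gate)

with tf.Session() as sess:
  print(sess.run(new_state))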

Example 15: simple_discrete_latent_tower

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import concat [as alias]
def simple_discrete_latent_tower(self, input_image, target_image):
    hparams = self.hparams

    if self.is_predicting:
      batch_size = common_layers.shape_list(input_image)[0]
      rand = tf.random_uniform([batch_size, hparams.bottleneck_bits])
      bits = 2.0 * tf.to_float(tf.less(0.5, rand)) - 1.0
      return bits

    conv_size = self.tinyify([64, 32, 32, 1])
    pair = tf.concat([input_image, target_image], axis=-1)
    posterior_enc = self.basic_conv_net(pair, conv_size, "posterior_enc")
    posterior_enc = tfl.flatten(posterior_enc)
    bits, _ = discretization.tanh_discrete_bottleneck(
        posterior_enc,
        hparams.bottleneck_bits,
        hparams.bottleneck_noise,
        hparams.discretize_warmup_steps,
        hparams.mode)
    return bits 
Developer: tensorflow, Project: tensor2tensor, Lines: 22, Source: sv2p.py


Note: The tensorflow.compat.v1.concat examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors. Please consult each project's License before distributing or using the code, and do not reproduce this article without permission.