

Python tensorflow.tile Method Code Examples

This article collects and summarizes typical usage examples of the tensorflow.tile method in Python. If you are wondering what exactly tensorflow.tile does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples from the tensorflow module.


The following 15 code examples of tensorflow.tile are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
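
Before the project examples, here is a minimal, self-contained sketch of the basic tf.tile behaviour (written against the TF 1.x API used throughout the examples below; the tensor values are purely illustrative):

import tensorflow as tf

x = tf.constant([[1, 2],
                 [3, 4]])              # shape [2, 2]
y = tf.tile(x, multiples=[2, 3])       # repeat dim 0 twice and dim 1 three times -> shape [4, 6]

with tf.Session() as sess:
    print(sess.run(y))
    # [[1 2 1 2 1 2]
    #  [3 4 3 4 3 4]
    #  [1 2 1 2 1 2]
    #  [3 4 3 4 3 4]]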

Example 1: minibatch_stddev_layer

# Required import: import tensorflow [as alias]
# Or: from tensorflow import tile [as alias]
def minibatch_stddev_layer(x, group_size=4):
    with tf.variable_scope('MinibatchStddev'):
        group_size = tf.minimum(group_size, tf.shape(x)[0])     # Minibatch must be divisible by (or smaller than) group_size.
        s = x.shape                                             # [NCHW]  Input shape.
        y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]])   # [GMCHW] Split minibatch into M groups of size G.
        y = tf.cast(y, tf.float32)                              # [GMCHW] Cast to FP32.
        y -= tf.reduce_mean(y, axis=0, keep_dims=True)           # [GMCHW] Subtract mean over group.
        y = tf.reduce_mean(tf.square(y), axis=0)                # [MCHW]  Calc variance over group.
        y = tf.sqrt(y + 1e-8)                                   # [MCHW]  Calc stddev over group.
        y = tf.reduce_mean(y, axis=[1,2,3], keep_dims=True)      # [M111]  Take average over fmaps and pixels.
        y = tf.cast(y, x.dtype)                                 # [M111]  Cast back to original data type.
        y = tf.tile(y, [group_size, 1, s[2], s[3]])             # [N1HW]  Replicate over group and pixels.
        return tf.concat([x, y], axis=1)                        # [NCHW]  Append as new fmap.

#----------------------------------------------------------------------------
# Generator network used in the paper. 
Developer: zalandoresearch, Project: disentangling_conditional_gans, Lines: 18, Source: networks.py
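
A hedged usage sketch of minibatch_stddev_layer, given the definition above; the input shape is an illustrative assumption, not taken from the original project:

import tensorflow as tf

x = tf.zeros([8, 16, 4, 4])                    # dummy [N, C, H, W] activations
y = minibatch_stddev_layer(x, group_size=4)    # -> [8, 17, 4, 4]: one stddev feature map appended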

Example 2: pad_and_reshape

# Required import: import tensorflow [as alias]
# Or: from tensorflow import tile [as alias]
def pad_and_reshape(instr_spec, frame_length, F):
    """
    :param instr_spec:
    :param frame_length:
    :param F:
    :returns:
    """
    spec_shape = tf.shape(instr_spec)
    extension_row = tf.zeros((spec_shape[0], spec_shape[1], 1, spec_shape[-1]))
    n_extra_row = (frame_length) // 2 + 1 - F
    extension = tf.tile(extension_row, [1, 1, n_extra_row, 1])
    extended_spec = tf.concat([instr_spec, extension], axis=2)
    old_shape = tf.shape(extended_spec)
    new_shape = tf.concat([
        [old_shape[0] * old_shape[1]],
        old_shape[2:]],
        axis=0)
    processed_instr_spec = tf.reshape(extended_spec, new_shape)
    return processed_instr_spec 
Developer: deezer, Project: spleeter, Lines: 21, Source: tensor.py

Example 3: encode_coordinates_fn

# Required import: import tensorflow [as alias]
# Or: from tensorflow import tile [as alias]
def encode_coordinates_fn(self, net):
    """Adds one-hot encoding of coordinates to different views in the networks.

    For each "pixel" of a feature map it adds a onehot encoded x and y
    coordinates.

    Args:
      net: a tensor of shape=[batch_size, height, width, num_features]

    Returns:
      a tensor with the same height and width, but altered feature_size.
    """
    mparams = self._mparams['encode_coordinates_fn']
    if mparams.enabled:
      batch_size, h, w, _ = net.shape.as_list()
      x, y = tf.meshgrid(tf.range(w), tf.range(h))
      w_loc = slim.one_hot_encoding(x, num_classes=w)
      h_loc = slim.one_hot_encoding(y, num_classes=h)
      loc = tf.concat([h_loc, w_loc], 2)
      loc = tf.tile(tf.expand_dims(loc, 0), [batch_size, 1, 1, 1])
      return tf.concat([net, loc], 3)
    else:
      return net 
Developer: ringringyi, Project: DOTA_models, Lines: 25, Source: model.py
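
A standalone sketch of the same coordinate-encoding pattern, using tf.one_hot in place of slim.one_hot_encoding; the shapes are illustrative assumptions:

import tensorflow as tf

batch_size, h, w, c = 2, 3, 4, 8
net = tf.zeros([batch_size, h, w, c])                         # dummy feature map
x, y = tf.meshgrid(tf.range(w), tf.range(h))                  # each of shape [h, w]
loc = tf.concat([tf.one_hot(y, h), tf.one_hot(x, w)], 2)      # [h, w, h + w]
loc = tf.tile(tf.expand_dims(loc, 0), [batch_size, 1, 1, 1])  # replicate over the batch
out = tf.concat([net, loc], 3)                                # [2, 3, 4, 8 + 3 + 4]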

Example 4: compute_column_softmax

# Required import: import tensorflow [as alias]
# Or: from tensorflow import tile [as alias]
def compute_column_softmax(self, column_controller_vector, time_step):
    #compute softmax over all the columns using column controller vector
    column_controller_vector = tf.tile(
        tf.expand_dims(column_controller_vector, 1),
        [1, self.num_cols + self.num_word_cols, 1])  #max_cols * bs * d
    column_controller_vector = nn_utils.apply_dropout(
        column_controller_vector, self.utility.FLAGS.dropout, self.mode)
    self.full_column_hidden_vectors = tf.concat(
        axis=1, values=[self.column_hidden_vectors, self.word_column_hidden_vectors])
    self.full_column_hidden_vectors += self.summary_text_entry_embeddings
    self.full_column_hidden_vectors = nn_utils.apply_dropout(
        self.full_column_hidden_vectors, self.utility.FLAGS.dropout, self.mode)
    column_logits = tf.reduce_sum(
        column_controller_vector * self.full_column_hidden_vectors, 2) + (
            self.params["word_match_feature_column_name"] *
            self.batch_column_exact_match) + self.full_column_mask
    column_softmax = tf.nn.softmax(column_logits)  #batch_size * max_cols
    return column_softmax 
Developer: ringringyi, Project: DOTA_models, Lines: 20, Source: model.py

Example 5: _create_initial_states

# Required import: import tensorflow [as alias]
# Or: from tensorflow import tile [as alias]
def _create_initial_states(self, stride):
    """Returns stacked and batched initial states for the bi-LSTM."""
    initial_states_forward = []
    initial_states_backward = []
    for index in range(len(self._hidden_layer_sizes)):
      # Retrieve the initial states for this layer.
      states_sxd = []
      for direction in ['forward', 'backward']:
        for substate in ['c', 'h']:
          state_1xd = self._component.get_variable('initial_state_%s_%s_%d' %
                                                   (direction, substate, index))
          state_sxd = tf.tile(state_1xd, [stride, 1])  # tile across the batch
          states_sxd.append(state_sxd)

      # Assemble and append forward and backward LSTM states.
      initial_states_forward.append(
          tf.contrib.rnn.LSTMStateTuple(states_sxd[0], states_sxd[1]))
      initial_states_backward.append(
          tf.contrib.rnn.LSTMStateTuple(states_sxd[2], states_sxd[3]))
    return initial_states_forward, initial_states_backward 
Developer: ringringyi, Project: DOTA_models, Lines: 22, Source: wrapped_units.py

Example 6: _batch_decode_refined_boxes

# Required import: import tensorflow [as alias]
# Or: from tensorflow import tile [as alias]
def _batch_decode_refined_boxes(self, refined_box_encodings, proposal_boxes):
    """Decode tensor of refined box encodings.

    Args:
      refined_box_encodings: a 3-D tensor with shape
        [batch_size, max_num_proposals, num_classes, self._box_coder.code_size]
        representing predicted (final) refined box encodings.
      proposal_boxes: [batch_size, self.max_num_proposals, 4] representing
        decoded proposal bounding boxes.

    Returns:
      refined_box_predictions: a [batch_size, max_num_proposals, num_classes, 4]
        float tensor representing (padded) refined bounding box predictions
        (for each image in batch, proposal and class).
    """
    tiled_proposal_boxes = tf.tile(
        tf.expand_dims(proposal_boxes, 2), [1, 1, self.num_classes, 1])
    tiled_proposals_boxlist = box_list.BoxList(
        tf.reshape(tiled_proposal_boxes, [-1, 4]))
    decoded_boxes = self._box_coder.decode(
        tf.reshape(refined_box_encodings, [-1, self._box_coder.code_size]),
        tiled_proposals_boxlist)
    return tf.reshape(decoded_boxes.get(),
                      [-1, self.max_num_proposals, self.num_classes, 4]) 
Developer: ringringyi, Project: DOTA_models, Lines: 26, Source: faster_rcnn_meta_arch.py

Example 7: _padded_batched_proposals_indicator

# Required import: import tensorflow [as alias]
# Or: from tensorflow import tile [as alias]
def _padded_batched_proposals_indicator(self,
                                          num_proposals,
                                          max_num_proposals):
    """Creates indicator matrix of non-pad elements of padded batch proposals.

    Args:
      num_proposals: Tensor of type tf.int32 with shape [batch_size].
      max_num_proposals: Maximum number of proposals per image (integer).

    Returns:
      A Tensor of type tf.bool with shape [batch_size, max_num_proposals].
    """
    batch_size = tf.size(num_proposals)
    tiled_num_proposals = tf.tile(
        tf.expand_dims(num_proposals, 1), [1, max_num_proposals])
    tiled_proposal_index = tf.tile(
        tf.expand_dims(tf.range(max_num_proposals), 0), [batch_size, 1])
    return tf.greater(tiled_num_proposals, tiled_proposal_index) 
Developer: ringringyi, Project: DOTA_models, Lines: 20, Source: faster_rcnn_meta_arch.py
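
The same indicator pattern outside the class, as a hedged sketch with illustrative values:

import tensorflow as tf

num_proposals = tf.constant([2, 3])    # valid proposal counts for a batch of two images
max_num_proposals = 4
batch_size = tf.size(num_proposals)
tiled_counts = tf.tile(tf.expand_dims(num_proposals, 1), [1, max_num_proposals])
tiled_index = tf.tile(tf.expand_dims(tf.range(max_num_proposals), 0), [batch_size, 1])
indicator = tf.greater(tiled_counts, tiled_index)
# indicator evaluates to:
# [[ True  True False False]
#  [ True  True  True False]]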

Example 8: __call__

# Required import: import tensorflow [as alias]
# Or: from tensorflow import tile [as alias]
def __call__(self, observation, state):
    with tf.variable_scope('policy'):
      x = tf.contrib.layers.flatten(observation)
      mean = tf.contrib.layers.fully_connected(
          x,
          self._action_size,
          tf.tanh,
          weights_initializer=self._mean_weights_initializer)
      logstd = tf.get_variable('logstd', mean.shape[1:], tf.float32,
                               self._logstd_initializer)
      logstd = tf.tile(logstd[None, ...],
                       [tf.shape(mean)[0]] + [1] * logstd.shape.ndims)
    with tf.variable_scope('value'):
      x = tf.contrib.layers.flatten(observation)
      for size in self._value_layers:
        x = tf.contrib.layers.fully_connected(x, size, tf.nn.relu)
      value = tf.contrib.layers.fully_connected(x, 1, None)[:, 0]
    return (mean, logstd, value), state 
Developer: utra-robosoccer, Project: soccer-matlab, Lines: 20, Source: networks.py

Example 9: project_hidden

# Required import: import tensorflow [as alias]
# Or: from tensorflow import tile [as alias]
def project_hidden(x, projection_tensors, hidden_size, num_blocks):
  """Project encoder hidden state into block_dim using projection tensors.

  Args:
    x: Encoder hidden state of shape [-1, hidden_size].
    projection_tensors: Projection tensors used to project the hidden state.
    hidden_size: Dimension of the latent space.
    num_blocks: Number of blocks in DVQ.

  Returns:
    Projected states of shape [-1, num_blocks, block_dim].
  """
  x = tf.reshape(x, shape=[1, -1, hidden_size])
  x_tiled = tf.reshape(
      tf.tile(x, multiples=[num_blocks, 1, 1]),
      shape=[num_blocks, -1, hidden_size])
  x_projected = tf.matmul(x_tiled, projection_tensors)
  x_projected = tf.transpose(x_projected, perm=[1, 0, 2])
  return x_projected 
Developer: akzaidi, Project: fine-lm, Lines: 21, Source: discretization.py
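
A hedged usage sketch of project_hidden, given the definition above; the sizes and random tensors are illustrative assumptions:

import tensorflow as tf

hidden_size, num_blocks, block_dim = 6, 3, 2
x = tf.random_normal([5, hidden_size])                          # [batch, hidden_size] encoder states
projection_tensors = tf.random_normal([num_blocks, hidden_size, block_dim])
out = project_hidden(x, projection_tensors, hidden_size, num_blocks)
# out has shape [batch, num_blocks, block_dim] = [5, 3, 2]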

Example 10: __init__

# Required import: import tensorflow [as alias]
# Or: from tensorflow import tile [as alias]
def __init__(self, num_keypoints, scale_factors=None):
    """Constructor for KeypointBoxCoder.

    Args:
      num_keypoints: Number of keypoints to encode/decode.
      scale_factors: List of 4 positive scalars to scale ty, tx, th and tw.
        In addition to scaling ty and tx, the first 2 scalars are used to scale
        the y and x coordinates of the keypoints as well. If set to None, does
        not perform scaling.
    """
    self._num_keypoints = num_keypoints

    if scale_factors:
      assert len(scale_factors) == 4
      for scalar in scale_factors:
        assert scalar > 0
    self._scale_factors = scale_factors
    self._keypoint_scale_factors = None
    if scale_factors is not None:
      self._keypoint_scale_factors = tf.expand_dims(tf.tile(
          [tf.to_float(scale_factors[0]), tf.to_float(scale_factors[1])],
          [num_keypoints]), 1) 
Developer: datitran, Project: object_detector_app, Lines: 24, Source: keypoint_box_coder.py
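
A hedged sketch of how the keypoint scale factors end up laid out; the numbers are illustrative assumptions:

import tensorflow as tf

scale_factors = [10., 5., 5., 5.]      # scales for ty, tx, th, tw
num_keypoints = 3
keypoint_scale_factors = tf.expand_dims(tf.tile(
    [tf.to_float(scale_factors[0]), tf.to_float(scale_factors[1])],
    [num_keypoints]), 1)
# shape [2 * num_keypoints, 1] = [6, 1], alternating y and x scales:
# [[10.], [5.], [10.], [5.], [10.], [5.]]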

Example 11: call

# Required import: import tensorflow [as alias]
# Or: from tensorflow import tile [as alias]
def call(self, seq_value_len_list, mask=None, **kwargs):
        if self.supports_masking:
            if mask is None:
                raise ValueError(
                    "When supports_masking=True,input must support masking")
            uiseq_embed_list = seq_value_len_list
            mask = tf.to_float(mask)
            user_behavior_length = tf.reduce_sum(mask, axis=-1, keep_dims=True)
            mask = tf.expand_dims(mask, axis=2)
        else:
            uiseq_embed_list, user_behavior_length = seq_value_len_list

            mask = tf.sequence_mask(user_behavior_length,
                                    self.seq_len_max, dtype=tf.float32)
            mask = tf.transpose(mask, (0, 2, 1))

        embedding_size = uiseq_embed_list.shape[-1]

        mask = tf.tile(mask, [1, 1, embedding_size])

        uiseq_embed_list *= mask
        hist = uiseq_embed_list
        if self.mode == "max":
            return tf.reduce_max(hist, 1, keep_dims=True)

        hist = tf.reduce_sum(hist, 1, keep_dims=False)

        if self.mode == "mean":
            hist = tf.div(hist, user_behavior_length+self.eps)

        hist = tf.expand_dims(hist, axis=1)
        return hist 
Developer: ShenDezhou, Project: icme2019, Lines: 34, Source: sequence.py

Example 12: tf_repeat

# Required import: import tensorflow [as alias]
# Or: from tensorflow import tile [as alias]
def tf_repeat(output, idx, dim1, dim2, bias):
    # tensor equivalent of np.repeat
    # 1d to 3d array tensor
    if bias:
        idx = tf.tile(idx, [1, dim1 * dim2])
        idx = tf.reshape(idx, [-1, dim1, dim2])
        return output * idx
    else:
        return output 
Developer: acheketa, Project: cwavegan, Lines: 11, Source: tpu_model.py

Example 13: tf_repeat

# Required import: import tensorflow [as alias]
# Or: from tensorflow import tile [as alias]
def tf_repeat(output, idx, dim1, dim2):
    # tensor equivalent of np.repeat
    # 1d to 3d array tensor
    idx = tf.tile(idx, [1, dim1 * dim2])
    idx = tf.reshape(idx, [-1, dim1, dim2])
    return output * idx 
Developer: acheketa, Project: cwavegan, Lines: 8, Source: wavegan.py

Example 14: tf_repeat

# Required import: import tensorflow [as alias]
# Or: from tensorflow import tile [as alias]
def tf_repeat(tensor, repeats):
    expanded_tensor = tf.expand_dims(tensor, -1)
    multiples = [1] + repeats
    tiled_tensor = tf.tile(expanded_tensor, multiples=multiples)
    repeated_tensor = tf.reshape(tiled_tensor, tf.shape(tensor) * repeats)
    return repeated_tensor

#----------------------------------------------------------------------------
# Generator loss function used in the paper (WGAN + AC-GAN). 
Developer: zalandoresearch, Project: disentangling_conditional_gans, Lines: 11, Source: loss.py
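
A hedged usage sketch of the tf_repeat helper, given the definition above (TF 1.x; the example tensor is an illustrative assumption):

import tensorflow as tf

x = tf.constant([[1, 2],
                 [3, 4]])
y = tf_repeat(x, repeats=[2, 3])    # np.repeat-style: shape [2, 2] -> [4, 6]

with tf.Session() as sess:
    print(sess.run(y))
    # [[1 1 1 2 2 2]
    #  [1 1 1 2 2 2]
    #  [3 3 3 4 4 4]
    #  [3 3 3 4 4 4]]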

Example 15: upscale2d

# Required import: import tensorflow [as alias]
# Or: from tensorflow import tile [as alias]
def upscale2d(x, factor=2):
    assert isinstance(factor, int) and factor >= 1
    if factor == 1: return x
    with tf.variable_scope('Upscale2D'):
        s = x.shape
        x = tf.reshape(x, [-1, s[1], s[2], 1, s[3], 1])
        x = tf.tile(x, [1, 1, 1, factor, 1, factor])
        x = tf.reshape(x, [-1, s[1], s[2] * factor, s[3] * factor])
        return x

#----------------------------------------------------------------------------
# Fused upscale2d + conv2d.
# Faster and uses less memory than performing the operations separately. 
Developer: zalandoresearch, Project: disentangling_conditional_gans, Lines: 15, Source: networks.py
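
A hedged usage sketch of upscale2d, given the definition above; it performs nearest-neighbour upsampling on an NCHW tensor, and the input values are illustrative assumptions:

import tensorflow as tf

x = tf.reshape(tf.constant([1., 2., 3., 4.]), [1, 1, 2, 2])   # [N=1, C=1, H=2, W=2]
y = upscale2d(x, factor=2)                                    # -> shape [1, 1, 4, 4]

with tf.Session() as sess:
    print(sess.run(y)[0, 0])
    # [[1. 1. 2. 2.]
    #  [1. 1. 2. 2.]
    #  [3. 3. 4. 4.]
    #  [3. 3. 4. 4.]]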


Note: The tensorflow.tile examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by many developers; copyright in the source code remains with the original authors, and redistribution or use should follow the corresponding projects' licenses. Please do not repost without permission.