

Python tensorflow.reverse Function Code Examples

This article collects typical usage examples of the Python function tensorflow.reverse. If you have been wondering what exactly tensorflow.reverse does, how to use it, or where to find usage examples, the curated code samples below may help.


Below are 15 code examples of the reverse function, ordered by popularity by default.
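
Before the examples, a quick note on the API: in TensorFlow 1.0 and later, tf.reverse takes the tensor and a list of axis indices to flip, while releases before 1.0 (which several of the snippets below target) instead took one boolean per dimension. A minimal sketch of the current signature:

import tensorflow as tf

x = tf.constant([[1, 2, 3],
                 [4, 5, 6]])
rows_flipped = tf.reverse(x, axis=[0])  # [[4, 5, 6], [1, 2, 3]]
cols_flipped = tf.reverse(x, axis=[1])  # [[3, 2, 1], [6, 5, 4]]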

Example 1: sparse_transpose

def sparse_transpose(sp_input):
    # Transpose a 2-D SparseTensor: swap the (row, col) coordinate of every
    # nonzero entry, reverse the dense shape, then reorder the entries back
    # into canonical row-major order. (Written against the pre-1.0 tf.reverse
    # signature, which takes one boolean per dimension instead of axis indices.)
    transposed_indices = tf.reverse(tf.cast(sp_input.indices, tf.int32), [False, True])
    transposed_values = sp_input.values
    transposed_shape = tf.reverse(tf.cast(sp_input.shape, tf.int32), [True])
    sp_output = tf.SparseTensor(tf.cast(transposed_indices, tf.int64), transposed_values, tf.cast(transposed_shape, tf.int64))
    sp_output = tf.sparse_reorder(sp_output)
    return sp_output
Author: hedgefair, Project: pycodesuggest, Source: tfutils.py
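
A hypothetical usage sketch, assuming the TF ≤0.12 environment the snippet targets (SparseTensor exposing .shape and tf.sparse_reorder being available):

indices = tf.constant([[0, 1], [1, 2]], dtype=tf.int64)
values = tf.constant([1.0, 2.0])
shape = tf.constant([2, 3], dtype=tf.int64)
sp = tf.SparseTensor(indices, values, shape)
sp_t = sparse_transpose(sp)  # a 3x2 SparseTensor with entries at (1, 0) and (2, 1)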

Example 2: ndlstm_base_dynamic

def ndlstm_base_dynamic(inputs, noutput, scope=None, reverse=False):
  """Run an LSTM, either forward or backward.

  This is a 1D LSTM implementation using dynamic_rnn and
  the TensorFlow LSTM op.

  Args:
    inputs: input sequence (length, batch_size, ninput)
    noutput: depth of output
    scope: optional scope name
    reverse: run LSTM in reverse

  Returns:
    Output sequence (length, batch_size, noutput)
  """
  with tf.variable_scope(scope, "SeqLstm", [inputs]):
    # TODO(tmb) make batch size, sequence_length dynamic
    # example: sequence_length = tf.shape(inputs)[0]
    _, batch_size, _ = _shape(inputs)
    lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(noutput, state_is_tuple=False)
    state = tf.zeros([batch_size, lstm_cell.state_size])
    sequence_length = int(inputs.get_shape()[0])
    sequence_lengths = tf.to_int64(tf.fill([batch_size], sequence_length))
    if reverse:
      inputs = tf.reverse(inputs, [True, False, False])
    outputs, _ = tf.nn.dynamic_rnn(lstm_cell,
                                   inputs,
                                   sequence_lengths,
                                   state,
                                   time_major=True)
    if reverse:
      outputs = tf.reverse(outputs, [True, False, False])
    return outputs
Author: brchiu, Project: tensorflow, Source: lstm1d.py
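
A hypothetical usage sketch under the same pre-1.0 assumptions as the snippet (including its module-level _shape helper), with a statically known sequence length:

inputs = tf.placeholder(tf.float32, [20, 8, 32])  # (length, batch_size, ninput)
outputs = ndlstm_base_dynamic(inputs, noutput=64, reverse=True)
# outputs: (20, 8, 64), computed right-to-left and flipped back to input order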

Example 3: cummax

def cummax(x, reverse=False, name=None):
    """Compute the cumulative maximum of the tensor `x` along its first axis.
    This operation is similar to the more classic `cumsum`. Only 1-D tensors
    are supported for now.

    Args:
      x: A `Tensor`. Must be one of the following types: `float32`, `float64`,
        `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
        `complex128`, `qint8`, `quint8`, `qint32`, `half`.
      reverse: A `bool` (default: False). If True, accumulate the maximum
        from the end of the tensor towards the beginning.
      name: A name for the operation (optional).
    Returns:
      A `Tensor`. Has the same type as `x`.
    """
    with ops.name_scope(name, "Cummax", [x]) as name:
        x = ops.convert_to_tensor(x, name="x")
        # Not very optimal: should directly integrate reverse into tf.scan.
        if reverse:
            x = tf.reverse(x, axis=[0])
        # 'Accumulating' maximum: ensure it is always increasing.
        cmax = tf.scan(lambda a, y: tf.maximum(a, y), x,
                       initializer=None, parallel_iterations=1,
                       back_prop=False, swap_memory=False)
        if reverse:
            cmax = tf.reverse(cmax, axis=[0])
        return cmax
Author: bowrian, Project: SSD-Tensorflow, Source: math.py
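
A usage sketch; the expected outputs follow directly from the definition:

x = tf.constant([3., 1., 4., 1., 5.])
forward = cummax(x)                 # running maximum: [3., 3., 4., 4., 5.]
backward = cummax(x, reverse=True)  # suffix maximum:  [5., 5., 5., 5., 5.]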

Example 4: get_states_b

    def get_states_b(self):
        """
        Iterates through time/sequence to get all hidden states
        """

        all_hidden_states, all_memory_states = self.get_states_f()

        # Reverse along time so that index 0 holds the final hidden and
        # memory states of the forward pass
        last_hidden_states = tf.reverse(
            all_hidden_states, [True, False, False])[0, :, :]
        last_memory_states = tf.reverse(
            all_memory_states, [True, False, False])[0, :, :]

        # For backward pass using the last hidden and memory of the forward
        # pass
        initial_hidden = tf.pack([last_hidden_states, last_memory_states])

        # Getting all hidden states through time
        all_hidden_memory_states = tf.scan(self.Lstm_b,
                                           self.processed_input_rev,
                                           initializer=initial_hidden,
                                           name='states')

        # Reverse the states back into their original (forward-time) order
        all_hidden_states = tf.reverse(all_hidden_memory_states[
                                       :, 0, :, :], [True, False, False])
        all_memory_states = tf.reverse(all_hidden_memory_states[
                                       :, 1, :, :], [True, False, False])

        return all_hidden_states, all_memory_states
Author: RoGoSo, Project: Tensorflow-tutorial, Source: bi_directional_lstm.py

Example 5: forward

    def forward(self, x, x_mask=None, context_layer=None):

        assert not (self.reverse_alternation and x_mask is None)

#        assert (context_layer == None or
#                tf.shape(context_layer)[-1] == self.context_state_size)

        def create_step_fun(gru):
            def step_fn(prev_state, x):
                gates_x2d, proposal_x2d = x[0], x[1]
                new_state = gru.forward(prev_state,
                                        gates_x=gates_x2d,
                                        proposal_x=proposal_x2d)
                if len(x) > 2:
                    mask = x[2]
                    new_state *= mask  # batch x 1
                    # The first few states of the reversed encoder should be
                    # zero, which is why we multiply by the mask: when the
                    # reversed encoder reaches the actual words, the state is
                    # zeros rather than accumulated garbage.
                return new_state
            return step_fn

        init_state = tf.zeros(shape=[self.batch_size, self.state_size],
                              dtype=tf.float32)
        if x_mask is not None:
            x_mask_r = tf.reverse(x_mask, axis=[0])
            x_mask_bwd = tf.expand_dims(x_mask_r, axis=[2]) #seqLen x batch x 1

        for i, gru in enumerate(self.grus):
            layer = RecurrentLayer(initial_state=init_state,
                                   step_fn=create_step_fun(gru))
            if context_layer is None:
                x2 = x
            else:
                x2 = tf.concat([x, context_layer], axis=-1)
            if not self.alternating:
                left_to_right = True
            else:
                if self.reverse_alternation:
                    left_to_right = (i % 2 == 1)
                else:
                    left_to_right = (i % 2 == 0)
            if left_to_right:
                # Recurrent state flows from left to right in this layer.
                gates_x, proposal_x = gru.precompute_from_x(x2)
                h = layer.forward((gates_x, proposal_x))
            else:
                # Recurrent state flows from right to left in this layer.
                x2_reversed = tf.reverse(x2, axis=[0])
                gates_x, proposal_x = gru.precompute_from_x(x2_reversed)
                h_reversed = layer.forward((gates_x, proposal_x, x_mask_bwd))
                h = tf.reverse(h_reversed, axis=[0])
            # Compute the word states, which will become the input for the
            # next layer (or the output of the stack if we're at the top).
            if i == 0:
                x = h
            else:
                x += h # Residual connection
        return x
Author: rsennrich, Project: nematus, Source: layers.py

Example 6: embed_sequences

 def embed_sequences(self, embed_sequence_batch):
     """Return sentence embeddings as a tensor with with shape
     [batch_size, hidden_size * 2]
     """
     forward_values = embed_sequence_batch.values
     forward_mask = embed_sequence_batch.mask
     backward_values = tf.reverse(forward_values, [False, True, False])
     backward_mask = tf.reverse(forward_mask, [False, True])
     # Initialize LSTMs
     self._forward_lstm = LSTM(self.hidden_size, return_sequences=True)
     self._backward_lstm = LSTM(self.hidden_size, return_sequences=True)
     # Pass input through the LSTMs
     # Shape: (batch_size, seq_length, hidden_size)
     forward_seq = self._forward_lstm(forward_values, forward_mask)
     forward_seq.set_shape((None, self.seq_length, self.hidden_size))
     backward_seq = self._backward_lstm(backward_values, backward_mask)
     backward_seq.set_shape((None, self.seq_length, self.hidden_size))
     # Stitch the outputs together --> hidden states (for computing attention)
     # Final dimension: (batch_size, seq_length, hidden_size * 2)
     lstm_states = tf.concat(2, [forward_seq, tf.reverse(backward_seq, [False, True, False])])
     self._hidden_states = SequenceBatch(lstm_states, forward_mask)
     # Stitch the final outputs together --> sequence embedding
     # Final dimension: (batch_size, hidden_size * 2)
     seq_length = tf.shape(forward_values)[1]
     forward_final = tf.slice(forward_seq, [0, seq_length - 1, 0], [-1, 1, self.hidden_size])
     backward_final = tf.slice(backward_seq, [0, seq_length - 1, 0], [-1, 1, self.hidden_size])
     return tf.squeeze(tf.concat(2, [forward_final, backward_final]), [1])
Author: siddk, Project: lang2program, Source: model.py

Example 7: random_transformation2

def random_transformation2(x, y, padding, phase_train, rnd_vflip=True, rnd_hflip=True, rnd_transpose=True, rnd_colour=False):
    """
    Perform random crop, flip, transpose, hue, saturation, brightness, contrast.

    Args:
        x: [B, H, W, 3]
        y: [B, H, W, T] (the code reads the last dimension of y as its depth)
        padding: int
        phase_train: bool
    """
    # Random image transformation layers.
    phase_train_f = tf.to_float(phase_train)
    x_shape = tf.shape(x)
    y_shape = tf.shape(y)
    num_ex = x_shape[0]
    inp_height = x_shape[1]
    inp_width = x_shape[2]
    inp_depth_x = x_shape[3]
    inp_depth_y = y_shape[3]

    # Add padding
    x_pad = tf.pad(x, [[0, 0], [padding, padding], [padding, padding], [0, 0]])
    y_pad = tf.pad(y, [[0, 0], [padding, padding], [padding, padding], [0, 0]])

    # Random crop
    offset = tf.random_uniform([2], dtype='int32', maxval=padding * 2)
    x_rand = tf.slice(x_pad, tf.pack([0, offset[0], offset[1], 0]),
                      tf.pack([-1, inp_height, inp_width, inp_depth_x]))
    y_rand = tf.slice(y_pad, tf.pack([0, offset[0], offset[1], 0]),
                      tf.pack([-1, inp_height, inp_width, inp_depth_y]))

    # Center slices (for inference)
    x_ctr = tf.slice(x_pad, [0, padding, padding, 0],
                     tf.pack([-1, inp_height, inp_width, -1]))
    y_ctr = tf.slice(y_pad, [0, padding, padding, 0],
                     tf.pack([-1, inp_height, inp_width, -1]))

    # Random horizontal & vertical flip & transpose
    rand_h = tf.random_uniform([1], 1.0 - float(rnd_hflip), 1.0)
    rand_v = tf.random_uniform([1], 1.0 - float(rnd_vflip), 1.0)
    mirror = tf.pack([1.0, rand_v[0], rand_h[0], 1.0]) < 0.5
    x_rand = tf.reverse(x_rand, mirror)
    y_rand = tf.reverse(y_rand, mirror)
    rand_t = tf.random_uniform([1], 1.0 - float(rnd_transpose), 1.0)
    do_tr = tf.cast(rand_t[0] < 0.5, 'int32')
    x_rand = tf.transpose(x_rand, tf.pack([0, 1 + do_tr, 2 - do_tr, 3]))
    y_rand = tf.transpose(y_rand, tf.pack([0, 1 + do_tr, 2 - do_tr, 3]))

    # Random hue, saturation, brightness, contrast
    if rnd_colour:
        x_rand = random_hue(x_rand, 0.1)
        x_rand = random_saturation(x_rand, 0.9, 1.1)
        x_rand = tf.image.random_brightness(x_rand, 0.1)
        x_rand = tf.image.random_contrast(x_rand, 0.9, 1.1)

    x = (1.0 - phase_train_f) * x_ctr + phase_train_f * x_rand
    y = (1.0 - phase_train_f) * y_ctr + phase_train_f * y_rand

    return x, y
Author: lrjconan, Project: img-count, Source: image_ops.py
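
A hypothetical call, assuming a TF ≤0.12 graph (tf.pack, bool-mask tf.reverse) and the channels-last layout for y noted in the docstring above:

x_in = tf.placeholder(tf.float32, [None, 224, 224, 3])
y_in = tf.placeholder(tf.float32, [None, 224, 224, 5])
phase_train = tf.placeholder(tf.bool, [])
x_aug, y_aug = random_transformation2(x_in, y_in, padding=16,
                                      phase_train=phase_train)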

Example 8: _compute_rnn_outputs

 def _compute_rnn_outputs(self):
     # Run the LSTM over time-reversed inputs and resets, then flip the
     # outputs back so they line up with the original time order.
     reversed_inputs = tf.reverse(self.inputs, [False, True, False])
     reversed_resets = tf.reverse(self.resets, [False, True, False])
     self._rv_lstm = LSTM(reversed_inputs, reversed_resets, self.training,
                          self.num_layers, self.hidden_layer_size,
                          self.init_scale, self.dropout_keep_prob)
     outputs = tf.reverse(self._rv_lstm.outputs, [False, True, False])
     return outputs
Author: rdipietro, Project: miccai-2016-surgical-activity-rec, Source: models.py

Example 9: image_distortions

def image_distortions(image, distortions):
    # Randomly mirror the image. `distortions` holds two values in [0, 1);
    # a value below 0.5 triggers a flip along the corresponding spatial axis
    # (pre-1.0 tf.reverse, which takes one boolean per dimension).
    distort_left_right_random = distortions[0]
    mirror = tf.less(tf.pack([1.0, distort_left_right_random, 1.0]), 0.5)
    image = tf.reverse(image, mirror)
    distort_up_down_random = distortions[1]
    mirror = tf.less(tf.pack([distort_up_down_random, 1.0, 1.0]), 0.5)
    image = tf.reverse(image, mirror)
    return image
Author: mtourne, Project: nerveseg, Source: nerveseg_input.py
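
A hypothetical usage sketch; drawing the two distortion values uniformly from [0, 1) makes each flip fire with probability 0.5:

image = tf.random_uniform([128, 128, 3])
distortions = tf.random_uniform([2], 0.0, 1.0)
distorted = image_distortions(image, distortions)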

Example 10: reconstruct

  def reconstruct(self, inputs, samples=1, sample_static=False,
                  sample_dynamic=False, swap_static=False, swap_dynamic=False,
                  fix_static=False, fix_dynamic=False):
    """Reconstruct the given input sequences.

    Args:
      inputs: A batch of image sequences `x_{1:T}` of shape
        `[batch_size, timesteps, height, width, channels]`.
      samples: Number of samples to draw from the latent distributions.
      sample_static: Boolean for whether or not to randomly sample the
        static latent variable `f` from its prior distribution.
      sample_dynamic: Boolean for whether or not to randomly sample the
        dynamic latent variable `z_{1:T}` from its prior distribution.
      swap_static: Boolean for whether or not to swap the encodings for
        the static latent variable `f` between the examples.
      swap_dynamic: Boolean for whether or not to swap the encodings for
        the dynamic latent variable `z_{1:T}` between the examples.
      fix_static: Boolean for whether or not to share the same random
        sample of the static latent variable `f` from its prior across
        all examples.
      fix_dynamic: Boolean for whether or not to share the same random
        sample of the dynamic latent variable `z_{1:T}` from its prior
        across all examples.

    Returns:
      A batched Independent distribution wrapping a set of Normal
      distributions over the pixels of the reconstruction of the input,
      where the Independent distribution has event shape [height, width,
      channels], batch shape [samples, batch_size, timesteps], and
      sample shape [sample_shape, samples, batch_size, timesteps,
      height, width, channels].
    """
    batch_size = tf.shape(inputs)[-5]
    length = len(tf.unstack(inputs, axis=-4))  # hack for graph mode

    features = self.compressor(inputs)  # (..., batch, timesteps, hidden)

    if sample_static:
      static_sample, _ = self.sample_static_prior(
          samples, batch_size, fix_static)
    else:
      static_sample, _ = self.sample_static_posterior(features, samples)

    if swap_static:
      static_sample = tf.reverse(static_sample, axis=[1])

    if sample_dynamic:
      dynamic_sample, _ = self.sample_dynamic_prior(
          samples, batch_size, length, fix_dynamic)
    else:
      dynamic_sample, _ = self.sample_dynamic_posterior(
          features, samples, static_sample)

    if swap_dynamic:
      dynamic_sample = tf.reverse(dynamic_sample, axis=[1])

    likelihood = self.decoder((dynamic_sample, static_sample))
    return likelihood
Author: asudomoeva, Project: probability, Source: disentangled_vae.py

Example 11: __call__

    def __call__(self, cell_output, scores, scores_state, ignore_mask):
        # apply exponential moving average with interpolation gate weight
        # to scores from previous time which are equal to probs at this point
        # different from original NTM where it is applied after softmax
        i_g = self.inter_gate(cell_output)

        # scores limited by time
        scores = tf.concat([i_g * scores[:, :-1] + (1 - i_g) * scores_state,
                            scores[:, -1:]], 1)
        next_scores_state = scores

        # create probabilities for attention
        if self.sparse_attention:
            probs = tf.contrib.sparsemax.sparsemax(scores)
        else:
            probs = tf.nn.softmax(scores)

        if self.shift_weight is not None:
            s_w = self.shift_weight(cell_output)

            # we want to go back in time during convolution
            conv_probs = tf.reverse(probs, axis=[1])

            # prepare probs for tf.nn.depthwise_conv2d
            # [in_width, in_channels=batch]
            conv_probs = tf.transpose(conv_probs, [1, 0])
            # [batch=1, in_height=1, in_width=time+1, in_channels=batch]
            conv_probs = conv_probs[tf.newaxis, tf.newaxis, :, :]

            # [filter_height=1, filter_width=2*attn_shift_range+1,
            #   in_channels=batch, channel_multiplier=1]
            conv_s_w = tf.transpose(s_w, [1, 0])
            conv_s_w = conv_s_w[tf.newaxis, :, :, tf.newaxis]

            # perform 1d convolution
            # [batch=1, out_height=1, out_width=time+1, out_channels=batch]
            conv_probs = tf.nn.depthwise_conv2d_native(conv_probs, conv_s_w,
                                                       [1, 1, 1, 1], 'SAME')
            conv_probs = conv_probs[0, 0, :, :]
            conv_probs = tf.transpose(conv_probs, [1, 0])

            probs = tf.reverse(conv_probs, axis=[1])

        # Sharpening
        g_sh = self.gamma_sharp(cell_output)

        powed_probs = tf.pow(probs, g_sh)
        probs = powed_probs / (
                tf.reduce_sum(powed_probs, 1, keepdims=True) + 1e-32)

        # set probs for no intents and action_listens to zero
        if ignore_mask is not None:
            probs = tf.concat([tf.where(ignore_mask,
                                        tf.zeros_like(probs[:, :-1]),
                                        probs[:, :-1]),
                               probs[:, -1:]], 1)
        return probs, next_scores_state
Author: rohitjun08, Project: rasa_core, Source: tf_utils.py

Example 12: discounted_return

def discounted_return(reward, length, discount):
  """Discounted Monte-Carlo returns."""
  timestep = tf.range(reward.shape[1].value)
  mask = tf.cast(timestep[None, :] < length[:, None], tf.float32)
  return_ = tf.reverse(tf.transpose(tf.scan(
      lambda agg, cur: cur + discount * agg,
      tf.transpose(tf.reverse(mask * reward, [1]), [1, 0]),
      tf.zeros_like(reward[:, -1]), 1, False), [1, 0]), [1])
  return tf.check_numerics(tf.stop_gradient(return_), 'return')
Author: shamanez, Project: agents, Source: utility.py
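
A small worked example; the backward recursion return_t = reward_t + discount * return_{t+1} produces the expected values:

reward = tf.constant([[1.0, 1.0, 1.0]])
length = tf.constant([3])
ret = discounted_return(reward, length, discount=0.9)
# ret ≈ [[2.71, 1.90, 1.00]], since 1 + 0.9 * (1 + 0.9 * 1) = 2.71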

Example 13: lambda_advantage

def lambda_advantage(reward, value, length, discount):
  """Generalized Advantage Estimation."""
  timestep = tf.range(reward.shape[1].value)
  mask = tf.cast(timestep[None, :] < length[:, None], tf.float32)
  next_value = tf.concat([value[:, 1:], tf.zeros_like(value[:, -1:])], 1)
  delta = reward + discount * next_value - value
  advantage = tf.reverse(tf.transpose(tf.scan(
      lambda agg, cur: cur + discount * agg,
      tf.transpose(tf.reverse(mask * delta, [1]), [1, 0]),
      tf.zeros_like(delta[:, -1]), 1, False), [1, 0]), [1])
  return tf.check_numerics(tf.stop_gradient(advantage), 'advantage')
Author: shamanez, Project: agents, Source: utility.py
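
As written, the single `discount` factor drives both the TD error and the backward accumulation, which amounts to GAE with λ = 1. A small worked example:

reward = tf.constant([[1.0, 1.0]])
value = tf.constant([[0.5, 0.5]])
length = tf.constant([2])
adv = lambda_advantage(reward, value, length, discount=0.9)
# deltas: [1 + 0.9*0.5 - 0.5, 1 + 0 - 0.5] = [0.95, 0.50]
# adv:    [0.95 + 0.9*0.5, 0.50]           = [1.40, 0.50]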

Example 14: ni_slice

def ni_slice(sub_values, last_ind, axis=0):
  # For a negative last_ind, behaves like sub_values[:last_ind] along `axis`:
  # reverse, drop the first |last_ind| entries, then reverse back. (`shape`
  # is presumably a module-level helper returning the static shape as a list.)
  # TODO: Allow both to be negative indexed...
  ndims = len(shape(sub_values))
  im1 = 0 + abs(last_ind)
  i = [[None, None]] * ndims
  i[axis] = [im1, None]
  am = [False] * ndims
  am[axis] = True
  sl = [slice(*ii) for ii in i]
  ti = tf.reverse(sub_values, am)[sl]
  return tf.reverse(ti, am)
Author: danabo, Project: magenta, Source: multi_event_rnn_lib.py
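
A sketch of what the double reverse achieves, under the assumption about the `shape` helper noted in the comment above:

x = tf.constant([1, 2, 3, 4, 5])
head = ni_slice(x, last_ind=-2)  # behaves like x[:-2] along axis 0 -> [1, 2, 3]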

Example 15: lambda_return

def lambda_return(reward, value, length, discount, lambda_):
  """TD-lambda returns."""
  timestep = tf.range(reward.shape[1].value)
  mask = tf.cast(timestep[None, :] < length[:, None], tf.float32)
  sequence = mask * reward + discount * value * (1 - lambda_)
  discount = mask * discount * lambda_
  sequence = tf.stack([sequence, discount], 2)
  return_ = tf.reverse(tf.transpose(tf.scan(
      lambda agg, cur: cur[0] + cur[1] * agg,
      tf.transpose(tf.reverse(sequence, [1]), [1, 2, 0]),
      tf.zeros_like(value[:, -1]), 1, False), [1, 0]), [1])
  return tf.check_numerics(tf.stop_gradient(return_), 'return')
Author: shamanez, Project: agents, Source: utility.py
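
A hypothetical call; with lambda_ = 1 the result reduces to the Monte-Carlo discounted return of Example 12, while lambda_ = 0 yields a one-step bootstrapped target under the snippet's value-indexing convention:

reward = tf.constant([[1.0, 1.0, 1.0]])
value = tf.constant([[0.5, 0.5, 0.5]])
length = tf.constant([3])
ret = lambda_return(reward, value, length, discount=0.9, lambda_=0.95)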


Note: The tensorflow.reverse examples above were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from community-contributed open-source projects; copyright belongs to the original authors, and any use or redistribution must follow the corresponding project's license. Please do not reproduce without permission.