Python tensorflow.unstack Code Examples

This article collects typical usage examples of the Python method tensorflow.unstack. If you are wondering what tensorflow.unstack does, how to call it, or what it looks like in real code, the curated examples below should help. You can also browse usage examples of other methods in the tensorflow module.


The following presents 15 code examples of tensorflow.unstack, sorted by popularity by default.
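Before the examples, here is a minimal sketch of what tf.unstack does (assuming TensorFlow 1.x, which all of the examples below target): it splits a rank-R tensor into a Python list of rank-(R-1) tensors along a chosen axis, the inverse of tf.stack.

import tensorflow as tf

# A [2, 3] tensor: 2 rows, 3 columns.
t = tf.constant([[1, 2, 3],
                 [4, 5, 6]])

rows = tf.unstack(t, axis=0)  # list of 2 tensors, each of shape [3]
cols = tf.unstack(t, axis=1)  # list of 3 tensors, each of shape [2]

with tf.Session() as sess:
    print(sess.run(rows))  # [array([1, 2, 3]), array([4, 5, 6])]
    print(sess.run(cols))  # [array([1, 4]), array([2, 5]), array([3, 6])]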

Example 1: stackedRNN

# Required import: import tensorflow [as alias]
# Or: from tensorflow import unstack [as alias]
def stackedRNN(self, x, dropout, scope, embedding_size, sequence_length, hidden_units):
        n_hidden=hidden_units
        n_layers=3
        # Prepare data shape to match `static_rnn` function requirements
        x = tf.unstack(tf.transpose(x, perm=[1, 0, 2]))
        # print(x)
        # Define lstm cells with tensorflow
        # Forward direction cell

        with tf.name_scope("fw"+scope),tf.variable_scope("fw"+scope):
            stacked_rnn_fw = []
            for _ in range(n_layers):
                fw_cell = tf.nn.rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0, state_is_tuple=True)
                lstm_fw_cell = tf.contrib.rnn.DropoutWrapper(fw_cell,output_keep_prob=dropout)
                stacked_rnn_fw.append(lstm_fw_cell)
            lstm_fw_cell_m = tf.nn.rnn_cell.MultiRNNCell(cells=stacked_rnn_fw, state_is_tuple=True)

            outputs, _ = tf.nn.static_rnn(lstm_fw_cell_m, x, dtype=tf.float32)
        return outputs[-1] 
Author: dhwajraj, Project: deep-siamese-text-similarity, Lines: 21, Source: siamese_network_semantic.py
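The core move above is converting the batch-major input [batch_size, sequence_length, embedding_size] into the time-major list that tf.nn.static_rnn expects. A small sketch with hypothetical shapes (batch 32, 10 steps, 128-dim embeddings):

x = tf.placeholder(tf.float32, [32, 10, 128])
# [batch, seq, emb] -> [seq, batch, emb], then split along axis 0:
steps = tf.unstack(tf.transpose(x, perm=[1, 0, 2]))
# steps is a list of 10 tensors, each of shape [32, 128].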

Example 2: _create_lstm_inputs

# Required import: import tensorflow [as alias]
# Or: from tensorflow import unstack [as alias]
def _create_lstm_inputs(self, net):
    """Splits an input tensor into a list of tensors (features).

    Args:
      net: A feature map of shape [batch_size, num_features, feature_size].

    Raises:
      AssertionError: if num_features is less than seq_length.

    Returns:
      A list with seq_length tensors of shape [batch_size, feature_size]
    """
    num_features = net.get_shape().dims[1].value
    if num_features < self._params.seq_length:
      raise AssertionError('Incorrect dimension #1 of input tensor:'
                           ' %d should be at least %d (shape=%s)' %
                           (num_features, self._params.seq_length,
                            net.get_shape()))
    elif num_features > self._params.seq_length:
      logging.warning('Ignoring some features: use %d of %d (shape=%s)',
                      self._params.seq_length, num_features, net.get_shape())
      net = tf.slice(net, [0, 0, 0], [-1, self._params.seq_length, -1])

    return tf.unstack(net, axis=1) 
Author: ringringyi, Project: DOTA_models, Lines: 26, Source: model.py
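A quick sketch of the trimming branch above, with hypothetical shapes (batch 2, 5 features of size 8, seq_length 3):

net = tf.zeros([2, 5, 8])
net = tf.slice(net, [0, 0, 0], [-1, 3, -1])  # keep only the first 3 features
features = tf.unstack(net, axis=1)           # list of 3 tensors of shape [2, 8]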

Example 3: lstm_online

# Required import: import tensorflow [as alias]
# Or: from tensorflow import unstack [as alias]
def lstm_online(cell_fn, num_steps, inputs, state, varscope):
  # inputs is B x num_steps x C (C channels).
  # state is a 2-tuple of B x 1 x C1, B x 1 x C2.
  # The output state is always B x 1 x C.
  inputs = tf.unstack(inputs, axis=1, num=num_steps)
  state = tf.unstack(state, axis=1, num=1)[0]
  outputs = [] 
  
  if num_steps > 1: 
    varscope.reuse_variables()
  
  for s in range(num_steps):
    output, state = cell_fn(inputs[s], state)
    outputs.append(output)
  outputs = tf.stack(outputs, axis=1)
  state = tf.stack([state], axis=1)
  return outputs, state 
Author: ringringyi, Project: DOTA_models, Lines: 19, Source: vision_baseline_lstm.py

Example 4: clip_to_window

# Required import: import tensorflow [as alias]
# Or: from tensorflow import unstack [as alias]
def clip_to_window(keypoints, window, scope=None):
  """Clips keypoints to a window.

  This op clips any input keypoints to a window.

  Args:
    keypoints: a tensor of shape [num_instances, num_keypoints, 2]
    window: a tensor of shape [4] representing the [y_min, x_min, y_max, x_max]
      window to which the op should clip the keypoints.
    scope: name scope.

  Returns:
    new_keypoints: a tensor of shape [num_instances, num_keypoints, 2]
  """
  with tf.name_scope(scope, 'ClipToWindow'):
    y, x = tf.split(value=keypoints, num_or_size_splits=2, axis=2)
    win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window)
    y = tf.maximum(tf.minimum(y, win_y_max), win_y_min)
    x = tf.maximum(tf.minimum(x, win_x_max), win_x_min)
    new_keypoints = tf.concat([y, x], 2)
    return new_keypoints 
Author: ringringyi, Project: DOTA_models, Lines: 23, Source: keypoint_ops.py
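A hypothetical usage sketch: one instance with two keypoints, one above and one below the unit window [y_min, x_min, y_max, x_max] = [0, 0, 1, 1]:

keypoints = tf.constant([[[-0.2, 0.5], [1.3, 0.9]]])  # shape [1, 2, 2]
window = tf.constant([0.0, 0.0, 1.0, 1.0])
clipped = clip_to_window(keypoints, window)

with tf.Session() as sess:
    print(sess.run(clipped))  # [[[0.  0.5] [1.  0.9]]] -- both keypoints pulled into the window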

Example 5: _CrossConv

# Required import: import tensorflow [as alias]
# Or: from tensorflow import unstack [as alias]
def _CrossConv(self, encoded_images):
    """Apply the motion kernel on the encoded_images."""
    cross_conved_images = []
    kernels = tf.split(axis=3, num_or_size_splits=4, value=self.kernel)
    for (i, encoded_image) in enumerate(encoded_images):
      with tf.variable_scope('cross_conv_%d' % i):
        kernel = kernels[i]

        encoded_image = tf.unstack(encoded_image, axis=0)
        kernel = tf.unstack(kernel, axis=0)
        assert len(encoded_image) == len(kernel)
        assert len(encoded_image) == self.params['batch_size']
        conved_image = []
        for j in xrange(len(encoded_image)):
          conved_image.append(self._CrossConvHelper(
              encoded_image[j], kernel[j]))
        cross_conved_images.append(tf.concat(axis=0, values=conved_image))
        sys.stderr.write('cross_conved shape: %s\n' %
                         cross_conved_images[-1].get_shape())
    return cross_conved_images 
Author: ringringyi, Project: DOTA_models, Lines: 22, Source: model.py

Example 6: __init__

# Required import: import tensorflow [as alias]
# Or: from tensorflow import unstack [as alias]
def __init__(self, num_experts, gates):
    """Create a SparseDispatcher.

    Args:
      num_experts: an integer.
      gates: a `Tensor` of shape `[batch_size, num_experts]`.

    Returns:
      a SparseDispatcher
    """
    self._gates = gates
    self._num_experts = num_experts

    where = tf.to_int32(tf.where(tf.transpose(gates) > 0))
    self._expert_index, self._batch_index = tf.unstack(where, num=2, axis=1)
    self._part_sizes_tensor = tf.reduce_sum(tf.to_int32(gates > 0), [0])
    self._nonzero_gates = tf.gather(
        tf.reshape(self._gates, [-1]),
        self._batch_index * num_experts + self._expert_index) 
Author: akzaidi, Project: fine-lm, Lines: 21, Source: expert_utils.py
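The tf.where/tf.unstack pair above recovers, for every nonzero gate, which expert it belongs to and which batch row produced it. A sketch with a hypothetical 3-example, 2-expert gates matrix:

gates = tf.constant([[0.7, 0.0],
                     [0.0, 0.9],
                     [0.4, 0.6]])
where = tf.to_int32(tf.where(tf.transpose(gates) > 0))   # shape [4, 2]
expert_index, batch_index = tf.unstack(where, num=2, axis=1)

with tf.Session() as sess:
    print(sess.run(expert_index))  # [0 0 1 1]: expert 0 serves rows 0 and 2, expert 1 rows 1 and 2
    print(sess.run(batch_index))   # [0 2 1 2]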

Example 7: combine

# Required import: import tensorflow [as alias]
# Or: from tensorflow import unstack [as alias]
def combine(self, expert_out, multiply_by_gates=True):
    """Sum together the expert output, multiplied by the corresponding gates.

    Args:
      expert_out: a list of `num_experts` `Tensor`s, each with shape
        `[expert_batch_size_i, <extra_output_dims>]`.
      multiply_by_gates: a boolean.

    Returns:
      a list of num_datashards `Tensor`s with shapes
        `[batch_size[d], <extra_output_dims>]`.
    """
    expert_part_sizes = tf.unstack(
        tf.stack([d.part_sizes for d in self._dispatchers]),
        num=self._ep.n,
        axis=1)
    # list of lists of shape [num_experts][num_datashards]
    expert_output_parts = self._ep(tf.split, expert_out, expert_part_sizes)
    expert_output_parts_t = transpose_list_of_lists(expert_output_parts)
    def my_combine(dispatcher, parts):
      return dispatcher.combine(
          common_layers.convert_gradient_to_tensor(tf.concat(parts, 0)),
          multiply_by_gates=multiply_by_gates)
    return self._dp(my_combine, self._dispatchers, expert_output_parts_t) 
Author: akzaidi, Project: fine-lm, Lines: 26, Source: expert_utils.py

Example 8: get_center_coordinates_and_sizes

# Required import: import tensorflow [as alias]
# Or: from tensorflow import unstack [as alias]
def get_center_coordinates_and_sizes(self, scope=None):
    """Computes the center coordinates, height and width of the boxes.

    Args:
      scope: name scope of the function.

    Returns:
      a list of 4 1-D tensors [ycenter, xcenter, height, width].
    """
    with tf.name_scope(scope, 'get_center_coordinates_and_sizes'):
      box_corners = self.get()
      ymin, xmin, ymax, xmax = tf.unstack(tf.transpose(box_corners))
      width = xmax - xmin
      height = ymax - ymin
      ycenter = ymin + height / 2.
      xcenter = xmin + width / 2.
      return [ycenter, xcenter, height, width] 
Author: datitran, Project: object_detector_app, Lines: 19, Source: box_list.py
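The same corner-to-center conversion works on any raw [N, 4] tensor of [ymin, xmin, ymax, xmax] boxes; a hypothetical sketch outside BoxList:

boxes = tf.constant([[0.1, 0.2, 0.5, 0.6]])
ymin, xmin, ymax, xmax = tf.unstack(tf.transpose(boxes))  # 4 tensors of shape [N]
width = xmax - xmin           # [0.4]
height = ymax - ymin          # [0.4]
ycenter = ymin + height / 2.  # [0.3]
xcenter = xmin + width / 2.   # [0.4]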

Example 9: unroll

# Required import: import tensorflow [as alias]
# Or: from tensorflow import unstack [as alias]
def unroll(self, actions, env_outputs, core_state):
    """Manual implementation of the network unroll."""
    _, _, done, _ = env_outputs

    torso_outputs = snt.BatchApply(self._torso)((actions, env_outputs))

    # Note, in this implementation we can't use CuDNN RNN to speed things up due
    # to the state reset. This can be XLA-compiled (LSTMBlockCell needs to be
    # changed to implement snt.LSTMCell).
    initial_core_state = self._core.zero_state(tf.shape(actions)[1], tf.float32)
    core_output_list = []
    for input_, d in zip(tf.unstack(torso_outputs), tf.unstack(done)):
      # If the episode ended, the core state should be reset before the next.
      core_state = nest.map_structure(
          functools.partial(tf.where, d), initial_core_state, core_state)
      core_output, core_state = self._core(input_, core_state)
      core_output_list.append(core_output)

    return snt.BatchApply(self._head)(tf.stack(core_output_list)), core_state 
Author: deepmind, Project: streetlearn, Lines: 21, Source: plain_agent.py

Example 10: BiRNN

# Required import: import tensorflow [as alias]
# Or: from tensorflow import unstack [as alias]
def BiRNN(self, x, dropout, scope, embedding_size, sequence_length, hidden_units):
        n_hidden=hidden_units
        n_layers=3
        # Prepare data shape to match `static_rnn` function requirements
        x = tf.unstack(tf.transpose(x, perm=[1, 0, 2]))
        print(x)
        # Define lstm cells with tensorflow
        # Forward direction cell
        with tf.name_scope("fw"+scope),tf.variable_scope("fw"+scope):
            stacked_rnn_fw = []
            for _ in range(n_layers):
                fw_cell = tf.nn.rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0, state_is_tuple=True)
                lstm_fw_cell = tf.contrib.rnn.DropoutWrapper(fw_cell,output_keep_prob=dropout)
                stacked_rnn_fw.append(lstm_fw_cell)
            lstm_fw_cell_m = tf.nn.rnn_cell.MultiRNNCell(cells=stacked_rnn_fw, state_is_tuple=True)

        with tf.name_scope("bw"+scope),tf.variable_scope("bw"+scope):
            stacked_rnn_bw = []
            for _ in range(n_layers):
                bw_cell = tf.nn.rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0, state_is_tuple=True)
                lstm_bw_cell = tf.contrib.rnn.DropoutWrapper(bw_cell,output_keep_prob=dropout)
                stacked_rnn_bw.append(lstm_bw_cell)
            lstm_bw_cell_m = tf.nn.rnn_cell.MultiRNNCell(cells=stacked_rnn_bw, state_is_tuple=True)
        # Get lstm cell output

        with tf.name_scope("bw"+scope),tf.variable_scope("bw"+scope):
            outputs, _, _ = tf.nn.static_bidirectional_rnn(lstm_fw_cell_m, lstm_bw_cell_m, x, dtype=tf.float32)
        return outputs[-1] 
Author: dhwajraj, Project: deep-siamese-text-similarity, Lines: 30, Source: siamese_network.py

Example 11: biLSTMCell

# Required import: import tensorflow [as alias]
# Or: from tensorflow import unstack [as alias]
def biLSTMCell(x, hiddenSize):
        input_x = tf.transpose(x, [1, 0, 2])
        input_x = tf.unstack(input_x)
        lstm_fw_cell = tf.contrib.rnn.BasicLSTMCell(hiddenSize, forget_bias=1.0, state_is_tuple=True)
        lstm_bw_cell = tf.contrib.rnn.BasicLSTMCell(hiddenSize, forget_bias=1.0, state_is_tuple=True)
        output, _, _ = tf.contrib.rnn.static_bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, input_x, dtype=tf.float32)
        output = tf.stack(output)
        output = tf.transpose(output, [1, 0, 2])
        return output 
Author: shuaihuaiyi, Project: QA, Lines: 11, Source: qaLSTMNet.py
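A hypothetical usage sketch (shapes only): for a batch of 8 sequences, 20 steps, and 50-dim inputs,

x = tf.placeholder(tf.float32, [8, 20, 50])
output = biLSTMCell(x, hiddenSize=64)
# output has shape [8, 20, 128]: the forward and backward outputs
# are concatenated at each time step (2 * hiddenSize).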

Example 12: build_reward

# Required import: import tensorflow [as alias]
# Or: from tensorflow import unstack [as alias]
def build_reward(self):

        with tf.name_scope('permutations'):

            # Reorder input % tour
            self.ordered_input_ = []
            for input_, path in zip(tf.unstack(self.input_,axis=0), tf.unstack(self.positions,axis=0)): # Unstack % batch axis
                self.ordered_input_.append(tf.gather_nd(input_,tf.expand_dims(path,1)))
            self.ordered_input_ = tf.transpose(tf.stack(self.ordered_input_,0),[2,1,0]) # [batch size, seq length +1 , features] to [features, seq length +1, batch_size]   Note: +1 because end = start = first_city

            # Ordered coordinates
            ordered_x_ = self.ordered_input_[0] # [seq length +1, batch_size]
            delta_x2 = tf.transpose(tf.square(ordered_x_[1:]-ordered_x_[:-1]),[1,0]) # [batch_size, seq length]        delta_x**2
            ordered_y_ = self.ordered_input_[1] # [seq length +1, batch_size]
            delta_y2 = tf.transpose(tf.square(ordered_y_[1:]-ordered_y_[:-1]),[1,0]) # [batch_size, seq length]        delta_y**2

        with tf.name_scope('environment'):

            # Get tour length (euclidean distance)
            inter_city_distances = tf.sqrt(delta_x2+delta_y2) # sqrt(delta_x**2 + delta_y**2) this is the euclidean distance between each city: depot --> ... ---> depot      [batch_size, seq length]
            self.distances = tf.reduce_sum(inter_city_distances, axis=1) # [batch_size]
            #variable_summaries('tour_length',self.distances, with_max_min = True)

            # Define reward from tour length
            self.reward = tf.cast(self.distances,tf.float32)
            variable_summaries('reward',self.reward, with_max_min = True) 
Author: MichelDeudon, Project: neural-combinatorial-optimization-rl-tensorflow, Lines: 28, Source: actor.py
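The reordering step alone, as a hypothetical sketch: unstack along the batch axis so each example can be permuted by its own tour, then gather rows with tf.gather_nd.

input_ = tf.constant([[[1., 1.], [2., 2.], [3., 3.]]])  # [batch=1, seq=3, features=2]
positions = tf.constant([[2, 0, 1]])                    # one tour per batch element
ordered = []
for inp, path in zip(tf.unstack(input_, axis=0), tf.unstack(positions, axis=0)):
    ordered.append(tf.gather_nd(inp, tf.expand_dims(path, 1)))
ordered = tf.stack(ordered, 0)

with tf.Session() as sess:
    print(sess.run(ordered))  # [[[3. 3.] [1. 1.] [2. 2.]]]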

Example 13: __call__

# Required import: import tensorflow [as alias]
# Or: from tensorflow import unstack [as alias]
def __call__(self, x, initial_state, seq_length):
    with tf.variable_scope(self.name, reuse=self.reuse) as vs:
      cell = tf.contrib.rnn.MultiRNNCell([
          tf.contrib.rnn.BasicLSTMCell(
              self.cell_size,
              forget_bias=0.0,
              reuse=tf.get_variable_scope().reuse)
          for _ in xrange(self.num_layers)
      ])

      # shape(x) = (batch_size, num_timesteps, embedding_dim)
      # Convert into a time-major list for static_rnn
      x = tf.unstack(tf.transpose(x, perm=[1, 0, 2]))

      lstm_out, next_state = tf.contrib.rnn.static_rnn(
          cell, x, initial_state=initial_state, sequence_length=seq_length)

      # Merge time and batch dimensions
      # shape(lstm_out) = timesteps * (batch_size, cell_size)
      lstm_out = tf.concat(lstm_out, 0)
      # shape(lstm_out) = (timesteps*batch_size, cell_size)

      if self.keep_prob < 1.:
        lstm_out = tf.nn.dropout(lstm_out, self.keep_prob)

      if self.reuse is None:
        self.trainable_weights = vs.global_variables()

    self.reuse = True

    return lstm_out, next_state 
Author: ringringyi, Project: DOTA_models, Lines: 33, Source: layers.py

Example 14: input_fn

# Required import: import tensorflow [as alias]
# Or: from tensorflow import unstack [as alias]
def input_fn(subset, num_shards):
  """Create input graph for model.

  Args:
    subset: one of 'train', 'validate' and 'eval'.
    num_shards: num of towers participating in data-parallel training.
  Returns:
    two lists of tensors for features and labels, each of num_shards length.
  """
  if subset == 'train':
    batch_size = FLAGS.train_batch_size
  elif subset == 'validate' or subset == 'eval':
    batch_size = FLAGS.eval_batch_size
  else:
    raise ValueError('Subset must be one of \'train\', \'validate\' and \'eval\'')
  with tf.device('/cpu:0'):
    use_distortion = subset == 'train' and FLAGS.use_distortion_for_training
    dataset = cifar10.Cifar10DataSet(FLAGS.data_dir, subset, use_distortion)
    image_batch, label_batch = dataset.make_batch(batch_size)
    if num_shards <= 1:
      # No GPU available or only 1 GPU.
      return [image_batch], [label_batch]

    # Note that passing num=batch_size is safe here, even though
    # dataset.batch(batch_size) can, in some cases, return fewer than batch_size
    # examples. This is because it does so only when repeating for a limited
    # number of epochs, but our dataset repeats forever.
    image_batch = tf.unstack(image_batch, num=batch_size, axis=0)
    label_batch = tf.unstack(label_batch, num=batch_size, axis=0)
    feature_shards = [[] for i in range(num_shards)]
    label_shards = [[] for i in range(num_shards)]
    for i in xrange(batch_size):
      idx = i % num_shards
      feature_shards[idx].append(image_batch[i])
      label_shards[idx].append(label_batch[i])
    feature_shards = [tf.parallel_stack(x) for x in feature_shards]
    label_shards = [tf.parallel_stack(x) for x in label_shards]
    return feature_shards, label_shards 
Author: ringringyi, Project: DOTA_models, Lines: 40, Source: cifar10_main.py
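The unstack-then-round-robin sharding above, reduced to a hypothetical sketch with batch_size=4 and num_shards=2:

images = tf.zeros([4, 32, 32, 3])
image_list = tf.unstack(images, num=4, axis=0)   # 4 tensors of shape [32, 32, 3]
shards = [[] for _ in range(2)]
for i in range(4):
    shards[i % 2].append(image_list[i])
shards = [tf.parallel_stack(s) for s in shards]  # two tensors of shape [2, 32, 32, 3]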

Example 15: running_combine

# Required import: import tensorflow [as alias]
# Or: from tensorflow import unstack [as alias]
def running_combine(fss_logits, confs_probs, incremental_locs,
                    incremental_thetas, previous_sum_num, previous_sum_denom,
                    previous_max_denom, map_size, num_steps):
  # fss_logits is B x N x H x W x C
  # confs_logits is B x N x H x W x C
  # incremental_locs is B x N x 2
  # incremental_thetas is B x N x 1
  # previous_sum_num etc is B x 1 x H x W x C

  with tf.name_scope('combine_{:d}'.format(num_steps)):
    running_sum_nums_ = []; running_sum_denoms_ = [];
    running_max_denoms_ = [];

    fss_logits_ = tf.unstack(fss_logits, axis=1, num=num_steps)
    confs_probs_ = tf.unstack(confs_probs, axis=1, num=num_steps)
    incremental_locs_ = tf.unstack(incremental_locs, axis=1, num=num_steps)
    incremental_thetas_ = tf.unstack(incremental_thetas, axis=1, num=num_steps)
    running_sum_num = tf.unstack(previous_sum_num, axis=1, num=1)[0]
    running_sum_denom = tf.unstack(previous_sum_denom, axis=1, num=1)[0]
    running_max_denom = tf.unstack(previous_max_denom, axis=1, num=1)[0]

    for i in range(num_steps):
      # Rotate the previous running_num and running_denom
      running_sum_num, running_sum_denom, running_max_denom = rotate_preds(
          incremental_locs_[i], incremental_thetas_[i], map_size,
          [running_sum_num, running_sum_denom, running_max_denom],
          output_valid_mask=False)[0]
      # print i, num_steps, running_sum_num.get_shape().as_list()
      running_sum_num = running_sum_num + fss_logits_[i] * confs_probs_[i]
      running_sum_denom = running_sum_denom + confs_probs_[i]
      running_max_denom = tf.maximum(running_max_denom, confs_probs_[i])
      running_sum_nums_.append(running_sum_num)
      running_sum_denoms_.append(running_sum_denom)
      running_max_denoms_.append(running_max_denom)

    running_sum_nums = tf.stack(running_sum_nums_, axis=1)
    running_sum_denoms = tf.stack(running_sum_denoms_, axis=1)
    running_max_denoms = tf.stack(running_max_denoms_, axis=1)
    return running_sum_nums, running_sum_denoms, running_max_denoms 
Author: ringringyi, Project: DOTA_models, Lines: 41, Source: cmp.py


Note: The tensorflow.unstack examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. Refer to each project's License before distributing or reusing the code, and do not republish without permission.