

Python tensorflow.parallel_stack Method Code Examples

This article collects typical usage examples of the Python method tensorflow.parallel_stack. If you are wondering how tensorflow.parallel_stack is used, what it does, or what its usage looks like in practice, the curated code examples below may help. You can also explore further usage examples from the tensorflow module it belongs to.


The following presents 15 code examples of tensorflow.parallel_stack, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
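Before the examples, here is a minimal, self-contained sketch of what tf.parallel_stack does. It assumes TensorFlow 1.x, where parallel_stack is a graph-mode op, and uses illustrative constant values:

import tensorflow as tf

# Three rank-1 tensors with statically known shape (2,); parallel_stack
# requires the input shapes to be known at graph construction time.
x = tf.constant([1, 4])
y = tf.constant([2, 5])
z = tf.constant([3, 6])

# Stacks along a new leading dimension, producing a tensor of shape (3, 2).
stacked = tf.parallel_stack([x, y, z])

with tf.Session() as sess:
    print(sess.run(stacked))  # [[1 4] [2 5] [3 6]]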

Example 1: to_training_tensor

# Required import: import tensorflow [as alias]
# Or: from tensorflow import parallel_stack [as alias]
def to_training_tensor(time_ordered_feature_tensor_dicts, feature_name):
        """ Calls to_tensors plus converts the data to a single large tensor.

        This returns a single tf tensor for a single feature
        in a format you can pass directly to a Keras model.

        # Arguments

        time_ordered_feature_tensor_dicts: A dictionary with keys which are strings and values which are lists of tensors.
        feature_name: A string identifying which specific feature in the dictionary to convert.
        """
        if feature_name is None or feature_name == '':
            return None
        # Image of a clear scene view, originally from the 'view_clear_scene' step.
        # There are also move_to_grasp versions copied from view_clear_scene and repeated once for each time step.
        op_batch = GraspDataset.to_tensors(time_ordered_feature_tensor_dicts, feature_name)
        # make one long list from each list of lists
        op_batch = list(itertools.chain.from_iterable(op_batch))
        # stack all the data in a way that will let it run in parallel
        op_batch = tf.parallel_stack(op_batch)
        return op_batch 
Developer: jhu-lcsr, Project: costar_plan, Lines: 23, Source file: grasp_dataset.py

Example 2: input_fn

# Required import: import tensorflow [as alias]
# Or: from tensorflow import parallel_stack [as alias]
def input_fn(subset, num_shards):
  """Create input graph for model.

  Args:
    subset: one of 'train', 'validate' and 'eval'.
    num_shards: num of towers participating in data-parallel training.
  Returns:
    two lists of tensors for features and labels, each of num_shards length.
  """
  if subset == 'train':
    batch_size = FLAGS.train_batch_size
  elif subset == 'validate' or subset == 'eval':
    batch_size = FLAGS.eval_batch_size
  else:
    raise ValueError('Subset must be one of \'train\', \'validate\' and \'eval\'')
  with tf.device('/cpu:0'):
    use_distortion = subset == 'train' and FLAGS.use_distortion_for_training
    dataset = cifar10.Cifar10DataSet(FLAGS.data_dir, subset, use_distortion)
    image_batch, label_batch = dataset.make_batch(batch_size)
    if num_shards <= 1:
      # No GPU available or only 1 GPU.
      return [image_batch], [label_batch]

    # Note that passing num=batch_size is safe here, even though
    # dataset.batch(batch_size) can, in some cases, return fewer than batch_size
    # examples. This is because it does so only when repeating for a limited
    # number of epochs, but our dataset repeats forever.
    image_batch = tf.unstack(image_batch, num=batch_size, axis=0)
    label_batch = tf.unstack(label_batch, num=batch_size, axis=0)
    feature_shards = [[] for i in range(num_shards)]
    label_shards = [[] for i in range(num_shards)]
    for i in xrange(batch_size):
      idx = i % num_shards
      feature_shards[idx].append(image_batch[i])
      label_shards[idx].append(label_batch[i])
    feature_shards = [tf.parallel_stack(x) for x in feature_shards]
    label_shards = [tf.parallel_stack(x) for x in label_shards]
    return feature_shards, label_shards 
Developer: ringringyi, Project: DOTA_models, Lines: 40, Source file: cifar10_main.py
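The example above shows the pattern that recurs throughout these snippets: unstack a batch, deal the examples out to per-shard lists round-robin, then parallel_stack each shard back into a dense tensor. Below is a distilled sketch of just that pattern; shard_batch is an illustrative name rather than a function from the original project, and TensorFlow 1.x graph mode is assumed:

import tensorflow as tf

def shard_batch(batch, num_shards, batch_size):
    # Requires a statically known batch size, because parallel_stack needs
    # fully defined input shapes at graph construction time.
    examples = tf.unstack(batch, num=batch_size, axis=0)
    shards = [[] for _ in range(num_shards)]
    for i, example in enumerate(examples):
        shards[i % num_shards].append(example)
    # parallel_stack copies each example into its output slot as it becomes
    # available instead of waiting for all inputs, unlike tf.stack.
    return [tf.parallel_stack(shard) for shard in shards]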

Example 3: get_log_probabilities

# Required import: import tensorflow [as alias]
# Or: from tensorflow import parallel_stack [as alias]
def get_log_probabilities(self, data):
        tf_log_probabilities = []
        for dim in range(self.dims):
            tf_log_means = tf.log(self.tf_means[dim])
            tf_log_probabilities.append(
                tf.gather(tf_log_means, data[0][:, dim])
            )

        return tf.reduce_sum(tf.parallel_stack(tf_log_probabilities), axis=0) 
Developer: aakhundov, Project: tf-example-models, Lines: 11, Source file: categorical_distribution.py

Example 4: get_parameter_updaters

# Required import: import tensorflow [as alias]
# Or: from tensorflow import parallel_stack [as alias]
def get_parameter_updaters(self, data, gamma_weighted, gamma_sum):
        tf_parameter_updaters = []
        for dim in range(self.dims):
            tf_partition = tf.dynamic_partition(gamma_weighted, data[0][:, dim], self.counts[dim])
            tf_new_means = tf.parallel_stack([tf.reduce_sum(p) for p in tf_partition])
            tf_parameter_updaters.append(self.tf_means[dim].assign(tf_new_means))

        return tf_parameter_updaters 
Developer: aakhundov, Project: tf-example-models, Lines: 10, Source file: categorical_distribution.py

Example 5: minibatch

# Required import: import tensorflow [as alias]
# Or: from tensorflow import parallel_stack [as alias]
def minibatch(self, dataset, subset):
    with tf.compat.v1.name_scope('batch_processing'):
      images = [[] for i in range(self.device_count)]
      labels = [[] for i in range(self.device_count)]
      record_input = data_flow_ops.RecordInput(
          file_pattern=dataset.tf_record_pattern(subset),
          seed=randint(0, 9000),
          parallelism=64,
          buffer_size=10000,
          batch_size=self.batch_size,
          name='record_input')
      records = record_input.get_yield_op()
      records = tf.split(records, self.batch_size, 0)
      records = [tf.reshape(record, []) for record in records]
      for i in xrange(self.batch_size):
        value = records[i]
        image_buffer, label_index, bbox, _ = parse_example_proto(value)
        image = self.preprocess(image_buffer, bbox, i % 4)
        device_index = i % self.device_count
        images[device_index].append(image)
        labels[device_index].append(label_index)
      label_index_batch = [None] * self.device_count
      for device_index in xrange(self.device_count):
        images[device_index] = tf.parallel_stack(images[device_index])
        label_index_batch[device_index] = tf.concat(labels[device_index], 0)

        # dynamic_pad=True) # HACK TESTING dynamic_pad=True
        images[device_index] = tf.cast(images[device_index], self.dtype)
        depth = 3
        images[device_index] = tf.reshape(
            images[device_index],
            shape=[self.batch_size_per_device, self.height, self.width, depth])
        label_index_batch[device_index] = tf.reshape(
            label_index_batch[device_index], [self.batch_size_per_device])
        # Display the training images in the visualizer.
        # tf.summary.image('images', images)

      return images, label_index_batch, records 
Developer: IntelAI, Project: models, Lines: 40, Source file: preprocessing.py

Example 6: minibatch

# Required import: import tensorflow [as alias]
# Or: from tensorflow import parallel_stack [as alias]
def minibatch(self, dataset, subset):
    with tf.compat.v1.name_scope('batch_processing'):
      images = [[] for i in range(self.device_count)]
      labels = [[] for i in range(self.device_count)]
      record_input = data_flow_ops.RecordInput(
          file_pattern=dataset.tf_record_pattern(subset),
          seed=randint(0, 9000),
          parallelism=64,
          buffer_size=10000,
          batch_size=self.batch_size,
          name='record_input')
      records = record_input.get_yield_op()
      records = tf.split(records, self.batch_size, 0)
      records = [tf.reshape(record, []) for record in records]
      for i in xrange(self.batch_size):
        value = records[i]
        image_buffer, label_index, bbox, _ = parse_example_proto(value)
        image = self.preprocess(image_buffer, bbox, i % 4)

        device_index = i % self.device_count
        images[device_index].append(image)
        labels[device_index].append(label_index)
      label_index_batch = [None] * self.device_count
      for device_index in xrange(self.device_count):
        images[device_index] = tf.parallel_stack(images[device_index])
        label_index_batch[device_index] = tf.concat(labels[device_index], 0)

        # dynamic_pad=True) # HACK TESTING dynamic_pad=True
        images[device_index] = tf.cast(images[device_index], self.dtype)
        depth = 3
        images[device_index] = tf.reshape(
            images[device_index],
            shape=[self.batch_size_per_device, self.height, self.width, depth])
        label_index_batch[device_index] = tf.reshape(
            label_index_batch[device_index], [self.batch_size_per_device])
        # Display the training images in the visualizer.
        # tf.summary.image('images', images)

      return images, label_index_batch 
Developer: IntelAI, Project: models, Lines: 41, Source file: preprocessing.py

Example 7: input_fn

# Required import: import tensorflow [as alias]
# Or: from tensorflow import parallel_stack [as alias]
def input_fn(data_dir,
             subset,
             num_shards,
             batch_size,
             use_distortion_for_training=True):
  """Create input graph for model.

  Args:
    data_dir: Directory where TFRecords representing the dataset are located.
    subset: one of 'train', 'validate' and 'eval'.
    num_shards: num of towers participating in data-parallel training.
    batch_size: total batch size for training to be divided by the number of
    shards.
    use_distortion_for_training: True to use distortions.
  Returns:
    two lists of tensors for features and labels, each of num_shards length.
  """
  with tf.device('/cpu:0'):
    use_distortion = subset == 'train' and use_distortion_for_training
    dataset = cifar10.Cifar10DataSet(data_dir, subset, use_distortion)
    image_batch, label_batch = dataset.make_batch(batch_size)
    if num_shards <= 1:
      # No GPU available or only 1 GPU.
      return [image_batch], [label_batch]

    # Note that passing num=batch_size is safe here, even though
    # dataset.batch(batch_size) can, in some cases, return fewer than batch_size
    # examples. This is because it does so only when repeating for a limited
    # number of epochs, but our dataset repeats forever.
    image_batch = tf.unstack(image_batch, num=batch_size, axis=0)
    label_batch = tf.unstack(label_batch, num=batch_size, axis=0)
    feature_shards = [[] for i in range(num_shards)]
    label_shards = [[] for i in range(num_shards)]
    for i in xrange(batch_size):
      idx = i % num_shards
      feature_shards[idx].append(image_batch[i])
      label_shards[idx].append(label_batch[i])
    feature_shards = [tf.parallel_stack(x) for x in feature_shards]
    label_shards = [tf.parallel_stack(x) for x in label_shards]
    return feature_shards, label_shards 
Developer: rky0930, Project: yolo_v2, Lines: 42, Source file: cifar10_main.py

Example 8: minibatch

# Required import: import tensorflow [as alias]
# Or: from tensorflow import parallel_stack [as alias]
def minibatch(self, dataset, subset, use_datasets, cache_data,
                  shift_ratio=-1):
        del dataset, use_datasets, cache_data, shift_ratio
        if (not hasattr(self, 'fake_images') or
                not hasattr(self, 'fake_labels')):
            raise ValueError(
                'Must call set_fake_data() before calling minibatch '
                'on TestImagePreprocessor')
        if self.expected_subset is not None:
            assert subset == self.expected_subset

        with tf.name_scope('batch_processing'):
            image_slice, label_slice = tf.train.slice_input_producer(
                [self.fake_images, self.fake_labels],
                shuffle=False,
                name='image_slice')
            raw_images, raw_labels = tf.train.batch(
                [image_slice, label_slice], batch_size=self.batch_size,
                name='image_batch')
            images = [[] for _ in range(self.num_splits)]
            labels = [[] for _ in range(self.num_splits)]
            for i in xrange(self.batch_size):
                split_index = i % self.num_splits
                raw_image = tf.cast(raw_images[i], self.dtype)
                images[split_index].append(raw_image)
                labels[split_index].append(raw_labels[i])
            for split_index in xrange(self.num_splits):
                images[split_index] = tf.parallel_stack(images[split_index])
                labels[split_index] = tf.parallel_stack(labels[split_index])

            return images, labels 
Developer: snuspl, Project: parallax, Lines: 33, Source file: preprocessing.py

Example 9: minibatch

# Required import: import tensorflow [as alias]
# Or: from tensorflow import parallel_stack [as alias]
def minibatch(self, dataset, subset, use_datasets, cache_data,
                shift_ratio=-1):
    del dataset, use_datasets, cache_data, shift_ratio
    if (not hasattr(self, 'fake_images') or
        not hasattr(self, 'fake_labels')):
      raise ValueError('Must call set_fake_data() before calling minibatch '
                       'on TestImagePreprocessor')
    if self.expected_subset is not None:
      assert subset == self.expected_subset

    with tf.name_scope('batch_processing'):
      image_slice, label_slice = tf.train.slice_input_producer(
          [self.fake_images, self.fake_labels],
          shuffle=False,
          name='image_slice')
      raw_images, raw_labels = tf.train.batch(
          [image_slice, label_slice], batch_size=self.batch_size,
          name='image_batch')
      images = [[] for _ in range(self.num_splits)]
      labels = [[] for _ in range(self.num_splits)]
      for i in xrange(self.batch_size):
        split_index = i % self.num_splits
        raw_image = tf.cast(raw_images[i], self.dtype)
        images[split_index].append(raw_image)
        labels[split_index].append(raw_labels[i])
      for split_index in xrange(self.num_splits):
        images[split_index] = tf.parallel_stack(images[split_index])
        labels[split_index] = tf.parallel_stack(labels[split_index])

      return images, labels 
Developer: awslabs, Project: deeplearning-benchmark, Lines: 32, Source file: preprocessing.py

Example 10: minibatch

# Required import: import tensorflow [as alias]
# Or: from tensorflow import parallel_stack [as alias]
def minibatch(self, dataset, subset, use_datasets, cache_data,
                  shift_ratio=-1):
        del dataset, use_datasets, cache_data, shift_ratio
        if (not hasattr(self, 'fake_images') or not hasattr(self, 'fake_labels')):
            raise ValueError('Must call set_fake_data() before calling minibatch '
                             'on TestImagePreprocessor')
        if self.expected_subset is not None:
            assert subset == self.expected_subset

        with tf.name_scope('batch_processing'):
            image_slice, label_slice = tf.train.slice_input_producer(
                [self.fake_images, self.fake_labels],
                shuffle=False,
                name='image_slice')
            raw_images, raw_labels = tf.train.batch(
                [image_slice, label_slice], batch_size=self.batch_size,
                name='image_batch')
            images = [[] for _ in range(self.num_splits)]
            labels = [[] for _ in range(self.num_splits)]
            for i in xrange(self.batch_size):
                split_index = i % self.num_splits
                raw_image = tf.cast(raw_images[i], self.dtype)
                images[split_index].append(raw_image)
                labels[split_index].append(raw_labels[i])
            for split_index in xrange(self.num_splits):
                images[split_index] = tf.parallel_stack(images[split_index])
                labels[split_index] = tf.parallel_stack(labels[split_index])

            return images, labels 
Developer: balancap, Project: tf-imagenet, Lines: 31, Source file: preprocessing_synthetic.py

Example 11: device_minibatches

# Required import: import tensorflow [as alias]
# Or: from tensorflow import parallel_stack [as alias]
def device_minibatches(self, total_batch_size):
        record_input = data_flow_ops.RecordInput(
            file_pattern=os.path.join(FLAGS.data_dir, '%s-*' % self.subset),
            parallelism=64,
            # Note: This causes deadlock during init if larger than dataset
            buffer_size=FLAGS.input_buffer_size,
            batch_size=total_batch_size)
        records = record_input.get_yield_op()
        # Split batch into individual images
        records = tf.split(records, total_batch_size, 0)
        records = [tf.reshape(record, []) for record in records]
        # Deserialize and preprocess images into batches for each device
        images = defaultdict(list)
        labels = defaultdict(list)
        with tf.name_scope('input_pipeline'):
            for i, record in enumerate(records):
                imgdata, label, bbox, text = deserialize_image_record(record)
                image = self.preprocess(imgdata, bbox, thread_id=i)
                label -= 1 # Change to 0-based (don't use background class)
                device_num = i % self.num_devices
                images[device_num].append(image)
                labels[device_num].append(label)
            # Stack images back into a sub-batch for each device
            for device_num in range(self.num_devices):
                images[device_num] = tf.parallel_stack(images[device_num])
                labels[device_num] = tf.concat(labels[device_num], 0)
                images[device_num] = tf.reshape(images[device_num],
                                                [-1, self.height, self.width, 3])
                images[device_num] = tf.clip_by_value(images[device_num], 0., 255.)
                images[device_num] = tf.cast(images[device_num], self.dtype)
        return images, labels 
Developer: HewlettPackard, Project: dlcookbook-dlbs, Lines: 33, Source file: nvcnn.py

Example 12: minibatch

# Required import: import tensorflow [as alias]
# Or: from tensorflow import parallel_stack [as alias]
def minibatch(self, dataset, subset, use_datasets, cache_data,
                shift_ratio=0):
    """Get test image batches."""
    del dataset, use_datasets, cache_data
    if (not hasattr(self, 'fake_images') or
        not hasattr(self, 'fake_labels')):
      raise ValueError('Must call set_fake_data() before calling minibatch '
                       'on TestImagePreprocessor')
    if self.expected_subset is not None:
      assert subset == self.expected_subset

    shift_ratio = shift_ratio or self.shift_ratio
    fake_images = cnn_util.roll_numpy_batches(self.fake_images, self.batch_size,
                                              shift_ratio)
    fake_labels = cnn_util.roll_numpy_batches(self.fake_labels, self.batch_size,
                                              shift_ratio)

    with tf.name_scope('batch_processing'):
      image_slice, label_slice = tf.train.slice_input_producer(
          [fake_images, fake_labels],
          shuffle=False,
          name='image_slice')
      raw_images, raw_labels = tf.train.batch(
          [image_slice, label_slice], batch_size=self.batch_size,
          name='image_batch')
      images = [[] for _ in range(self.num_splits)]
      labels = [[] for _ in range(self.num_splits)]
      for i in xrange(self.batch_size):
        split_index = i % self.num_splits
        raw_image = tf.cast(raw_images[i], self.dtype)
        images[split_index].append(raw_image)
        labels[split_index].append(raw_labels[i])
      for split_index in xrange(self.num_splits):
        images[split_index] = tf.parallel_stack(images[split_index])
        labels[split_index] = tf.parallel_stack(labels[split_index])

      return images, labels 
Developer: HewlettPackard, Project: dlcookbook-dlbs, Lines: 39, Source file: preprocessing.py

Example 13: _get_interp_idxs_weights_2d

# Required import: import tensorflow [as alias]
# Or: from tensorflow import parallel_stack [as alias]
def _get_interp_idxs_weights_2d(x, xp, y, yp, x_log_spacing=False):
  with tf.name_scope('get_interp_idxs_weights_2d'):
    if x_log_spacing:
      x = tf.log(x)
      xp = tf.log(xp)

    with tf.control_dependencies([yp]):
      xp = tf.tile(xp, yp.shape)
    xyp = tf.expand_dims(tf.parallel_stack([xp, yp]), 1)
    xy0 = tf.reshape(tf.parallel_stack([x[0], y[0]]), [2, 1, 1])
    xy1 = tf.reshape(tf.parallel_stack([x[1], y[1]]), [2, 1, 1])

    spacing = xy1 - xy0
    ind_grid = (xyp - xy0) / spacing
    ind = tf.cast(ind_grid, tf.int32) + [[[0], [1]]]

    max_ind = [[[x.shape[0].value - 1]], [[y.shape[0].value - 1]]]
    ind = tf.minimum(ind, max_ind)
    ind_float = tf.cast(ind, tf.float64)

    xy_grid = ind_float * spacing + xy0

    weight = tf.abs(xyp - xy_grid) / spacing
    if x_log_spacing:
      weight = tf.parallel_stack([tf.exp(weight[0]), weight[1]])
    weight = 1. - weight

    weight_sum = tf.reduce_sum(weight, axis=1, keep_dims=True)
    weight /= weight_sum

    return ind, weight 
Developer: brain-research, Project: nngp, Lines: 33, Source file: interp.py

Example 14: _initialize_graph

# Required import: import tensorflow [as alias]
# Or: from tensorflow import parallel_stack [as alias]
def _initialize_graph(self, dtype=tf.float64):
        self.tf_train_step = None
        self.tf_component_parameters = None
        self.tf_mean_log_likelihood = None

        with self.tf_graph.as_default():
            tf_component_log_probabilities = []
            for component_id in range(len(self.components)):
                worker_id = self.mapping[component_id]
                with tf.device(self.workers[worker_id]):
                    self.components[component_id].initialize(dtype)
                    tf_component_log_probabilities.append(
                        self.components[component_id].get_log_probabilities(
                            self.tf_worker_data[worker_id]
                        )
                    )

            tf_log_components = tf.parallel_stack(tf_component_log_probabilities)
            tf_log_weighted = tf_log_components + tf.expand_dims(tf.log(self.tf_weights), 1)
            tf_log_shift = tf.expand_dims(tf.reduce_max(tf_log_weighted, 0), 0)
            tf_exp_log_shifted = tf.exp(tf_log_weighted - tf_log_shift)
            tf_exp_log_shifted_sum = tf.reduce_sum(tf_exp_log_shifted, 0)
            tf_log_likelihood = tf.reduce_sum(tf.log(tf_exp_log_shifted_sum)) + tf.reduce_sum(tf_log_shift)

            self.tf_mean_log_likelihood = tf_log_likelihood / (self.tf_num_points * self.tf_dims)

            tf_gamma = tf_exp_log_shifted / tf_exp_log_shifted_sum
            tf_gamma_sum = tf.reduce_sum(tf_gamma, 1)
            tf_gamma_weighted = tf_gamma / tf.expand_dims(tf_gamma_sum, 1)
            tf_gamma_sum_split = tf.unstack(tf_gamma_sum)
            tf_gamma_weighted_split = tf.unstack(tf_gamma_weighted)

            tf_component_updaters = []
            for component_id in range(len(self.components)):
                worker_id = self.mapping[component_id]
                with tf.device(self.workers[worker_id]):
                    tf_component_updaters.extend(
                        self.components[component_id].get_parameter_updaters(
                            self.tf_worker_data[worker_id],
                            tf_gamma_weighted_split[component_id],
                            tf_gamma_sum_split[component_id]
                        )
                    )

            tf_new_weights = tf_gamma_sum / self.tf_num_points
            tf_weights_updater = self.tf_weights.assign(tf_new_weights)
            tf_all_updaters = tf_component_updaters + [tf_weights_updater]

            self.tf_train_step = tf.group(*tf_all_updaters)
            self.tf_component_parameters = [
                comp.get_parameters() for comp in self.components
            ] 
Developer: aakhundov, Project: tf-example-models, Lines: 54, Source file: mixture_model.py

Example 15: parallel_stack

# Required import: import tensorflow [as alias]
# Or: from tensorflow import parallel_stack [as alias]
def parallel_stack(values, name="parallel_stack"):
  """Stacks a list of rank-`R` tensors into one rank-`(R+1)` tensor in parallel.

  Requires that the shape of inputs be known at graph construction time.

  Packs the list of tensors in `values` into a tensor with rank one higher than
  each tensor in `values`, by packing them along the first dimension.
  Given a list of length `N` of tensors of shape `(A, B, C)`; the `output`
  tensor will have the shape `(N, A, B, C)`.

  For example:

  ```prettyprint
  # 'x' is [1, 4]
  # 'y' is [2, 5]
  # 'z' is [3, 6]
  parallel_stack([x, y, z]) => [[1, 4], [2, 5], [3, 6]]
  ```

  The difference between stack and parallel_stack is that stack requires all
  of the inputs be computed before the operation will begin but doesn't require
  that the input shapes be known during graph construction.  Parallel stack
  will copy pieces of the input into the output as they become available, in
  some situations this can provide a performance benefit.

  This is the opposite of unstack.  The numpy equivalent is

      tf.parallel_stack([x, y, z]) = np.asarray([x, y, z])

  Args:
    values: A list of `Tensor` objects with the same shape and type.
    name: A name for this operation (optional).

  Returns:
    output: A stacked `Tensor` with the same type as `values`.
  """
  with ops.name_scope(name):
    value_t = ops.convert_to_tensor(values[0])
    value_shape = ops.convert_to_tensor(value_t).get_shape()

    output_shape = tensor_shape.TensorShape([len(values)])
    output_shape = output_shape.concatenate(value_shape)
    # expand_dims converts concat to stack.
    return gen_array_ops._parallel_concat(
        [expand_dims(value, 0) for value in values], shape=output_shape) 
Developer: ryfeus, Project: lambda-packs, Lines: 47, Source file: array_ops.py
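As the docstring above notes, parallel_stack requires fully defined static shapes in exchange for being able to copy each input into the output as it becomes ready; for inputs whose shapes are known it produces the same value as tf.stack and the NumPy equivalent. A small sketch checking that claim, assuming TensorFlow 1.x and NumPy:

import numpy as np
import tensorflow as tf

x, y, z = tf.constant([1, 4]), tf.constant([2, 5]), tf.constant([3, 6])
p = tf.parallel_stack([x, y, z])  # allowed: input shapes are fully defined
s = tf.stack([x, y, z])           # tf.stack also accepts inputs with unknown static shape

with tf.Session() as sess:
    p_val, s_val = sess.run([p, s])

# Both ops agree and match np.asarray, as described in the docstring.
assert np.array_equal(p_val, s_val)
assert np.array_equal(p_val, np.asarray([[1, 4], [2, 5], [3, 6]]))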


Note: The tensorflow.parallel_stack examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. For distribution and use, please follow the corresponding project's License. Do not reproduce without permission.