

Python v1.split Method Code Examples

This article collects and summarizes typical usage examples of the tensorflow.compat.v1.split method in Python. If you are struggling with questions such as: What exactly does Python's v1.split do? How is it used? What does it look like in practice? Then the curated code examples below may help. You can also explore further usage examples of tensorflow.compat.v1, the module this method belongs to.


The following presents 15 code examples of the v1.split method, sorted by popularity by default.
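Before the examples, a quick refresher: tf.compat.v1.split accepts either an integer number of equal pieces or an explicit list of sizes along the chosen axis, and returns a list of tensors. A minimal sketch (values and shapes are arbitrary):

import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

x = tf.constant(np.arange(12.0).reshape(2, 6))
halves = tf.split(x, 2, axis=1)         # two equal [2, 3] pieces
sized = tf.split(x, [1, 2, 3], axis=1)  # explicit sizes summing to 6

with tf.Session() as sess:
  print([t.shape.as_list() for t in halves])  # [[2, 3], [2, 3]]
  print(sess.run(sized[2]))                   # the [2, 3] tail piece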

Example 1: unpack_grad_tuple

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import split [as alias]
def unpack_grad_tuple(gv, gpt):
  """Unpack a previously packed collection of gradient tensors.

  Args:
    gv: A (grad, var) pair to be unpacked.
    gpt: A GradPackTuple describing the packing operation that produced gv.

  Returns:
    A list of (grad, var) pairs corresponding to the values that were
    originally packed into gv, possibly after subsequent operations such as
    reduction.
  """
  elt_widths = [x.num_elements() for x in gpt.shapes]
  with tf.device(gv[0][0].device):
    with tf.name_scope('unpack'):
      splits = tf.split(gv[0], elt_widths)
      unpacked_gv = []
      for idx, s in enumerate(splits):
        unpacked_gv.append((tf.reshape(s, gpt.shapes[idx]), gpt.vars[idx]))
  return unpacked_gv 
Developer ID: tensorflow, Project: benchmarks, Lines of code: 22, Source file: allreduce.py
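For context, here is a minimal round-trip sketch of the packing that unpack_grad_tuple reverses: gradients are flattened and concatenated, then recovered with tf.split plus tf.reshape. The GradPackTuple stand-in below is an assumption modeled on the docstring, not the benchmarks repo's exact definition:

import collections
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

# Hypothetical stand-in for the repo's GradPackTuple.
GradPackTuple = collections.namedtuple('GradPackTuple', 'vars shapes')

v_a = tf.Variable(tf.zeros([2, 3]))
v_b = tf.Variable(tf.zeros([4]))
grads = [tf.ones([2, 3]), tf.ones([4])]
gpt = GradPackTuple(vars=[v_a, v_b], shapes=[g.shape for g in grads])

# Pack: flatten each gradient and concatenate into a single tensor.
packed = tf.concat([tf.reshape(g, [-1]) for g in grads], axis=0)

# Unpack: split by element counts, then restore the original shapes.
splits = tf.split(packed, [s.num_elements() for s in gpt.shapes])
unpacked = [(tf.reshape(s, shape), v)
            for s, shape, v in zip(splits, gpt.shapes, gpt.vars)]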

Example 2: loss_function

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import split [as alias]
def loss_function(self, inputs, build_network_result):
    logits = build_network_result.logits

    # Unpack model output back to locations and confidence scores of predictions
    # Shape of pred_loc: [batch_size, NUM_SSD_BOXES, 4]
    # Shape of pred_label: [batch_size, NUM_SSD_BOXES, label_num]
    pred_loc, pred_label = tf.split(logits, [4, self.label_num], 2)

    # Shape of gt_loc: [batch_size, NUM_SSD_BOXES, 4]
    # Shape of gt_label: [batch_size, NUM_SSD_BOXES, 1]
    # Shape of num_gt: [batch_size]
    _, gt_loc, gt_label, num_gt = inputs
    gt_label = tf.cast(gt_label, tf.int32)

    box_loss = self._localization_loss(pred_loc, gt_loc, gt_label, num_gt)
    class_loss = self._classification_loss(pred_label, gt_label, num_gt)

    tf.summary.scalar('box_loss', tf.reduce_mean(box_loss))
    tf.summary.scalar('class_loss', tf.reduce_mean(class_loss))
    return class_loss + box_loss 
Developer ID: tensorflow, Project: benchmarks, Lines of code: 22, Source file: ssd_model.py
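The split here simply slices the last dimension of the SSD head output into box offsets and class scores. A shape-only sketch with an arbitrary label_num:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

batch_size, num_boxes, label_num = 8, 100, 81  # arbitrary sizes
logits = tf.zeros([batch_size, num_boxes, 4 + label_num])

pred_loc, pred_label = tf.split(logits, [4, label_num], 2)
print(pred_loc.shape.as_list())    # [8, 100, 4]
print(pred_label.shape.as_list())  # [8, 100, 81]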

Example 3: _cell_base

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import split [as alias]
def _cell_base(self, net, prev_layer):
    """Runs the beginning of the conv cell before the predicted ops are run."""
    num_filters = self._filter_size

    # Check to be sure prev layer stuff is setup correctly
    prev_layer = self._reduce_prev_layer(prev_layer, net)

    net = tf.nn.relu(net)
    net = slim.conv2d(net, num_filters, 1, scope='1x1')
    net = slim.batch_norm(net, scope='beginning_bn')
    split_axis = get_channel_index()
    net = tf.split(axis=split_axis, num_or_size_splits=1, value=net)
    for split in net:
      assert int(split.shape[split_axis]) == int(
          self._num_conv_filters * self._filter_scaling)
    net.append(prev_layer)
    return net 
Developer ID: tensorflow, Project: benchmarks, Lines of code: 19, Source file: nasnet_utils.py
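Note that num_or_size_splits=1 is a degenerate split: it returns a one-element list, so the loop body runs once and the call mainly normalizes net into a list before prev_layer is appended. A minimal sketch:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

net = tf.zeros([1, 8, 8, 32])
as_list = tf.split(axis=3, num_or_size_splits=1, value=net)
print(len(as_list), as_list[0].shape.as_list())  # 1 [1, 8, 8, 32]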

Example 4: _split_divisible

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import split [as alias]
def _split_divisible(num, num_ways, divisible_by=8):
  """Evenly splits num, num_ways so each piece is a multiple of divisible_by."""
  assert num % divisible_by == 0
  assert num // num_ways >= divisible_by
  # Note: want to round down, we adjust each split to match the total.
  base = num // num_ways // divisible_by * divisible_by
  result = []
  accumulated = 0
  for i in range(num_ways):
    r = base
    while accumulated + r < num * (i + 1) // num_ways:
      r += divisible_by
    result.append(r)
    accumulated += r
  assert accumulated == num
  return result 
Developer ID: tensorflow, Project: benchmarks, Lines of code: 18, Source file: mobilenet_conv_blocks.py
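A usage sketch of _split_divisible (plain Python, no TensorFlow involved): splitting 512 channels three ways yields pieces that are multiples of 8 and sum back to 512.

pieces = _split_divisible(512, 3)
print(pieces)                          # [176, 168, 168]
assert sum(pieces) == 512
assert all(p % 8 == 0 for p in pieces)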

Example 5: algorithm_from_params

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import split [as alias]
def algorithm_from_params(params):
  """Returns a BatchAllReduceAlgorithm from a Params tuple."""
  if params.all_reduce_spec:
    if params.gpu_indices:
      gpu_indices = [int(x) for x in params.gpu_indices.split(',')]
    else:
      gpu_indices = list(range(params.num_gpus))
    return AllReduceSpecAlgorithm(params.all_reduce_spec, gpu_indices,
                                  params.agg_small_grads_max_bytes,
                                  params.agg_small_grads_max_group)
  elif params.hierarchical_copy:
    return HierarchicalCopyAlgorithm(params.network_topology)
  else:
    if params.local_parameter_device == 'gpu':
      devices_to_reduce_on = ['/gpu:%d' % i for i in range(params.num_gpus)]
    else:
      devices_to_reduce_on = ['/cpu:0']
    return CopyToDeviceAlgorithm(devices_to_reduce_on) 
Developer ID: tensorflow, Project: benchmarks, Lines of code: 20, Source file: batch_allreduce.py
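The only split in this example is Python's str.split, parsing a comma-separated GPU list from the flags. A one-line sketch of the assumed flag format:

gpu_indices = [int(x) for x in '0,1,3'.split(',')]  # '0,1,3' -> [0, 1, 3]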

Example 6: maybe_split_tensors

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import split [as alias]
def maybe_split_tensors(self, concatenated_tensor):
    """Split concatenated tensor into `num_splits` pieces."""
    if not self._num_splits:
      return concatenated_tensor

    if len(concatenated_tensor) != 1:
      raise RuntimeError('tensors must be concatenated via '
                         'maybe_concat_tensors() before splitting')

    concatenated_tensor = concatenated_tensor[0]
    total_tensor_size = concatenated_tensor.shape.num_elements()
    split_size = total_tensor_size // self._num_splits
    split_size_last = total_tensor_size - split_size * (self._num_splits - 1)
    split_sizes = [split_size] * (self._num_splits - 1) + [split_size_last]
    tensor_packs = tf.split(concatenated_tensor, split_sizes)
    return tensor_packs 
Developer ID: tensorflow, Project: benchmarks, Lines of code: 18, Source file: batch_allreduce.py
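The size arithmetic above lets the last chunk absorb the remainder when the element count is not divisible by num_splits. A minimal sketch with 10 elements and 3 splits:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

num_splits = 3
t = tf.range(10.0)
total = t.shape.num_elements()    # 10
split_size = total // num_splits  # 3
split_sizes = [split_size] * (num_splits - 1) + [total - split_size * (num_splits - 1)]
print(split_sizes)                # [3, 3, 4]
tensor_packs = tf.split(t, split_sizes)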

Example 7: undo_maybe_concat_tensors

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import split [as alias]
def undo_maybe_concat_tensors(self, concatenated_tensor):
    """Undo maybe_concat_tensors()."""
    if not self._num_splits:
      return concatenated_tensor

    if len(concatenated_tensor) != 1:
      raise RuntimeError(
          'undo_maybe_split_tensors() must be called before '
          'undo_maybe_concat_tensors when num_splits is greater than 1')
    concatenated_tensor = concatenated_tensor[0]

    tensors_with_sizes = tf.split(concatenated_tensor,
                                  self._orig_sizes)
    tensors_with_shapes = [
        tf.reshape(grad, shape) for grad, shape in zip(
            tensors_with_sizes, self._orig_shapes)
    ]
    return tensors_with_shapes 
Developer ID: tensorflow, Project: benchmarks, Lines of code: 20, Source file: batch_allreduce.py

Example 8: _build_tiled_linear

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import split [as alias]
def _build_tiled_linear(self, inputs, input_name_and_sizes,
                          output_name_and_sizes, add_bias):
    # pylint: disable=missing-docstring
    def split_output(output):
      if len(output_name_and_sizes) == 1:
        return output
      elif len(set([size for _, size in output_name_and_sizes])) == 1:
        # This is a bit faster than several tf.slice calls.
        return tf.split(output, len(output_name_and_sizes), axis=1)
      else:
        outputs = []
        offset = 0
        for _, output_size in output_name_and_sizes:
          outputs.append(tf.slice(output, [0, offset], [-1, output_size]))
          offset += output_size
        return outputs

    weights = self._ensure_weights()
    if len(inputs) > 1:
      inputs = tf.concat(inputs, 1)
    if add_bias:
      biases = self._ensure_biases()
      return split_output(tf.nn.xw_plus_b(inputs, weights, biases))
    else:
      return split_output(tf.matmul(inputs, weights)) 
Developer ID: deepmind, Project: lamb, Lines of code: 27, Source file: tiled_linear.py
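The equal-size fast path in split_output is worth isolating: when every output has the same width, a single tf.split replaces several tf.slice calls. A minimal sketch with two 4-wide outputs:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

output = tf.zeros([2, 8])                   # two 4-wide outputs, batch of 2
fast = tf.split(output, 2, axis=1)          # one op when all sizes are equal
slow = [tf.slice(output, [0, 0], [-1, 4]),  # the general per-output path
        tf.slice(output, [0, 4], [-1, 4])]
print([t.shape.as_list() for t in fast])    # [[2, 4], [2, 4]]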

Example 9: combine

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import split [as alias]
def combine(self, expert_out, multiply_by_gates=True):
    """Sum together the expert output, multiplied by the corresponding gates.

    Args:
      expert_out: a list of `num_experts` `Tensor`s, each with shape
        `[expert_batch_size_i, <extra_output_dims>]`.
      multiply_by_gates: a boolean.

    Returns:
      a list of num_datashards `Tensor`s with shapes
        `[batch_size[d], <extra_output_dims>]`.
    """
    expert_part_sizes = tf.unstack(
        tf.stack([d.part_sizes for d in self._dispatchers]),
        num=self._ep.n,
        axis=1)
    # list of lists of shape [num_experts][num_datashards]
    expert_output_parts = self._ep(tf.split, expert_out, expert_part_sizes)
    expert_output_parts_t = transpose_list_of_lists(expert_output_parts)
    def my_combine(dispatcher, parts):
      return dispatcher.combine(
          common_layers.convert_gradient_to_tensor(tf.concat(parts, 0)),
          multiply_by_gates=multiply_by_gates)
    return self._dp(my_combine, self._dispatchers, expert_output_parts_t) 
Developer ID: tensorflow, Project: tensor2tensor, Lines of code: 26, Source file: expert_utils.py

Example 10: update_internal_states_early

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import split [as alias]
def update_internal_states_early(self, internal_states, frames):
    """Update the internal states early in the network in GRU-like way."""
    batch_size = common_layers.shape_list(frames[0])[0]
    internal_state = internal_states[0][0][:batch_size, :, :, :]
    state_activation = tf.concat([internal_state, frames[0]], axis=-1)
    state_gate_candidate = tf.layers.conv2d(
        state_activation, 2 * self.hparams.recurrent_state_size,
        (3, 3), padding="SAME", name="state_conv")
    state_gate, state_candidate = tf.split(state_gate_candidate, 2, axis=-1)
    state_gate = tf.nn.sigmoid(state_gate)
    state_candidate = tf.tanh(state_candidate)
    internal_state = internal_state * state_gate
    internal_state += state_candidate * (1.0 - state_gate)
    max_batch_size = max(_MAX_BATCH, self.hparams.batch_size)
    diff_batch_size = max_batch_size - batch_size
    internal_state = tf.pad(
        internal_state, [[0, diff_batch_size], [0, 0], [0, 0], [0, 0]])
    return [[internal_state]] 
Developer ID: tensorflow, Project: tensor2tensor, Lines of code: 20, Source file: basic_stochastic.py

Example 11: conv_lstm

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import split [as alias]
def conv_lstm(x,
              kernel_size,
              filters,
              padding="SAME",
              dilation_rate=(1, 1),
              name=None,
              reuse=None):
  """Convolutional LSTM in 1 dimension."""
  with tf.variable_scope(
      name, default_name="conv_lstm", values=[x], reuse=reuse):
    gates = conv(
        x,
        4 * filters,
        kernel_size,
        padding=padding,
        dilation_rate=dilation_rate)
    g = tf.split(layer_norm(gates, 4 * filters), 4, axis=3)
    new_cell = tf.sigmoid(g[0]) * x + tf.sigmoid(g[1]) * tf.tanh(g[3])
    return tf.sigmoid(g[2]) * tf.tanh(new_cell) 
Developer ID: tensorflow, Project: tensor2tensor, Lines of code: 21, Source file: common_layers.py
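The pattern in conv_lstm — one convolution producing 4 * filters channels, then a four-way tf.split into the LSTM gates — can be sketched standalone with tf.layers.conv2d (the conv and layer_norm helpers in the snippet are repo-local, so layer normalization is omitted here):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

filters = 16
x = tf.zeros([2, 10, 1, filters])  # [batch, length, 1, filters]
gates = tf.layers.conv2d(x, 4 * filters, (3, 1), padding='same')
g = tf.split(gates, 4, axis=3)     # four [batch, length, 1, filters] gates
new_cell = tf.sigmoid(g[0]) * x + tf.sigmoid(g[1]) * tf.tanh(g[3])
out = tf.sigmoid(g[2]) * tf.tanh(new_cell)
print(out.shape.as_list())         # [2, 10, 1, 16]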

Example 12: gated_linear_unit_layer

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import split [as alias]
def gated_linear_unit_layer(x, name=None):
  """Gated linear unit layer.

  Paper: Language Modeling with Gated Convolutional Networks.
  Link: https://arxiv.org/abs/1612.08083
  x = Wx * sigmoid(W'x).

  Args:
    x: A tensor
    name: A string

  Returns:
    A tensor of the same shape as x.
  """
  with tf.variable_scope(name, default_name="glu_layer", values=[x]):
    depth = shape_list(x)[-1]
    x = layers().Dense(depth * 2, activation=None)(x)
    x, gating_x = tf.split(x, 2, axis=-1)
    return x * tf.nn.sigmoid(gating_x) 
Developer ID: tensorflow, Project: tensor2tensor, Lines of code: 21, Source file: common_layers.py
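For reference, the same gated linear unit without the tensor2tensor wrappers (layers().Dense and shape_list are t2t helpers); a minimal sketch using tf.layers.dense:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

def glu_sketch(x, name=None):
  # Computes Wx * sigmoid(W'x) by halving a doubled projection.
  with tf.variable_scope(name, default_name='glu_sketch'):
    depth = x.shape[-1].value
    h = tf.layers.dense(x, depth * 2)  # both projections in one matmul
    a, gate = tf.split(h, 2, axis=-1)
    return a * tf.nn.sigmoid(gate)

y = glu_sketch(tf.zeros([2, 5, 8]))
print(y.shape.as_list())  # [2, 5, 8]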

Example 13: testSymbolModalityInputs

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import split [as alias]
def testSymbolModalityInputs(self):
    batch_size = 10
    num_datashards = 5
    length = 5
    vocab_size = 5000
    hidden_size = 9
    model_hparams = common_hparams.basic_params1()
    model_hparams.hidden_size = hidden_size
    model_hparams.mode = tf.estimator.ModeKeys.TRAIN
    x = np.random.randint(
        vocab_size, size=(batch_size, length, 1, 1))
    data_parallelism = expert_utils.Parallelism(
        ["/device:CPU:0"] * num_datashards)
    xs = tf.split(x, num_datashards)
    sharded_output = data_parallelism(
        modalities.get_bottom(modalities.ModalityType.SYMBOL),
        xs,
        model_hparams,
        vocab_size)
    output = tf.concat(sharded_output, 0)
    self.evaluate(tf.global_variables_initializer())
    res = self.evaluate(output)
    self.assertEqual(res.shape, (batch_size, length, 1, hidden_size)) 
Developer ID: tensorflow, Project: tensor2tensor, Lines of code: 25, Source file: modalities_test.py
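The test shards its batch across devices by splitting along axis 0, which requires batch_size % num_datashards == 0. A minimal sketch of just the sharding step:

import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

batch_size, num_datashards = 10, 5
x = np.zeros((batch_size, 5, 1, 1))
xs = tf.split(x, num_datashards)  # five [2, 5, 1, 1] shards
print([t.shape.as_list() for t in xs])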

Example 14: is_generate_per_split

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import split [as alias]
def is_generate_per_split(self):
    """A single call to `generate_samples` generates for all `dataset_splits`.

    Set to True if you already have distinct subsets of data for each dataset
    split specified in `self.dataset_splits`. `self.generate_samples` will be
    called once for each split.

    Set to False if you have a unified dataset that you'd like to have split out
    into training and evaluation data automatically. `self.generate_samples`
    will be called only once and the data will be sharded across the dataset
    splits specified in `self.dataset_splits`.

    Returns:
      bool
    """
    raise NotImplementedError() 
Developer ID: tensorflow, Project: tensor2tensor, Lines of code: 18, Source file: video_utils.py

Example 15: generate_samples

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import split [as alias]
def generate_samples(self, data_dir, tmp_dir, dataset_split):
    """Generate samples of the frames with possible extra data.

    Args:
      data_dir: final data directory. Typically only used in this method to copy
        over user-supplied vocab files if there are extra fields needing them.
      tmp_dir: temporary directory that you can use for downloading and scratch.
      dataset_split: problem.DatasetSplit, which data split to generate samples
        for (for example, training and evaluation). You can assume it's TRAIN
        if self.is_generate_per_split is False.

    Yields:
      Sample: dict<str feature_name, feature value>; we assume that there is
        a "frame" feature with the unencoded frame, which is a numpy array of
        shape [frame_height, frame_width, num_channels] and which will be
        transcoded into an image format by generate_encoded_samples.
    """
    raise NotImplementedError() 
Developer ID: tensorflow, Project: tensor2tensor, Lines of code: 20, Source file: video_utils.py


Note: the tensorflow.compat.v1.split examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors. Refer to each project's license before redistributing or reusing the code. Do not reproduce without permission.