

Python v2.size Method Code Examples

This article collects typical usage examples of the tensorflow.compat.v2.size method in Python. If you have been wondering what v2.size does, how to use it, or where to find worked examples, the curated samples below should help. You can also explore further usage examples from the tensorflow.compat.v2 module.


15 code examples of the v2.size method are shown below, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code samples.

Example 1: diag

# Module to import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import size [as alias]
def diag(v, k=0):  # pylint: disable=missing-docstring
  """Raises an error if input is not 1- or 2-d."""
  v = asarray(v).data
  v_rank = tf.rank(v)

  v.shape.with_rank_at_most(2)

  # TODO(nareshmodi): Consider a utils.Assert version that will fail during
  # tracing time if the shape is known.
  tf.debugging.Assert(
      utils.logical_or(tf.equal(v_rank, 1), tf.equal(v_rank, 2)), [v_rank])

  def _diag(v, k):
    return utils.cond(
        tf.equal(tf.size(v), 0),
        lambda: tf.zeros([abs(k), abs(k)], dtype=v.dtype),
        lambda: tf.linalg.diag(v, k=k))

  def _diag_part(v, k):
    v_shape = tf.shape(v)
    v, k = utils.cond(
        utils.logical_or(
            utils.less_equal(k, -1 * utils.getitem(v_shape, 0)),
            utils.greater_equal(k, utils.getitem(v_shape, 1)),
        ), lambda: (tf.zeros([0, 0], dtype=v.dtype), 0), lambda: (v, k))
    result = tf.linalg.diag_part(v, k=k)
    return result

  result = utils.cond(
      tf.equal(v_rank, 1), lambda: _diag(v, k), lambda: _diag_part(v, k))
  return utils.tensor_to_ndarray(result) 
Author: google | Project: trax | Lines: 33 | Source: array_ops.py
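
For context, here is a minimal standalone sketch (not taken from the trax source; sample values are assumed) showing the NumPy behavior that this diag wrapper mirrors and the tf.linalg.diag call it delegates to:

import numpy as np
import tensorflow.compat.v2 as tf

# 1-D input: build a matrix with v on the k-th diagonal.
v = tf.constant([1, 2, 3])
print(tf.linalg.diag(v, k=0).numpy())
# [[1 0 0]
#  [0 2 0]
#  [0 0 3]]

# 2-D input: extract the k-th diagonal, as np.diag does.
m = np.arange(9).reshape(3, 3)
print(np.diag(m, k=1))  # [1 5]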

Example 2: repeat

# Module to import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import size [as alias]
def repeat(a, repeats, axis=None):  # pylint: disable=missing-docstring
  a = asarray(a).data
  original_shape = a._shape_as_list()  # pylint: disable=protected-access
  # Best effort recovery of the shape.
  if original_shape is not None and None not in original_shape:
    if not original_shape:
      original_shape = (repeats,)
    else:
      repeats_np = np.ravel(np.array(repeats))
      if repeats_np.size == 1:
        repeats_np = repeats_np.item()
        if axis is None:
          original_shape = (repeats_np * np.prod(original_shape),)
        else:
          original_shape[axis] = repeats_np * original_shape[axis]
      else:
        if axis is None:
          original_shape = (repeats_np.sum(),)
        else:
          original_shape[axis] = repeats_np.sum()

  repeats = asarray(repeats).data
  result = tf.repeat(a, repeats, axis)
  result.set_shape(original_shape)

  return utils.tensor_to_ndarray(result) 
Author: google | Project: trax | Lines: 28 | Source: array_ops.py
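
As a quick illustration (a sketch with assumed sample values, not part of the trax file), the underlying tf.repeat accepts either a scalar repeat count or one count per element along the axis, which is why the shape-recovery code above handles both cases:

import tensorflow.compat.v2 as tf

a = tf.constant([[1, 2], [3, 4]])
# Scalar repeats: every row is repeated twice.
print(tf.repeat(a, repeats=2, axis=0).numpy())
# [[1 2]
#  [1 2]
#  [3 4]
#  [3 4]]
# Per-element repeats along axis 1: column 0 once, column 1 three times.
print(tf.repeat(a, repeats=[1, 3], axis=1).numpy())
# [[1 2 2 2]
#  [3 4 4 4]]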

Example 3: _boundaries_to_sizes

# Module to import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import size [as alias]
def _boundaries_to_sizes(a, boundaries, axis):
  """Converting boundaries of splits to sizes of splits.

  Args:
    a: the array to be split.
    boundaries: the boundaries, as in np.split.
    axis: the axis along which to split.

  Returns:
    A list of sizes of the splits, as in tf.split.
  """
  if axis >= len(a.shape):
    raise ValueError('axis %s is out of bounds for shape %s' % (axis, a.shape))
  total_size = a.shape[axis]
  sizes = []
  sizes_sum = 0
  prev = 0
  for i, b in enumerate(boundaries):
    size = b - prev
    if size < 0:
      raise ValueError('The %s-th boundary %s is smaller than the previous '
                       'boundary %s' % (i, b, prev))
    size = min(size, max(0, total_size - sizes_sum))
    sizes.append(size)
    sizes_sum += size
    prev = b
  sizes.append(max(0, total_size - sizes_sum))
  return sizes 
Author: google | Project: trax | Lines: 30 | Source: array_ops.py
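
To see the conversion concretely, here is a pure-Python rendition of the same arithmetic (an illustrative sketch that omits the negative-size check). With total_size=6, np.split-style boundaries [2, 5] become tf.split-style sizes [2, 3, 1]:

def boundaries_to_sizes(total_size, boundaries):
  sizes, prev = [], 0
  for b in boundaries:
    # Clamp so that boundaries beyond the end yield empty splits.
    sizes.append(min(b - prev, max(0, total_size - sum(sizes))))
    prev = b
  sizes.append(max(0, total_size - sum(sizes)))
  return sizes

print(boundaries_to_sizes(6, [2, 5]))   # [2, 3, 1]
print(boundaries_to_sizes(6, [2, 10]))  # [2, 4, 0] -- boundary past the end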

Example 4: _pad_left_to

# Module to import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import size [as alias]
def _pad_left_to(n, old_shape):
  old_shape = asarray(old_shape, dtype=np.int32).data
  new_shape = tf.pad(
      old_shape, [[tf.math.maximum(n - tf.size(old_shape), 0), 0]],
      constant_values=1)
  return asarray(new_shape) 
Author: google | Project: trax | Lines: 8 | Source: array_ops.py
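
The effect is to left-pad a shape vector with ones up to length n, the same alignment rule broadcasting uses. A small assumed example of the core tf.pad call:

import tensorflow.compat.v2 as tf

old_shape = tf.constant([3, 2], dtype=tf.int32)
n = 4
new_shape = tf.pad(old_shape,
                   [[tf.math.maximum(n - tf.size(old_shape), 0), 0]],
                   constant_values=1)
print(new_shape.numpy())  # [1 1 3 2]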

Example 5: atleast_3d

# Module to import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import size [as alias]
def atleast_3d(*arys):  # pylint: disable=missing-docstring

  def new_shape(_, old_shape):
    # pylint: disable=g-long-lambda
    ndim_ = tf.size(old_shape)
    return utils.cond(
        ndim_ == 0, lambda: tf.constant([1, 1, 1], dtype=tf.int32),
        lambda: utils.cond(
            ndim_ == 1, lambda: tf.pad(old_shape, [[1, 1]], constant_values=1),
            lambda: tf.pad(old_shape, [[0, 1]], constant_values=1)))

  return _atleast_nd(3, new_shape, *arys) 
Author: google | Project: trax | Lines: 14 | Source: array_ops.py
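
The three branches of new_shape reproduce NumPy's placement rules: a scalar becomes (1, 1, 1), a vector of length N becomes (1, N, 1), and a 2-D shape gets a trailing axis. This can be checked against NumPy directly (an assumed comparison, not from the source):

import numpy as np

print(np.atleast_3d(5.0).shape)              # (1, 1, 1)
print(np.atleast_3d(np.ones(4)).shape)       # (1, 4, 1)
print(np.atleast_3d(np.ones((2, 3))).shape)  # (2, 3, 1)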

Example 6: safe_mean

# Module to import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import size [as alias]
def safe_mean(losses):
  total = tf.reduce_sum(losses)
  num_elements = tf.dtypes.cast(tf.size(losses), dtype=losses.dtype)
  return tf.math.divide_no_nan(total, num_elements) 
Author: artyompal | Project: tpu_models | Lines: 6 | Source: resnet50_ctl_tf2.py
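
The point of tf.math.divide_no_nan here is that an empty loss tensor yields 0 rather than NaN, which would otherwise poison a running average. A self-contained check with assumed values:

import tensorflow.compat.v2 as tf

def safe_mean(losses):
  total = tf.reduce_sum(losses)
  num_elements = tf.dtypes.cast(tf.size(losses), dtype=losses.dtype)
  return tf.math.divide_no_nan(total, num_elements)

print(safe_mean(tf.constant([1.0, 2.0, 3.0])).numpy())       # 2.0
print(safe_mean(tf.constant([], dtype=tf.float32)).numpy())  # 0.0, not NaN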

Example 7: _while_loop

# Module to import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import size [as alias]
def _while_loop(*, dim, steps_num, current_state,
                drift_fn, volatility_fn, wiener_mean,
                num_samples, times, dt, sqrt_dt, time_step, num_requested_times,
                keep_mask, swap_memory, random_type, seed, normal_draws):
  """Smaple paths using tf.while_loop."""
  cond_fn = lambda i, *args: i < steps_num
  def step_fn(i, written_count, current_state, result):
    return _euler_step(
        i=i,
        written_count=written_count,
        current_state=current_state,
        result=result,
        drift_fn=drift_fn,
        volatility_fn=volatility_fn,
        wiener_mean=wiener_mean,
        num_samples=num_samples,
        times=times,
        dt=dt,
        sqrt_dt=sqrt_dt,
        keep_mask=keep_mask,
        random_type=random_type,
        seed=seed,
        normal_draws=normal_draws)
  maximum_iterations = (tf.cast(1. / time_step, dtype=tf.int32)
                        + tf.size(times))
  result = tf.zeros((num_samples, num_requested_times, dim),
                    dtype=current_state.dtype)
  _, _, _, result = tf.while_loop(
      cond_fn, step_fn, (0, 0, current_state, result),
      maximum_iterations=maximum_iterations,
      swap_memory=swap_memory)
  return result 
Author: google | Project: tf-quant-finance | Lines: 34 | Source: euler_sampling.py

Example 8: _prepare_grid

# Module to import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import size [as alias]
def _prepare_grid(*, times, time_step, dtype):
  """Prepares grid of times for path generation.

  Args:
    times:  Rank 1 `Tensor` of increasing positive real values. The times at
      which the path points are to be evaluated.
    time_step: Rank 0 real `Tensor`. Maximal distance between points in
      resulting grid.
    dtype: `tf.Dtype` of the input and output `Tensor`s.

  Returns:
    Tuple `(all_times, mask, time_indices)`.
    `all_times` is a 1-D real `Tensor` containing all points from `times` and
    the uniform grid of points between `[0, times[-1]]` with grid size equal to
    `time_step`. The `Tensor` is sorted in ascending order and may contain
    duplicates.
    `mask` is a boolean 1-D `Tensor` of the same shape as `all_times`, showing
    which elements of `all_times` correspond to the values from `times`.
    Guarantees that `all_times[0]=0` and `mask[0]=False`.
    `time_indices` is an integer `Tensor` of the same shape as `times`,
    indicating the indices of `times` in `all_times`.
  """
  grid = tf.range(0.0, times[-1], time_step, dtype=dtype)
  all_times = tf.concat([grid, times], axis=0)
  mask = tf.concat([
      tf.zeros_like(grid, dtype=tf.bool),
      tf.ones_like(times, dtype=tf.bool)
  ],
                   axis=0)
  perm = tf.argsort(all_times, stable=True)
  all_times = tf.gather(all_times, perm)
  # Remove duplicate points
  all_times = tf.unique(all_times).y
  time_indices = tf.searchsorted(all_times, times)
  mask = tf.gather(mask, perm)
  return all_times, mask, time_indices 
Author: google | Project: tf-quant-finance | Lines: 38 | Source: euler_sampling.py
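
A worked trace of the grid construction, written with NumPy for readability (values assumed): with times = [0.3, 1.0] and time_step = 0.25, the uniform grid is merged with the requested times, and searchsorted recovers where each requested time landed.

import numpy as np

times = np.array([0.3, 1.0])
time_step = 0.25
grid = np.arange(0.0, times[-1], time_step)         # [0.   0.25 0.5  0.75]
all_times = np.sort(np.concatenate([grid, times]))
print(all_times)                                    # [0.   0.25 0.3  0.5  0.75 1.  ]
print(np.searchsorted(all_times, times))            # [2 5]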

Example 9: _prepare_grid

# Module to import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import size [as alias]
def _prepare_grid(times, *params):
  """Prepares grid of times for path generation.

  Args:
    times:  Rank 1 `Tensor` of increasing positive real values. The times at
      which the path points are to be evaluated.
    *params: Parameters of the Heston model. Either scalar `Tensor`s of the
      same `dtype` or instances of `PiecewiseConstantFunc`.

  Returns:
    Tuple `(all_times, mask)`.
    `all_times` is a 1-D real `Tensor` containing the point `0`, all points
    from `times`, and the jump locations of the piecewise constant parameters.
    The `Tensor` is sorted in ascending order and may contain duplicates.
    `mask` is a boolean 1-D `Tensor` of the same shape as `all_times`, showing
    which elements of `all_times` correspond to the values from `times`.
    Guarantees that `all_times[0]=0` and `mask[0]=False`.
  """
  additional_times = []
  for param in params:
    if hasattr(param, 'is_piecewise_constant'):
      if param.is_piecewise_constant:
        # Flatten all jump locations
        additional_times.append(tf.reshape(param.jump_locations(), [-1]))
  zeros = tf.constant([0], dtype=times.dtype)
  all_times = tf.concat([zeros] + [times] + additional_times, axis=0)
  additional_times_mask = [
      tf.zeros_like(times, dtype=tf.bool) for times in additional_times]
  mask = tf.concat([
      tf.cast(zeros, dtype=tf.bool),
      tf.ones_like(times, dtype=tf.bool)
  ] + additional_times_mask, axis=0)
  perm = tf.argsort(all_times, stable=True)
  all_times = tf.gather(all_times, perm)
  mask = tf.gather(mask, perm)
  return all_times, mask 
Author: google | Project: tf-quant-finance | Lines: 39 | Source: vector_hull_white.py

Example 10: gather_from_dict

# Module to import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import size [as alias]
def gather_from_dict(tensor_dict, choice):
  """Chooses tensor values along first dimension using given choice.

  If `tensor_dict` = {
    0: zeros(shape=(6)),
    1: ones(shape=(6)),
    2: twos(shape=(6)),
    3: threes(shape=(6))
  }
  and choice = [0, 0, 2, 2, 1, 0]
  then returned tensor is [0., 0., 2., 2., 1., 0.]

  Args:
    tensor_dict: A dict with int keys and tensor values. All tensor values must
      be of same type and shape.
    choice: A 1-d int tensor with number of elements equal to first dimension of
      tensors in `tensor_dict`. The values in the tensor must be valid keys in
      `tensor_dict`.

  Returns:
    A tensor of same type and shape as tensors in `tensor_dict`.
  """
  one_tensor = next(iter(tensor_dict.values()))

  # Check number of elements in `choice`.
  tf.debugging.assert_rank(choice, rank=1)
  tf.debugging.assert_equal(tf.size(choice), tf.shape(one_tensor)[0])

  zeros_tensor = tf.zeros_like(one_tensor)
  final_tensor = zeros_tensor
  for c, t in tensor_dict.items():
    # Check shapes and type
    tf.debugging.assert_equal(tf.shape(t), tf.shape(one_tensor))
    tf.debugging.assert_type(t, tf_type=one_tensor.dtype)
    final_tensor += tf.compat.v1.where(tf.equal(choice, c), t, zeros_tensor)
  return final_tensor 
Author: google-research | Project: valan | Lines: 38 | Source: utils.py
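
Running the scenario from the docstring against the function above (sample tensors assumed for illustration):

import tensorflow.compat.v2 as tf

tensor_dict = {
    0: tf.zeros([6]),
    1: tf.ones([6]),
    2: tf.fill([6], 2.0),
    3: tf.fill([6], 3.0),
}
choice = tf.constant([0, 0, 2, 2, 1, 0])
print(gather_from_dict(tensor_dict, choice).numpy())
# [0. 0. 2. 2. 1. 0.]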

Example 11: get_first_true_column

# Module to import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import size [as alias]
def get_first_true_column(x):
  """Transforms `x` into a tensor which has all elements set to False except the first True in the column.

  If x is [[True, False, False],
           [True, False, False],
           [False, True, False],
           [False, True, True]]
  the output should be
          [[True, False, False],
           [False, False, False],
           [False, True, False],
           [False, False, True]
          ]

  Args:
    x: A bool tensor with shape [num_steps, batch_size]

  Returns:
    A bool tensor with the same shape.
  """
  x = tf.transpose(x, perm=[1, 0])
  # Get indices
  y = tf.where(tf.equal(x, True))
  # Find first column in every row which is True
  first_true_cols = tf.cast(
      tf.math.segment_min(data=y[:, 1], segment_ids=y[:, 0]), tf.int32)
  # Convert back to indices
  first_true_indices = tf.stack(
      [tf.range(tf.size(first_true_cols)), first_true_cols], axis=1)
  # Now create the mask
  first_true_mask_sparse = tf.SparseTensor(
      indices=tf.cast(first_true_indices, tf.int64),
      values=tf.ones([tf.size(first_true_cols)], dtype=tf.bool),
      dense_shape=x.shape)
  first_true_mask = tf.sparse.to_dense(
      first_true_mask_sparse, default_value=False)
  return tf.transpose(first_true_mask, perm=[1, 0]) 
Author: google-research | Project: valan | Lines: 39 | Source: utils.py
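
The docstring example can be reproduced directly with the function above (input values taken from the docstring):

import tensorflow.compat.v2 as tf

x = tf.constant([[True, False, False],
                 [True, False, False],
                 [False, True, False],
                 [False, True, True]])
print(get_first_true_column(x).numpy())
# [[ True False False]
#  [False False False]
#  [False  True False]
#  [False False  True]]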

Example 12: circular_pad

# Module to import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import size [as alias]
def circular_pad(input_tensor, axis, padding):
  """Pads tensor circularly.

  More specifically, the right side is padded with values taken from the left
  edge of the tensor, as if a copy of the tensor had been concatenated on the
  right, and vice versa.

  Args:
    input_tensor: typically a batch of input "images"
    axis: the axis on which to perform the circular padding
    padding: a pair of integers `(pad_left, pad_right)` giving the number of
      values to wrap around on each side of `axis`

  Returns:
    The input tensor, padded circularly along `axis` by `padding[0]` elements
    on the left and `padding[1]` on the right.
  """
  assert 0 <= axis < len(input_tensor.shape), 'Axis out of bounds'
  multiples = [1] * len(input_tensor.shape)
  multiples[axis] = 3
  tiled_input = tf.tile(input_tensor, multiples)
  left = input_tensor.shape[axis] - padding[0]
  right = 2 * input_tensor.shape[axis] + padding[1]

  begin = [0] * len(input_tensor.shape)
  end = list(input_tensor.shape)
  begin[axis] = left
  end[axis] = right
  size = [a - b for a, b in zip(end, begin)]

  output_tensor = tf.slice(tiled_input, begin, size)
  # The result keeps the input shape except along `axis`, which grows by
  # padding[0] + padding[1].
  return output_tensor 
Author: google-research | Project: valan | Lines: 32 | Source: utils.py
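
A small assumed example makes the wrap-around visible; padding a length-3 axis by one element on each side pulls values from the opposite end:

import tensorflow.compat.v2 as tf

x = tf.constant([[1, 2, 3]])  # shape (1, 3)
print(circular_pad(x, axis=1, padding=(1, 1)).numpy())
# [[3 1 2 3 1]]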

Example 13: parallel_conv2d

# Module to import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import size [as alias]
def parallel_conv2d(inputs, filters, strides, padding):
  """Applies each filter in the batch of filters to each input.

  tf.nn.conv2d only supports applying the same filter on a batch of inputs.
  This function provides a similar interface, but allowing a batch of filters,
  a different one for each input.

  In the below definitions, B is the batch size, H and W are spatial input or
  output dimensions (overloaded between input and output), C1 is the input
  number of channels, C2 is output number of channels, KHxKW is the
  convolutional kernel spatial size.

  Args:
    inputs: BxHxWxC1 tensor - batch of input "images"
    filters: BxKHxKWxC1xC2 tensor - batch of convolutional kernels
    strides: See tf.nn.conv2d arg: strides
    padding: See tf.nn.conv2d arg: padding

  Returns:
    Tensor of shape BxHxWxC2
  """
  batch_size = inputs.shape[0]

  output_slices = [tf.nn.conv2d(inputs[i:i+1], filters[i], strides, padding)
                   for i in range(batch_size)]
  output = tf.stack(output_slices, axis=0)
  # Each output slice has a batch dimension of size 1. Get rid of it.
  assert output.shape[1] == 1, 'Each slice should have batch size of 1'
  output = output[:, 0, :, :, :]
  # Output should have same batch size and spatial dimensions as input, but
  # the number of channels is determined by the convolution filter
  assert_shape((batch_size, inputs.shape[1], inputs.shape[2], filters.shape[4]),
               output.shape)
  return output 
Author: google-research | Project: valan | Lines: 36 | Source: utils.py
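
A usage sketch with assumed shapes. Note that assert_shape lives in the project's utils module and is not shown above, so a minimal stand-in is defined here:

import tensorflow.compat.v2 as tf

def assert_shape(expected, actual):  # stand-in for the utils helper
  assert tuple(expected) == tuple(actual), (expected, actual)

inputs = tf.random.normal([4, 8, 8, 3])       # B=4, H=W=8, C1=3
filters = tf.random.normal([4, 3, 3, 3, 16])  # one 3x3 kernel stack per input, C2=16
out = parallel_conv2d(inputs, filters, strides=1, padding='SAME')
print(out.shape)  # (4, 8, 8, 16)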

Example 14: _filter_max_length

# Module to import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import size [as alias]
def _filter_max_length(example, max_title_length=256):
  """Indicates whether the example's length is lower than the maximum length."""
  return tf.size(example["targets"]) <= max_title_length 
Author: tensorflow | Project: models | Lines: 5 | Source: input_pipeline.py
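
In the pipeline this predicate is passed to tf.data.Dataset.filter; a minimal check with assumed inputs:

import tensorflow.compat.v2 as tf

short = {"targets": tf.zeros([10], dtype=tf.int64)}
long_ = {"targets": tf.zeros([300], dtype=tf.int64)}
print(_filter_max_length(short).numpy())  # True
print(_filter_max_length(long_).numpy())  # False
# Typical usage: dataset = dataset.filter(_filter_max_length)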

Example 15: get_input_dataset

# Module to import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import size [as alias]
def get_input_dataset(input_file_pattern,
                      batch_size,
                      params,
                      is_training,
                      strategy=None):
  """Returns input dataset from input file string."""

  # When using TPU pods, we need to clone dataset across
  # workers and need to pass in function that returns the dataset rather
  # than passing dataset instance itself.
  use_dataset_fn = isinstance(strategy, tf.distribute.experimental.TPUStrategy)
  if use_dataset_fn:
    if batch_size % strategy.num_replicas_in_sync != 0:
      raise ValueError(
          "Batch size must be divisible by number of replicas : {}".format(
              strategy.num_replicas_in_sync))

    # As auto rebatching is not supported in
    # `experimental_distribute_datasets_from_function()` API, which is
    # required when cloning dataset to multiple workers in eager mode,
    # we use per-replica batch size.
    batch_size = int(batch_size / strategy.num_replicas_in_sync)

  def _dataset_fn(ctx=None):
    """Returns tf.data.Dataset for distributed BERT pretraining."""
    input_files = []
    for input_pattern in input_file_pattern.split(","):
      input_files.extend(tf.io.gfile.glob(input_pattern))

    return create_dataset(
        input_files,
        batch_size,
        params,
        is_training=is_training,
        input_pipeline_context=ctx)

  if use_dataset_fn:
    return strategy.experimental_distribute_datasets_from_function(_dataset_fn)
  else:
    return strategy.experimental_distribute_dataset(_dataset_fn()) 
Author: tensorflow | Project: models | Lines: 42 | Source: input_pipeline.py


Note: The tensorflow.compat.v2.size examples in this article were collected by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their respective developers; copyright remains with the original authors, and distribution and use must follow each project's License. Do not reproduce without permission.