

Python tensorflow.cast Method Code Examples

This article collects typical code examples of the tensorflow.cast method in Python. If you are wondering what tensorflow.cast does, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples of other methods in the tensorflow module.


The following presents 15 code examples of the tensorflow.cast method, sorted by popularity by default.
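Before the examples, here is a minimal sketch of the basic call itself (my own illustration, not taken from any of the projects below): tf.cast(x, dtype) returns a tensor with the values of x converted to the requested dtype, and float-to-int casts truncate toward zero rather than round.

import tensorflow as tf

x = tf.constant([1.8, -2.2], dtype=tf.float32)
y = tf.cast(x, tf.int32)    # truncates toward zero: [1, -2]
z = tf.cast(y, tf.float32)  # back to float32: [1.0, -2.0]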

Example 1: _create_autosummary_var

# Required module: import tensorflow [as alias]
# Or: from tensorflow import cast [as alias]
# (This snippet also uses numpy, imported as np.)
def _create_autosummary_var(name, value_expr):
    assert not _autosummary_finalized
    v = tf.cast(value_expr, tf.float32)
    if v.shape.ndims == 0:
        v = [v, np.float32(1.0)]
    elif v.shape.ndims == 1:
        v = [tf.reduce_sum(v), tf.cast(tf.shape(v)[0], tf.float32)]
    else:
        v = [tf.reduce_sum(v), tf.reduce_prod(tf.cast(tf.shape(v), tf.float32))]
    v = tf.cond(tf.is_finite(v[0]), lambda: tf.stack(v), lambda: tf.zeros(2))
    with tf.control_dependencies(None):
        var = tf.Variable(tf.zeros(2)) # [numerator, denominator]
    update_op = tf.cond(tf.is_variable_initialized(var), lambda: tf.assign_add(var, v), lambda: tf.assign(var, v))
    if name in _autosummary_vars:
        _autosummary_vars[name].append(var)
    else:
        _autosummary_vars[name] = [var]
    return update_op

#----------------------------------------------------------------------------
# Call filewriter.add_summary() with all summaries in the default graph,
# automatically finalizing and merging them on the first call. 
Developer: zalandoresearch, Project: disentangling_conditional_gans, Lines: 24, Source: tfutil.py

Example 2: minibatch_stddev_layer

# Required module: import tensorflow [as alias]
# Or: from tensorflow import cast [as alias]
def minibatch_stddev_layer(x, group_size=4):
    with tf.variable_scope('MinibatchStddev'):
        group_size = tf.minimum(group_size, tf.shape(x)[0])     # Minibatch must be divisible by (or smaller than) group_size.
        s = x.shape                                             # [NCHW]  Input shape.
        y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]])   # [GMCHW] Split minibatch into M groups of size G.
        y = tf.cast(y, tf.float32)                              # [GMCHW] Cast to FP32.
        y -= tf.reduce_mean(y, axis=0, keep_dims=True)           # [GMCHW] Subtract mean over group.
        y = tf.reduce_mean(tf.square(y), axis=0)                # [MCHW]  Calc variance over group.
        y = tf.sqrt(y + 1e-8)                                   # [MCHW]  Calc stddev over group.
        y = tf.reduce_mean(y, axis=[1,2,3], keep_dims=True)      # [M111]  Take average over fmaps and pixels.
        y = tf.cast(y, x.dtype)                                 # [M111]  Cast back to original data type.
        y = tf.tile(y, [group_size, 1, s[2], s[3]])             # [N1HW]  Replicate over group and pixels.
        return tf.concat([x, y], axis=1)                        # [NCHW]  Append as new fmap.

#----------------------------------------------------------------------------
# Generator network used in the paper. 
Developer: zalandoresearch, Project: disentangling_conditional_gans, Lines: 18, Source: networks.py
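A minimal usage sketch for minibatch_stddev_layer (the input tensor and its shape are made up for illustration; assumes TF 1.x as in the original project):

x = tf.random_normal([8, 16, 4, 4])            # [NCHW] minibatch of 8 feature maps
y = minibatch_stddev_layer(x, group_size=4)    # [8, 17, 4, 4]: one stddev feature map appended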

Example 3: fprop

# Required module: import tensorflow [as alias]
# Or: from tensorflow import cast [as alias]
def fprop(self, x, y, **kwargs):
        kwargs.update(self.kwargs)
        if self.attack is not None:
            x = x, self.attack(x)
        else:
            x = x,

        # Catching RuntimeError: Variable -= value not supported by tf.eager.
        try:
            y -= self.smoothing * (y - 1. / tf.cast(y.shape[-1], y.dtype))
        except RuntimeError:
            y.assign_sub(self.smoothing * (y - 1. / tf.cast(y.shape[-1],
                                                            y.dtype)))

        logits = [self.model.get_logits(x, **kwargs) for x in x]
        loss = sum(
            tf.reduce_mean(softmax_cross_entropy_with_logits(labels=y,
                                                             logits=logit))
            for logit in logits)
        return loss 
Developer: StephanZheng, Project: neural-fingerprinting, Lines: 22, Source: loss.py

Example 4: from_float32_to_uint8

# Required module: import tensorflow [as alias]
# Or: from tensorflow import cast [as alias]
def from_float32_to_uint8(
        tensor,
        tensor_key='tensor',
        min_key='min',
        max_key='max'):
    """

    :param tensor:
    :param tensor_key:
    :param min_key:
    :param max_key:
    :returns:
    """
    tensor_min = tf.reduce_min(tensor)
    tensor_max = tf.reduce_max(tensor)
    return {
        tensor_key: tf.cast(
            (tensor - tensor_min) / (tensor_max - tensor_min + 1e-16)
            * 255.9999, dtype=tf.uint8),
        min_key: tensor_min,
        max_key: tensor_max
    } 
Developer: deezer, Project: spleeter, Lines: 24, Source: tensor.py
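A hypothetical usage sketch (tensor name and shape invented for illustration); the returned dict carries the uint8 data together with the original range, so the scaling can later be undone:

spec = tf.random_normal([512, 1024])
encoded = from_float32_to_uint8(spec)
# encoded['tensor'] -> tf.uint8 tensor with values in [0, 255]
# encoded['min'], encoded['max'] -> original value range, kept for the inverse transform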

Example 5: time_stretch

# Required module: import tensorflow [as alias]
# Or: from tensorflow import cast [as alias]
def time_stretch(
        spectrogram,
        factor=1.0,
        method=tf.image.ResizeMethod.BILINEAR):
    """ Time stretch a spectrogram preserving shape in tensorflow. Note that
    this is an approximation in the frequency domain.

    :param spectrogram: Input spectrogram to be time stretched as tensor.
    :param factor: (Optional) Time stretch factor, must be > 0, defaults to 1.
    :param method: (Optional) Interpolation method, defaults to BILINEAR.
    :returns: Time stretched spectrogram as tensor with same shape.
    """
    T = tf.shape(spectrogram)[0]
    T_ts = tf.cast(tf.cast(T, tf.float32) * factor, tf.int32)[0]
    F = tf.shape(spectrogram)[1]
    ts_spec = tf.image.resize_images(
        spectrogram,
        [T_ts, F],
        method=method,
        align_corners=True)
    return tf.image.resize_image_with_crop_or_pad(ts_spec, T, F) 
Developer: deezer, Project: spleeter, Lines: 23, Source: spectrogram.py
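Because T_ts is indexed with [0], time_stretch appears to expect factor as a shape-(1,) tensor rather than a plain Python scalar. A hedged usage sketch under that assumption (shapes invented for illustration):

spec = tf.random_normal([100, 1024, 2])          # [T, F, channels] spectrogram
factor = tf.constant([1.2], dtype=tf.float32)    # shape (1,) so that the [0] indexing works
stretched = time_stretch(spec, factor=factor)    # same shape as spec: [100, 1024, 2]

The same calling convention presumably applies to pitch_shift in the next example, whose F_ps is indexed the same way.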

Example 6: pitch_shift

# Required module: import tensorflow [as alias]
# Or: from tensorflow import cast [as alias]
def pitch_shift(
        spectrogram,
        semitone_shift=0.0,
        method=tf.image.ResizeMethod.BILINEAR):
    """ Pitch shift a spectrogram preserving shape in tensorflow. Note that
    this is an approximation in the frequency domain.

    :param spectrogram: Input spectrogram to be pitch shifted as tensor.
    :param semitone_shift: (Optional) Pitch shift in semitones, defaults to 0.0.
    :param method: (Optional) Interpolation method, defaults to BILINEAR.
    :returns: Pitch shifted spectrogram (same shape as spectrogram).
    """
    factor = 2 ** (semitone_shift / 12.)
    T = tf.shape(spectrogram)[0]
    F = tf.shape(spectrogram)[1]
    F_ps = tf.cast(tf.cast(F, tf.float32) * factor, tf.int32)[0]
    ps_spec = tf.image.resize_images(
        spectrogram,
        [T, F_ps],
        method=method,
        align_corners=True)
    paddings = [[0, 0], [0, tf.maximum(0, F - F_ps)], [0, 0]]
    return tf.pad(ps_spec[:, :F, :], paddings, 'CONSTANT') 
Developer: deezer, Project: spleeter, Lines: 25, Source: spectrogram.py

Example 7: loop_decode

# Required module: import tensorflow [as alias]
# Or: from tensorflow import cast [as alias]
def loop_decode(self):
        # decoder_initial_state: Tuple Tensor (c,h) of size [batch_size x cell.state_size]
        # decoder_first_input: Tensor [batch_size x cell.state_size]

        # Loop the decoding process and collect results
        s,i = self.decoder_initial_state,  tf.cast(self.decoder_first_input,tf.float32)
        for step in range(self.seq_length):
            s, i = self.decode(s,i,step)

        # Return to start
        self.positions.append(self.first_city)

        # Stack visited indices
        self.positions=tf.stack(self.positions,axis=1)  # [Batch,seq_length+1]

        # Sum log_softmax over output steps
        self.log_softmax=tf.add_n(self.log_softmax)  # [Batch,seq_length]

        # Stack attending & pointing distribution
        self.attending=tf.stack(self.attending,axis=1) # [Batch,seq_length,seq_length]
        self.pointing=tf.stack(self.pointing,axis=1) # [Batch,seq_length,seq_length]
        
        # Return stacked lists of visited_indices and log_softmax for backprop
        return self.positions,self.log_softmax 
Developer: MichelDeudon, Project: neural-combinatorial-optimization-rl-tensorflow, Lines: 26, Source: decoder.py

Example 8: distorted_inputs

# Required module: import tensorflow [as alias]
# Or: from tensorflow import cast [as alias]
def distorted_inputs():
  """Construct distorted input for CIFAR training using the Reader ops.

  Returns:
    images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
    labels: Labels. 1D tensor of [batch_size] size.

  Raises:
    ValueError: If no data_dir
  """
  if not FLAGS.data_dir:
    raise ValueError('Please supply a data_dir')
  data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')
  images, labels = cifar10_input.distorted_inputs(data_dir=data_dir,
                                                  batch_size=FLAGS.batch_size)
  if FLAGS.use_fp16:
    images = tf.cast(images, tf.float16)
    labels = tf.cast(labels, tf.float16)
  return images, labels 
Developer: ringringyi, Project: DOTA_models, Lines: 21, Source: cifar10.py

Example 9: inputs

# Required module: import tensorflow [as alias]
# Or: from tensorflow import cast [as alias]
def inputs(eval_data):
  """Construct input for CIFAR evaluation using the Reader ops.

  Args:
    eval_data: bool, indicating if one should use the train or eval data set.

  Returns:
    images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
    labels: Labels. 1D tensor of [batch_size] size.

  Raises:
    ValueError: If no data_dir
  """
  if not FLAGS.data_dir:
    raise ValueError('Please supply a data_dir')
  data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')
  images, labels = cifar10_input.inputs(eval_data=eval_data,
                                        data_dir=data_dir,
                                        batch_size=FLAGS.batch_size)
  if FLAGS.use_fp16:
    images = tf.cast(images, tf.float16)
    labels = tf.cast(labels, tf.float16)
  return images, labels 
Developer: ringringyi, Project: DOTA_models, Lines: 25, Source: cifar10.py

Example 10: loss

# Required module: import tensorflow [as alias]
# Or: from tensorflow import cast [as alias]
def loss(logits, labels):
  """Add L2Loss to all the trainable variables.

  Add summary for "Loss" and "Loss/avg".
  Args:
    logits: Logits from inference().
    labels: Labels from distorted_inputs or inputs(). 1-D tensor
            of shape [batch_size]

  Returns:
    Loss tensor of type float.
  """
  # Calculate the average cross entropy loss across the batch.
  labels = tf.cast(labels, tf.int64)
  cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
      labels=labels, logits=logits, name='cross_entropy_per_example')
  cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
  tf.add_to_collection('losses', cross_entropy_mean)

  # The total loss is defined as the cross entropy loss plus all of the weight
  # decay terms (L2 loss).
  return tf.add_n(tf.get_collection('losses'), name='total_loss') 
Developer: ringringyi, Project: DOTA_models, Lines: 24, Source: cifar10.py

Example 11: char_predictions

# Required module: import tensorflow [as alias]
# Or: from tensorflow import cast [as alias]
def char_predictions(self, chars_logit):
    """Returns confidence scores (softmax values) for predicted characters.

    Args:
      chars_logit: chars logits, a tensor with shape
        [batch_size x seq_length x num_char_classes]

    Returns:
      A tuple (ids, log_prob, scores), where:
        ids - predicted characters, a int32 tensor with shape
          [batch_size x seq_length];
        log_prob - a log probability of all characters, a float tensor with
          shape [batch_size, seq_length, num_char_classes];
        scores - corresponding confidence scores for characters, a float
        tensor
          with shape [batch_size x seq_length].
    """
    log_prob = utils.logits_to_log_prob(chars_logit)
    ids = tf.to_int32(tf.argmax(log_prob, dimension=2), name='predicted_chars')
    mask = tf.cast(
        slim.one_hot_encoding(ids, self._params.num_char_classes), tf.bool)
    all_scores = tf.nn.softmax(chars_logit)
    selected_scores = tf.boolean_mask(all_scores, mask, name='char_scores')
    scores = tf.reshape(selected_scores, shape=(-1, self._params.seq_length))
    return ids, log_prob, scores 
Developer: ringringyi, Project: DOTA_models, Lines: 27, Source: model.py

Example 12: compute_first_or_last

# Required module: import tensorflow [as alias]
# Or: from tensorflow import cast [as alias]
def compute_first_or_last(self, select, first=True):
    # Perform first or last operation on row select with probabilistic row selection
    answer = tf.zeros_like(select)
    running_sum = tf.zeros([self.batch_size, 1], self.data_type)
    for i in range(self.max_elements):
      if (first):
        current = tf.slice(select, [0, i], [self.batch_size, 1])
      else:
        current = tf.slice(select, [0, self.max_elements - 1 - i],
                           [self.batch_size, 1])
      curr_prob = current * (1 - running_sum)
      curr_prob = curr_prob * tf.cast(curr_prob >= 0.0, self.data_type)
      running_sum += curr_prob
      temp_ans = []
      curr_prob = tf.expand_dims(tf.reshape(curr_prob, [self.batch_size]), 0)
      for i_ans in range(self.max_elements):
        if (not (first) and i_ans == self.max_elements - 1 - i):
          temp_ans.append(curr_prob)
        elif (first and i_ans == i):
          temp_ans.append(curr_prob)
        else:
          temp_ans.append(tf.zeros_like(curr_prob))
      temp_ans = tf.transpose(tf.concat(axis=0, values=temp_ans))
      answer += temp_ans
    return answer 
Developer: ringringyi, Project: DOTA_models, Lines: 27, Source: model.py

Example 13: batch_of_random_bools

# Required module: import tensorflow [as alias]
# Or: from tensorflow import cast [as alias]
def batch_of_random_bools(batch_size, n):
  """Return a batch of random "boolean" numbers.

  Args:
    batch_size:  Batch size dimension of returned tensor.
    n:  number of entries per batch.

  Returns:
    A [batch_size, n] tensor of "boolean" numbers, where each number is
    represented as -1 or 1.
  """

  as_int = tf.random_uniform(
      [batch_size, n], minval=0, maxval=2, dtype=tf.int32)
  expanded_range = (as_int * 2) - 1
  return tf.cast(expanded_range, tf.float32) 
Developer: ringringyi, Project: DOTA_models, Lines: 18, Source: train_eval.py
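A one-line usage sketch (numbers chosen arbitrarily):

signs = batch_of_random_bools(batch_size=32, n=10)   # [32, 10] float32 tensor of -1.0 / +1.0 entries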

Example 14: one_hot_encoding

# Required module: import tensorflow [as alias]
# Or: from tensorflow import cast [as alias]
def one_hot_encoding(labels, num_classes, scope=None):
  """Transform numeric labels into onehot_labels.

  Args:
    labels: [batch_size] target labels.
    num_classes: total number of classes.
    scope: Optional scope for name_scope.
  Returns:
    one hot encoding of the labels.
  """
  with tf.name_scope(scope, 'OneHotEncoding', [labels]):
    batch_size = labels.get_shape()[0]
    indices = tf.expand_dims(tf.range(0, batch_size), 1)
    labels = tf.cast(tf.expand_dims(labels, 1), indices.dtype)
    concated = tf.concat(axis=1, values=[indices, labels])
    onehot_labels = tf.sparse_to_dense(
        concated, tf.stack([batch_size, num_classes]), 1.0, 0.0)
    onehot_labels.set_shape([batch_size, num_classes])
    return onehot_labels 
Developer: ringringyi, Project: DOTA_models, Lines: 21, Source: ops.py
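A minimal usage sketch (assumes TF 1.x and a statically known batch size, since the function reads labels.get_shape()[0]):

labels = tf.constant([0, 2, 1])                    # [batch_size] integer class ids
onehot = one_hot_encoding(labels, num_classes=3)   # [3, 3] dense one-hot matrix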

Example 15: _reshape_instance_masks

# Required module: import tensorflow [as alias]
# Or: from tensorflow import cast [as alias]
def _reshape_instance_masks(self, keys_to_tensors):
    """Reshape instance segmentation masks.

    The instance segmentation masks are reshaped to [num_instances, height,
    width] and cast to boolean type to save memory.

    Args:
      keys_to_tensors: a dictionary from keys to tensors.

    Returns:
      A 3-D boolean tensor of shape [num_instances, height, width].
    """
    masks = keys_to_tensors['image/segmentation/object']
    if isinstance(masks, tf.SparseTensor):
      masks = tf.sparse_tensor_to_dense(masks)
    height = keys_to_tensors['image/height']
    width = keys_to_tensors['image/width']
    to_shape = tf.cast(tf.stack([-1, height, width]), tf.int32)

    return tf.cast(tf.reshape(masks, to_shape), tf.bool) 
Developer: ringringyi, Project: DOTA_models, Lines: 22, Source: tf_example_decoder.py


Note: The tensorflow.cast examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many developers, and copyright remains with the original authors; consult each project's license before distributing or reusing the code. Do not reproduce this article without permission.