

Python tensorflow.cast Function Code Examples

This article collects typical usage examples of the tensorflow.cast function in Python. If you are wondering how to use the cast function, what its arguments mean, or what real-world usage looks like, the curated code examples below may help.


Fifteen code examples of the cast function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
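
Before turning to the examples, here is a minimal sketch of what tf.cast itself does: it converts a tensor from one dtype to another, which the examples below rely on to mix integer labels, raw image bytes, and floating-point arithmetic. The snippet targets the TensorFlow 1.x session API that the listed projects use; it is an illustration written for this article, not code taken from any of those projects.

import tensorflow as tf

# An int32 tensor; tf.reduce_mean on it would use integer arithmetic and return 1.
x = tf.constant([0, 1, 2, 3])

# Cast to float32 so the mean is computed in floating point.
x_float = tf.cast(x, tf.float32)
mean = tf.reduce_mean(x_float)

with tf.Session() as sess:
    print(sess.run(mean))  # 1.5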

Example 1: _apply

  def _apply(self, grad, var, indices=None):
    lr = tf.cast(self._learning_rate_tensor, var.dtype.base_dtype)
    m = self.get_slot(var, "m")
    v = self.get_slot(var, "v")
    beta1_t = tf.cast(self._beta1_t, var.dtype.base_dtype)
    beta2_t = tf.cast(self._beta2_t, var.dtype.base_dtype)
    epsilon_t = tf.cast(self._epsilon_t, var.dtype.base_dtype)

    # m_t = beta1 * m + (1 - beta1) * g_t
    m_scaled_g_values = grad * (1 - beta1_t)
    m_t = tf.assign(m, m * beta1_t, use_locking=self._use_locking)
    with tf.control_dependencies([m_t]):
      m_t = self._assign_add(m, updates=m_scaled_g_values, indices=indices)
    m_gathered = self._gather(m_t, indices=indices)

    # Also see tf.nn.moments.
    variance = tf.squared_difference(grad, m_gathered)

    # v_t = beta2 * v + (1 - beta2) * variance
    v_scaled_new_values = variance * (1 - beta2_t)
    v_t = tf.assign(v, v * beta2_t, use_locking=self._use_locking)
    with tf.control_dependencies([v_t]):
      v_t = self._assign_add(v, updates=v_scaled_new_values, indices=indices)
    v_gathered = self._gather(v_t, indices=indices)

    factor = v_gathered / (variance + epsilon_t)
    update = lr * grad * tf.minimum(factor, 1.0)
    var_update = self._assign_sub(ref=var, updates=update, indices=indices)
    return tf.group(*[var_update, m_t])
Developer: rwth-i6, Project: returnn, Lines of code: 29, Source file: TFUpdater.py

Example 2: visualize_boxes_in_image

def visualize_boxes_in_image(image, boxlist, normalized=False, scope=None):
  """Overlay bounding box list on image.

  Currently this visualization plots a 1 pixel thick red bounding box on top
  of the image.  Note that tf.image.draw_bounding_boxes essentially is
  1 indexed.

  Args:
    image: an image tensor with shape [height, width, 3]
    boxlist: a BoxList
    normalized: (boolean) specify whether corners are to be interpreted
      as absolute coordinates in image space or normalized with respect to the
      image size.
    scope: name scope.

  Returns:
    image_and_boxes: an image tensor with shape [height, width, 3]
  """
  with tf.name_scope(scope, 'VisualizeBoxesInImage'):
    if not normalized:
      height, width, _ = tf.unstack(tf.shape(image))
      boxlist = scale(boxlist,
                      1.0 / tf.cast(height, tf.float32),
                      1.0 / tf.cast(width, tf.float32))
    corners = tf.expand_dims(boxlist.get(), 0)
    image = tf.expand_dims(image, 0)
    return tf.squeeze(tf.image.draw_bounding_boxes(image, corners), [0])
Developer: NoPointExc, Project: models, Lines of code: 27, Source file: box_list_ops.py

Example 3: boston_input_fn

def boston_input_fn():
    boston = tf.contrib.learn.datasets.load_boston()
    features = tf.cast(
        tf.reshape(tf.constant(boston.data), [-1, 13]), tf.float32)
    labels = tf.cast(
        tf.reshape(tf.constant(boston.target), [-1, 1]), tf.float32)
    return features, labels
Developer: HKUST-SING, Project: tensorflow, Lines of code: 7, Source file: dnn_test.py

Example 4: entropy

    def entropy(self, n, p):
        # Note that given n and p where p is a probability vector of
        # length k, the entropy requires a sum over all
        # possible configurations of a k-vector which sums to n. It's
        # expensive.
        # http://stackoverflow.com/questions/36435754/generating-a-numpy-array-with-all-combinations-of-numbers-that-sum-to-less-than
        sess = tf.Session()
        n = sess.run(tf.cast(tf.squeeze(n), dtype=tf.int32))
        sess.close()
        p = tf.cast(tf.squeeze(p), dtype=tf.float32)
        if isinstance(n, np.int32):
            k = get_dims(p)[0]
            max_range = np.zeros(k, dtype=np.int32) + n
            x = np.array([i for i in product(*(range(i+1) for i in max_range))
                                 if sum(i)==n])
            logpmf = self.logpmf(x, n, p)
            return tf.reduce_sum(tf.mul(tf.exp(logpmf), logpmf))
        else:
            out = []
            for j in range(n.shape[0]):
                k = get_dims(p)[0]
                max_range = np.zeros(k, dtype=np.int32) + n[j]
                x = np.array([i for i in product(*(range(i+1) for i in max_range))
                                     if sum(i)==n[j]])
                logpmf = self.logpmf(x, n[j], p[j, :])
                out += [tf.reduce_sum(tf.mul(tf.exp(logpmf), logpmf))]

            return tf.pack(out)
Developer: crack521, Project: edward, Lines of code: 28, Source file: distributions.py

Example 5: clip

def clip(x, min_value, max_value):
    '''Element-wise value clipping.
    '''
    if max_value < min_value:
        max_value = min_value
    return tf.clip_by_value(x, tf.cast(min_value, dtype=_FLOATX),
                            tf.cast(max_value, dtype=_FLOATX))
Developer: Veterun, Project: Keras, Lines of code: 7, Source file: tensorflow_backend.py

Example 6: __init__

    def __init__(self,num_classes, learning_rate, batch_size, decay_steps, decay_rate,sequence_length,
                 vocab_size,embed_size,is_training,initializer=tf.random_normal_initializer(stddev=0.1)):
        """init all hyperparameter here"""
        # set hyperparamter
        self.num_classes = num_classes
        self.batch_size = batch_size
        self.sequence_length=sequence_length
        self.vocab_size=vocab_size
        self.embed_size=embed_size
        self.hidden_size=embed_size
        self.is_training=is_training
        self.learning_rate=learning_rate
        self.initializer=initializer
        self.num_sampled=20

        # add placeholder (X,label)
        self.input_x = tf.placeholder(tf.int32, [None, self.sequence_length], name="input_x")  # X
        self.input_y = tf.placeholder(tf.int32,[None], name="input_y")  # y [None,num_classes]
        self.dropout_keep_prob=tf.placeholder(tf.float32,name="dropout_keep_prob")

        self.global_step = tf.Variable(0, trainable=False, name="Global_Step")
        self.epoch_step=tf.Variable(0,trainable=False,name="Epoch_Step")
        self.epoch_increment=tf.assign(self.epoch_step,tf.add(self.epoch_step,tf.constant(1)))
        self.decay_steps, self.decay_rate = decay_steps, decay_rate

        self.instantiate_weights()
        self.logits = self.inference() #[None, self.label_size]. main computation graph is here.
        if not is_training:
            return
        self.loss_val = self.loss() #-->self.loss_nce()
        self.train_op = self.train()
        self.predictions = tf.argmax(self.logits, axis=1, name="predictions")  # shape:[None,]
        correct_prediction = tf.equal(tf.cast(self.predictions,tf.int32), self.input_y) #tf.argmax(self.logits, 1)-->[batch_size]
        self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name="Accuracy") # shape=()
Developer: brucexia6116, Project: text_classification, Lines of code: 34, Source file: p8_TextRNN_model.py

Example 7: _create_classification_weights

  def _create_classification_weights(self,
                                     match,
                                     positive_class_weight=1.0,
                                     negative_class_weight=1.0):
    """Create classification weights for each anchor.

    Positive (matched) anchors are associated with a weight of
    positive_class_weight and negative (unmatched) anchors are associated with
    a weight of negative_class_weight. When anchors are ignored, weights are set
    to zero. By default, both positive/negative weights are set to 1.0,
    but they can be adjusted to handle class imbalance (which is almost always
    the case in object detection).

    Args:
      match: a matcher.Match object that provides a matching between anchors
        and groundtruth boxes.
      positive_class_weight: weight to be associated to positive anchors
      negative_class_weight: weight to be associated to negative anchors

    Returns:
      cls_weights: a float32 tensor with shape [num_anchors] representing
        classification weights.
    """
    matched_indicator = tf.cast(match.matched_column_indicator(), tf.float32)
    ignore_indicator = tf.cast(match.ignored_column_indicator(), tf.float32)
    unmatched_indicator = 1.0 - matched_indicator - ignore_indicator
    cls_weights = (positive_class_weight * matched_indicator
                   + negative_class_weight * unmatched_indicator)
    return cls_weights
Developer: GERASM1, Project: Semana-i-Equipo-Seat-Here, Lines of code: 29, Source file: target_assigner.py

Example 8: read_cifar_files

def read_cifar_files(filename_queue, distort_images = True):
    reader = tf.FixedLengthRecordReader(record_bytes=record_length)
    key, record_string = reader.read(filename_queue)
    record_bytes = tf.decode_raw(record_string, tf.uint8)
    image_label = tf.cast(tf.slice(record_bytes, [0], [1]), tf.int32)
  
    # Extract image
    image_extracted = tf.reshape(tf.slice(record_bytes, [1], [image_vec_length]),
                                 [num_channels, image_height, image_width])
    
    # Reshape image
    image_uint8image = tf.transpose(image_extracted, [1, 2, 0])
    reshaped_image = tf.cast(image_uint8image, tf.float32)
    # Crop or pad the image to the target size (resize_image_with_crop_or_pad does a central crop, not a random one)
    final_image = tf.image.resize_image_with_crop_or_pad(reshaped_image, crop_width, crop_height)
    
    if distort_images:
        # Randomly flip the image horizontally, change the brightness and contrast
        final_image = tf.image.random_flip_left_right(final_image)
        final_image = tf.image.random_brightness(final_image,max_delta=63)
        final_image = tf.image.random_contrast(final_image,lower=0.2, upper=1.8)

    # Per-image standardization (whitening)
    final_image = tf.image.per_image_standardization(final_image)
    return(final_image, image_label)
Developer: Bluebear171, Project: tensorflow_cookbook, Lines of code: 25, Source file: 03_cnn_cifar10.py

Example 9: _add_layer

    def _add_layer( self, input, n_in, n_out, activation=None, weights=None, bias=None, dropout=None, l2_reg=False ):
        if( weights is None ):
            ''' Xavier init '''
            init_range = math.sqrt(6.0 / (n_in + n_out))
            init_w = tf.random_uniform( [n_in,n_out], -init_range, init_range)
            weights = tf.cast( tf.Variable( init_w ), tf.float32 )
            self.weights.append( weights )

        if( bias is None ):
            bias = tf.cast( tf.Variable( tf.zeros( [ n_out ] ) ), tf.float32 )
            self.bias.append( bias )

        if( l2_reg ):
            ''' L2 regularization '''
            l2_reg = tf.nn.l2_loss( weights )
            self.l2_reg += l2_reg

        layer = tf.matmul( input, weights ) + bias
        if( activation is not None ):
            layer = activation( layer )

        if( dropout is not None ):
            ''' Dropout + scaling '''
            layer = tf.nn.dropout( layer, 1-dropout ) * 1/( 1- dropout )

        return layer
Developer: utkarshsimha, Project: deep-learning, Lines of code: 26, Source file: tf_deep_neural_network.py

Example 10: _fn

  def _fn(*args):
    p = tf.identity(proposal_log_prob_fn(*args), name="proposal_log_prob")
    t = tf.identity(target_log_prob_fn(*args), name="target_log_prob")
    dtype = p.dtype.base_dtype
    beta = tf.cast(iter_ + 1, dtype) / tf.cast(num_steps, dtype)
    return tf.identity(beta * t + (1. - beta) * p,
                       name="convex_combined_log_prob")
Developer: asudomoeva, Project: probability, Lines of code: 7, Source file: sample_annealed_importance.py

Example 11: read_and_decode

def read_and_decode(filename_queue):
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(
        serialized_example,
        # Defaults are not specified since both keys are required.
        features={
            'image_raw': tf.FixedLenFeature([], tf.string),
            'label': tf.FixedLenFeature([], tf.int64),
            'height': tf.FixedLenFeature([], tf.int64),
            'width': tf.FixedLenFeature([], tf.int64),
            'depth': tf.FixedLenFeature([], tf.int64)
        })

    image = tf.decode_raw(features['image_raw'], tf.uint8)
    img_height = tf.cast(features['height'], tf.int32)
    img_width = tf.cast(features['width'], tf.int32)
    img_depth = tf.cast(features['depth'], tf.int32)
    # Convert label from a scalar uint8 tensor to an int32 scalar.
    label = tf.cast(features['label'], tf.int32)

    image.set_shape([IMG_PIXELS])
    image = tf.reshape(image, [IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS])

    # Convert from [0, 255] -> [-0.5, 0.5] floats.
    image = tf.cast(image, tf.float32) * (1. / 255) - 0.5

    return image, label
Developer: ankurag12, Project: CIFAR-10, Lines of code: 28, Source file: read_data.py

Example 12: compute_accuracy

def compute_accuracy(y_hat, labels, sparse=False):
    """Compute accuracy for a 3-dimensional outputs.

    The prediction is assumed to be made by argmax.

    Parameters
    ----------
    y_hat : tensor, shape (batch_size, n_samples, n_outputs)
        Raw predictions of a neural network. It is not required to convert it
        to softmax, because softmax is a monotonic transform.
    labels : tensor
        True labels. It can have shape (batch_size, n_samples), then each
        value should be an index within [0, n_classes). Or alternatively
        it can have shape (batch_size, n_samples, n_outputs), then for each
        sample a probability distribution with n_outputs values should be
        provided (this case also handles one-hot label encoding). In the
        latter case the correct label is also selected by argmax. Set `sparse`
        parameter to select an appropriate setting.
    sparse : bool, default False
        Whether `labels` are indices or full distributions.

    Returns
    -------
    accuracy : scalar tensor
        Computed accuracy.
    """
    prediction = tf.arg_max(y_hat, 2)
    if sparse:
        labels = tf.cast(labels, prediction.dtype)
    else:
        labels = tf.arg_max(labels, 2)

    return tf.reduce_mean(tf.cast(tf.equal(prediction, labels), tf.float32))
Developer: developeralgo8888, Project: ufcnn, Lines of code: 33, Source file: ufcnn.py

Example 13: drawGraph

    def drawGraph(self, n_row, n_latent, n_col):
        with tf.name_scope('matDecomp'):
            self._p = tf.placeholder(tf.float32, shape=[None, n_col])
            self._c = tf.placeholder(tf.float32, shape=[None, n_col])
            self._lambda = tf.placeholder(tf.float32)
            self._index = tf.placeholder(tf.float32, shape=[None, n_row])
            self._A = tf.Variable(tf.truncated_normal([n_row, n_latent]))
            self._B = tf.Variable(tf.truncated_normal([n_latent, n_col]))
            self._h = tf.matmul(tf.matmul(self._index, self._A), self._B) 
            
            weighted_loss = tf.reduce_mean(tf.mul(self._c, tf.squared_difference(self._p, self._h)))
            self._weighted_loss = weighted_loss
            l2_A = tf.reduce_sum(tf.square(self._A))
            l2_B = tf.reduce_sum(tf.square(self._B))
            n_w = tf.constant(n_row * n_latent + n_latent * n_col, tf.float32)
            l2 = tf.truediv(tf.add(l2_A, l2_B), n_w)
            reg_term = tf.mul(self._lambda, l2)
            self._loss = tf.add(weighted_loss, reg_term)
            
            self._mask = tf.placeholder(tf.float32, shape=[n_row, n_col])
            one = tf.constant(1, tf.float32)
            pred = tf.cast(tf.greater_equal(tf.matmul(self._A, self._B), one), tf.float32)
            cor = tf.mul(tf.cast(tf.equal(pred, self._p), tf.float32), self._c)
            self._vali_err = tf.reduce_sum(tf.mul(cor, self._mask))

            self._saver = tf.train.Saver([v for v in tf.all_variables() if v.name.find('matDecomp') != -1])
            tf.scalar_summary('training_weighted_loss_l2', self._loss)
            tf.scalar_summary('validation_weighted_loss', self._weighted_loss)
            merged = tf.merge_all_summaries()
Developer: cning, Project: ehc, Lines of code: 29, Source file: model.py

Example 14: ValidArcAndTokenMasks

def ValidArcAndTokenMasks(lengths, max_length, dtype=tf.float32):
  r"""Returns 0/1 masks for valid arcs and tokens.

  Args:
    lengths: [B] vector of input sequence lengths.
    max_length: Scalar maximum input sequence length, aka M.
    dtype: Data type for output mask.

  Returns:
    [B,M,M] tensor A with 0/1 indicators of valid arcs.  Specifically,
      A_{b,t,s} = t,s < lengths[b] ? 1 : 0
    [B,M] matrix T with 0/1 indicators of valid tokens.  Specifically,
      T_{b,t} = t < lengths[b] ? 1 : 0
  """
  lengths_bx1 = tf.expand_dims(lengths, 1)
  sequence_m = tf.range(tf.cast(max_length, lengths.dtype.base_dtype))
  sequence_1xm = tf.expand_dims(sequence_m, 0)

  # Create vectors of 0/1 indicators for valid tokens.  Note that the comparison
  # operator will broadcast from [1,M] and [B,1] to [B,M].
  valid_token_bxm = tf.cast(sequence_1xm < lengths_bx1, dtype)

  # Compute matrices of 0/1 indicators for valid arcs as the outer product of
  # the valid token indicator vector with itself.
  valid_arc_bxmxm = tf.matmul(
      tf.expand_dims(valid_token_bxm, 2), tf.expand_dims(valid_token_bxm, 1))

  return valid_arc_bxmxm, valid_token_bxm
Developer: ALISCIFP, Project: models, Lines of code: 28, Source file: digraph_ops.py

Example 15: GetLengths

  def GetLengths(self, dim=2, factor=1):
    """Returns the lengths of the batch of elements in the given dimension.

    WARNING: The returned sizes may not exactly match TF's calculation.
    Args:
      dim: dimension to get the sizes of, in [1,2]. batch, depth not allowed.
      factor: A scalar value to multiply by.

    Returns:
      The original heights/widths scaled by the current scaling of the model and
      the given factor.

    Raises:
      ValueError: If the args are invalid.
    """
    if dim == 1:
      lengths = self.heights
    elif dim == 2:
      lengths = self.widths
    else:
      raise ValueError('Invalid dimension given to GetLengths')
    lengths = tf.cast(lengths, tf.float32)
    if self.reduction_factors[dim] is not None:
      lengths = tf.div(lengths, self.reduction_factors[dim])
    else:
      lengths = tf.ones_like(lengths)
    if factor != 1:
      lengths = tf.mul(lengths, tf.cast(factor, tf.float32))
    return tf.cast(lengths, tf.int32)
Developer: Peratham, Project: models, Lines of code: 29, Source file: vgslspecs.py


Note: The tensorflow.cast examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by their authors; copyright in the source code remains with the original authors, and distribution or use should follow the license of the corresponding project. Do not reproduce without permission.