

Python tensorflow.round Function Code Examples

This article collects typical usage examples of the Python tensorflow.round function. If you are unsure what round does, how to call it, or what real-world usage looks like, the curated code examples below should help.


The following presents 15 code examples of the round function, sorted by popularity by default.
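Before the examples, here is a minimal sketch of tf.round itself (written in TensorFlow 1.x style to match the snippets below, not taken from any of the projects): it rounds element-wise to the nearest integer and, per the TensorFlow documentation, rounds halfway values to the nearest even integer rather than away from zero.

import tensorflow as tf

x = tf.constant([0.5, 1.5, 2.3, 2.5, -1.5])
rounded = tf.round(x)  # element-wise; ties go to the nearest even integer

with tf.Session() as sess:
    print(sess.run(rounded))  # [ 0.  2.  2.  2. -2.]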

Example 1: compute_center_coords

    def compute_center_coords(self, y_true, y_pred):
        batch_size = tf.shape(y_pred)[0]
        h = tf.shape(y_pred)[1]
        w = tf.shape(y_pred)[2]
        n_chans = tf.shape(y_pred)[3]
        n_dims = 5

        # weighted center of mass
        x = tf.cast(tf.tile(tf.reshape(self.xs, [1, h, w]), [batch_size, 1, 1]), tf.float32)
        y = tf.cast(tf.tile(tf.reshape(self.ys, [1, h, w]), [batch_size, 1, 1]), tf.float32)

        eps = 1e-8
        # grayscale
        pred_gray = tf.reduce_mean(y_pred, axis=-1)  # should be batch_size x h x w
        # normalize
        pred_gray = pred_gray - tf.reduce_min(pred_gray, axis=[1, 2], keepdims=True)
        pred_gray = pred_gray / (eps + tf.reduce_max(pred_gray, axis=[1, 2], keepdims=True))
        pred_gray = tf.clip_by_value(pred_gray, 0., 1.)

        # make each of these (batch_size, 1)
        weighted_x = tf.round(tf.expand_dims(
            tf.reduce_sum(x * pred_gray, axis=[1, 2]) / (eps + tf.reduce_sum(pred_gray, axis=[1, 2])), axis=-1))
        weighted_y = tf.round(tf.expand_dims(
            tf.reduce_sum(y * pred_gray, axis=[1, 2]) / (eps + tf.reduce_sum(pred_gray, axis=[1, 2])), axis=-1))
        batch_indices = tf.reshape(tf.linspace(0., tf.cast(batch_size, tf.float32) - 1., batch_size), [batch_size, 1])
        indices = tf.cast(tf.concat([batch_indices, weighted_y, weighted_x], axis=-1), tf.int32)
        #center_rgb = transform_network_utils.interpolate([y_true,  weighted_x, weighted_y], constant_vals=1.)
        center_rgb = tf.gather_nd(y_true, indices)
        center_rgb = tf.reshape(center_rgb, [batch_size, n_chans])

        center_point_xyrgb = tf.concat([
                        weighted_x, weighted_y, center_rgb
                    ], axis=-1)

        return pred_gray, center_point_xyrgb
Developer: xamyzhao, Project: evolving_wilds, Lines of code: 35, Source: metrics.py

Example 2: _smallest_size_at_least

def _smallest_size_at_least(height, width, target_height, target_width):
    """Computes new shape with the smallest side equal to `smallest_side`.

    Computes new shape with the smallest side equal to `smallest_side` while
    preserving the original aspect ratio.

    Args:
      height: an int32 scalar tensor indicating the current height.
      width: an int32 scalar tensor indicating the current width.
      smallest_side: A python integer or scalar `Tensor` indicating the size of
        the smallest side after resize.

    Returns:
      new_height: an int32 scalar tensor indicating the new height.
      new_width: and int32 scalar tensor indicating the new width.
    """
    target_height = tf.convert_to_tensor(target_height, dtype=tf.int32)
    target_width = tf.convert_to_tensor(target_width, dtype=tf.int32)

    height = tf.to_float(height)
    width = tf.to_float(width)
    target_height = tf.to_float(target_height)
    target_width = tf.to_float(target_width)

    scale = tf.cond(tf.greater(target_height / height, target_width / width),
                    lambda: target_height / height,
                    lambda: target_width / width)
    new_height = tf.to_int32(tf.round(height * scale))
    new_width = tf.to_int32(tf.round(width * scale))
    return new_height, new_width
Developer: Lagogoy, Project: Deep-Learning-21-Examples, Lines of code: 30, Source: vgg_preprocessing.py
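To make the arithmetic above concrete, here is the same scale computation in plain Python for a hypothetical 480x640 input that must cover 224x224 (the numbers are chosen for illustration and do not come from the project):

height, width = 480.0, 640.0              # hypothetical input size
target_height, target_width = 224.0, 224.0

# take the larger of the two ratios so both target dimensions are covered
scale = max(target_height / height, target_width / width)  # 224/480 ≈ 0.4667
new_height = int(round(height * scale))   # 224
new_width = int(round(width * scale))     # 299
print(new_height, new_width)              # aspect ratio is preserved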

Example 3: make_tf_top

def make_tf_top(x_shape, loss='sigmoid_ce'):
  """
    builds the top layer, i.e. the loss layer. 
  """
  with tf.name_scope('top') as scope:
    x = tf.placeholder(tf.float32, shape=x_shape, name='input')
    y = tf.placeholder(tf.float32, shape=x_shape, name='output')

    if loss=='sigmoid_ce':
      L = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=x, labels=y))
      correct_prediction = tf.equal(tf.round( tf.sigmoid(x) ), tf.round( y ))
      accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
      accuracy_summary = [tf.summary.scalar('accuracy', accuracy)]
    elif loss=='softmax_ce':
      L = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=x, labels=y))
      correct_prediction = tf.equal(tf.argmax( tf.nn.softmax(x), 1 ), tf.argmax( y, 1 ))
      accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
      accuracy_summary = [tf.summary.scalar('accuracy', accuracy)]
    elif loss=='sigmoid_l2':
      L = tf.nn.l2_loss(tf.sigmoid(x) - y)
      accuracy = None
      accuracy_summary = []
    elif loss=='l2':
      L = tf.nn.l2_loss(x - y)
      accuracy = None
      accuracy_summary = []

    loss_summary = tf.summary.scalar('log_loss', tf.log(L))
    dx = tf.gradients(L, x)[0]

    return L, dx, tf.summary.merge([loss_summary] + accuracy_summary), accuracy
Developer: jsseely, Project: tensorflow-target-prop, Lines of code: 31, Source: tprop_train_stable.py

Example 4: get_exemplar_images

def get_exemplar_images(images, exemplar_size, targets_pos=None):
  """Crop exemplar image from input images"""
  with tf.name_scope('get_exemplar_image'):
    batch_size, x_height, x_width = images.get_shape().as_list()[:3]
    z_height, z_width = exemplar_size

    if targets_pos is None:
      target_pos_single = [[get_center(x_height), get_center(x_width)]]
      targets_pos_ = tf.tile(target_pos_single, [batch_size, 1])
    else:
      targets_pos_ = targets_pos

    # convert to top-left corner based coordinates
    top = tf.to_int32(tf.round(targets_pos_[:, 0] - get_center(z_height)))
    bottom = tf.to_int32(top + z_height)
    left = tf.to_int32(tf.round(targets_pos_[:, 1] - get_center(z_width)))
    right = tf.to_int32(left + z_width)

    def _slice(x):
      f, t, l, b, r = x
      c = f[t:b, l:r]
      return c

    exemplar_img = tf.map_fn(_slice, (images, top, left, bottom, right), dtype=images.dtype)
    exemplar_img.set_shape([batch_size, z_height, z_width, 3])
    return exemplar_img
Developer: fossabot, Project: SiamFC-TensorFlow, Lines of code: 26, Source: infer_utils.py

Example 5: blend_images

def blend_images(data_folder1, data_folder2, out_folder, alpha=.5):
    filename_queue = tf.placeholder(dtype=tf.string)
    label = tf.placeholder(dtype=tf.int32)
    tensor_image = tf.read_file(filename_queue)

    image = tf.image.decode_jpeg(tensor_image, channels=3)

    multiplier = tf.div(tf.constant(224, tf.float32),
                        tf.cast(tf.maximum(tf.shape(image)[0], tf.shape(image)[1]), tf.float32))
    x = tf.cast(tf.round(tf.multiply(tf.cast(tf.shape(image)[0], tf.float32), multiplier)), tf.int32)
    y = tf.cast(tf.round(tf.multiply(tf.cast(tf.shape(image)[1], tf.float32), multiplier)), tf.int32)
    image = tf.image.resize_images(image, [x, y])

    image = tf.image.rot90(image, k=label)

    image = tf.image.resize_image_with_crop_or_pad(image, 224, 224)
    sess = tf.Session()
    sess.run(tf.local_variables_initializer())
    for root, folders, files in os.walk(data_folder1):
        for each in files:
            if each.find('.jpg') >= 0:
                img1 = Image.open(os.path.join(root, each))
                img2_path = os.path.join(root.replace(data_folder1, data_folder2), each.split("-")[-1])
                rotation = int(each.split("-")[1])
                img2 = sess.run(image, feed_dict={filename_queue: img2_path, label: rotation})
                imsave(os.path.join(os.getcwd(), "temp", "temp.jpg"), img2)
                img2 = Image.open(os.path.join(os.getcwd(), "temp", "temp.jpg"))
                out_image = Image.blend(img1, img2, alpha)
                outfile = os.path.join(root.replace(data_folder1, out_folder), each)
                if not os.path.exists(os.path.split(outfile)[0]):
                    os.makedirs(os.path.split(outfile)[0])
                out_image.save(outfile)
            else:
                print(each)
    sess.close()
Developer: Sabrewarrior, Project: PhotoOrientation, Lines of code: 35, Source: misc.py

Example 6: get_adv_loss

def get_adv_loss(logits_r, logits_f, labels_r, labels_f):
    dis_r_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits_r, labels=tf.cast(labels_r, tf.float32)))
    dis_f_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits_f, labels=tf.cast(labels_f, tf.float32)))

    dis_loss = dis_r_loss + dis_f_loss

    dis_r_acc = slim.metrics.accuracy(tf.cast(tf.round(tf.nn.sigmoid(logits_r)), tf.int32), tf.round(labels_r))
    dis_f_acc = slim.metrics.accuracy(tf.cast(tf.round(tf.nn.sigmoid(logits_f)), tf.int32), tf.round(labels_f))

    dis_acc = (dis_r_acc + dis_f_acc) / 2

    gen_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits_f, labels=tf.cast(labels_r, tf.float32)))

    return dis_loss, gen_loss, dis_acc
Developer: seindlut, Project: deep_p2s, Lines of code: 14, Source: build_subnet.py
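A side note that is not part of the original repository: rounding a sigmoid output is simply a 0.5 decision threshold, which on the raw logits is the same as testing against zero. A minimal TensorFlow 1.x sketch:

import tensorflow as tf

logits = tf.constant([-2.0, -0.1, 0.3, 4.0])
by_round = tf.round(tf.nn.sigmoid(logits))               # threshold at sigmoid(x) = 0.5
by_sign = tf.cast(tf.greater(logits, 0.0), tf.float32)   # threshold at x = 0

with tf.Session() as sess:
    print(sess.run([by_round, by_sign]))  # both evaluate to [0., 0., 1., 1.]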

Example 7: sample_img

def sample_img(img, n_samples):
    sx = tf.random_uniform((n_samples,), 0, 1) * 27
    sy = tf.random_uniform((n_samples,), 0, 1) * 27
    sx_lower = tf.cast(tf.floor(sx), tf.int32)
    sx_upper = tf.cast(tf.ceil(sx), tf.int32)

    sy_lower = tf.cast(tf.floor(sy), tf.int32)
    sy_upper = tf.cast(tf.ceil(sy), tf.int32)

    sx_nearest = tf.cast(tf.round(sx), tf.int32)
    sy_nearest = tf.cast(tf.round(sy), tf.int32)
    inds = tf.stack([sx_nearest, sy_nearest])
    samples = tf.gather(tf.reshape(img, (-1,)), sx_nearest + sy_nearest*28)
    return sx/27, sy/27, samples
Developer: lukemetz, Project: cppn, Lines of code: 14, Source: model.py

Example 8: __call__

  def __call__(self, img):
    scale = 1.0 + tf.random_uniform([], -self.max_stretch, self.max_stretch)
    img_shape = tf.shape(img)
    ts = tf.to_int32(tf.round(tf.to_float(img_shape[:2]) * scale))
    resize_method_map = {'bilinear': tf.image.ResizeMethod.BILINEAR,
                         'bicubic': tf.image.ResizeMethod.BICUBIC}
    return tf.image.resize_images(img, ts, method=resize_method_map[self.interpolation])
Developer: fossabot, Project: SiamFC-TensorFlow, Lines of code: 7, Source: transforms.py

Example 9: get_scheduled_sample_inputs

  def get_scheduled_sample_inputs(
      self, done_warm_start, groundtruth_items, generated_items, batch_size):

    with tf.variable_scope("scheduled_sampling", reuse=tf.AUTO_REUSE):
      if self.hparams.mode != tf.estimator.ModeKeys.TRAIN:
        feedself = True
      else:
        # Scheduled sampling:
        # Calculate number of ground-truth frames to pass in.
        feedself = False
        iter_num = tf.train.get_global_step()
        # TODO(mbz): what should it be if it's undefined?
        if iter_num is None:
          iter_num = _LARGE_STEP_NUMBER
        k = self.hparams.scheduled_sampling_k
        num_ground_truth = tf.to_int32(
            tf.round(
                tf.to_float(batch_size) *
                (k / (k + tf.exp(tf.to_float(iter_num) / tf.to_float(k))))))

      if feedself and done_warm_start:
        # Feed in generated stuff.
        output_items = generated_items
      elif done_warm_start:
        output_items = []
        for item_gt, item_gen in zip(groundtruth_items, generated_items):
          # Scheduled sampling
          output_items.append(self.scheduled_sample(
              item_gt, item_gen, batch_size, num_ground_truth))
      else:
        # Feed in ground_truth
        output_items = groundtruth_items

      return output_items
Developer: kltony, Project: tensor2tensor, Lines of code: 34, Source: next_frame.py
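The inverse-sigmoid decay above comes from scheduled sampling (Bengio et al., 2015): the number of ground-truth frames fed to the model shrinks toward zero as the global step grows. A small NumPy sketch of the schedule, with hypothetical values for batch_size and k:

import numpy as np

batch_size, k = 16, 900.0  # hypothetical hyperparameters
for step in [0, 3000, 6000, 9000, 12000]:
    num_ground_truth = int(round(batch_size * (k / (k + np.exp(step / k)))))
    print(step, num_ground_truth)
# prints approximately: 16, 16, 9, 1, 0 -- ground truth is phased out over training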

Example 10: __init__

    def __init__(self, args):
        with tf.device(args.device):
            def circle(x):
                spherenet = tf.square(x)
                spherenet = tf.reduce_sum(spherenet, 1)
                lam = tf.sqrt(spherenet)
                return x/tf.reshape(lam,[int(lam.get_shape()[0]), 1])

            def modes(x):
                shape = x.get_shape()
                return tf.round(x*2)/2.0#+tf.random_normal(shape, 0, 0.04)

            if args.distribution == 'circle':
                x = tf.random_normal([args.batch_size, 2])
                x = circle(x)
            elif args.distribution == 'modes':
                x = tf.random_uniform([args.batch_size, 2], -1, 1)
                x = modes(x)
            elif args.distribution == 'modal-gaussian':
                x = tf.random_uniform([args.batch_size, 2], -1, 1)
                y = tf.random_normal([args.batch_size, 2], stddev=0.04, mean=0.15)
                x = tf.round(x) + y
            elif args.distribution == 'sin':
                x = tf.random_uniform((1, args.batch_size), -10.5, 10.5 )
                x = tf.transpose(x)
                r_data = tf.random_normal((args.batch_size,1), mean=0, stddev=0.1)
                xy = tf.sin(0.75*x)*7.0+x*0.5+r_data*1.0
                x = tf.concat([xy,x], 1)/16.0

            elif args.distribution == 'static-point':
                x = tf.ones([args.batch_size, 2])

            self.x = x
            self.xy = tf.zeros_like(self.x)
Developer: 255BITS, Project: hyperchamber-gan, Lines of code: 34, Source: 2d-distribution.py

Example 11: toMnistCoordinates_tf

def toMnistCoordinates_tf(coordinate_tanhed, img_size):
    '''
    Transform a coordinate in [-1, 1] to the corresponding MNIST pixel coordinate
    :param coordinate_tanhed: vector in [-1,1] x [-1,1]
    :param img_size: side length of the MNIST image in pixels
    :return: vector in the corresponding MNIST coordinates
    '''
    return tf.round(((coordinate_tanhed + 1) / 2.0) * img_size)
Developer: jlindsey15, Project: mathCognition_RAM, Lines of code: 7, Source: ram_touch.py

Example 12: build_fcn_net

    def build_fcn_net(self, inp, use_dice=False):
        bn1 = tf.layers.batch_normalization(inputs=inp, name='bn1', training=True)
        dnn1 = tf.layers.dense(
            bn1, 200, activation=None, kernel_initializer=get_tf_initializer(), name='f1')
        if use_dice:
            dnn1 = dice(dnn1, name='dice_1')
        else:
            dnn1 = prelu(dnn1)
        dnn2 = tf.layers.dense(
            dnn1, 80, activation=None, kernel_initializer=get_tf_initializer(), name='f2')
        if use_dice:
            dnn2 = dice(dnn2, name='dice_2')
        else:
            dnn2 = prelu(dnn2)
        dnn3 = tf.layers.dense(
            dnn2, 2, activation=None, kernel_initializer=get_tf_initializer(), name='f3')
        self.y_hat = tf.nn.softmax(dnn3) + 0.00000001

        with tf.name_scope('Metrics'):
            ctr_loss = - \
                tf.reduce_mean(tf.log(self.y_hat) * self.tensors.target)
            self.loss = ctr_loss
            if self.use_negsampling:
                self.loss += self.aux_loss
            else:
                self.aux_loss = tf.constant(0.0)

            # Accuracy metric
            self.accuracy = tf.reduce_mean(
                tf.cast(tf.equal(tf.round(self.y_hat), self.tensors.target), tf.float32))
Developer: q64545, Project: x-deeplearning, Lines of code: 30, Source: model.py

Example 13: build_fcn_net

    def build_fcn_net(self,inp,use_dice=False):
        bn1 = tf.layers.batch_normalization(inputs=inp,name='bn1')
        dnn1 = tf.layers.dense(bn1,200,activation=None,name='f1')

        if use_dice:
            dnn1 = dice(dnn1,name='dice_1')
        else:
            dnn1 = prelu(dnn1,'prelu1')

        dnn2 = tf.layers.dense(dnn1,80,activation=None,name='f2')
        if use_dice:
            dnn2 = dice(dnn2,name='dice_2')
        else:
            dnn2 = prelu(dnn2,name='prelu2')

        dnn3 = tf.layers.dense(dnn2,2,activation=None,name='f3')
        self.y_hat = tf.nn.softmax(dnn3) + 0.00000001

        with tf.name_scope('Metrics'):
            ctr_loss = -tf.reduce_mean(tf.log(self.y_hat) * self.target_ph)
            self.loss = ctr_loss
            if self.use_negsampling:
                self.loss += self.aux_loss
            tf.summary.scalar('loss',self.loss)
            self.optimizer = tf.train.AdamOptimizer(learning_rate=self.lr).minimize(self.loss)

            self.accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.round(self.y_hat),self.target_ph),tf.float32))
            tf.summary.scalar('accuracy',self.accuracy)

        self.merged = tf.summary.merge_all()
Developer: zeroToAll, Project: tensorflow_practice, Lines of code: 30, Source: model.py

Example 14: almost_equal

def almost_equal(a, b):
    """
    :param a: tensor
    :param b: tensor
    :returns: equivalent to numpy a == b, if a and b were ndarrays
    """
    not_almost_equal = tf.abs(tf.sign(tf.round(a - b)))
    return tf.abs(not_almost_equal - 1)
Developer: lobachevzky, Project: movies, Lines of code: 7, Source: ops.py

Example 15: calc_smape_rounded

def calc_smape_rounded(true, predicted, weights):
    """
    Calculates SMAPE on rounded submission values. Should be close to the official SMAPE used in the competition.
    :param true:
    :param predicted:
    :param weights: Weights mask to exclude some values
    :return:
    """
    n_valid = tf.reduce_sum(weights)
    true_o = tf.round(tf.expm1(true))
    pred_o = tf.maximum(tf.round(tf.expm1(predicted)), 0.0)
    summ = tf.abs(true_o) + tf.abs(pred_o)
    zeros = summ < 0.01
    raw_smape = tf.abs(pred_o - true_o) / summ * 2.0
    smape = tf.where(zeros, tf.zeros_like(summ, dtype=tf.float32), raw_smape)
    return tf.reduce_sum(smape * weights) / n_valid
Developer: JXieHao, Project: kaggle-web-traffic, Lines of code: 16, Source: model.py
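For context (not part of the original code): SMAPE here is the symmetric mean absolute percentage error, 2·|pred − true| / (|true| + |pred|) averaged over valid points, and expm1 undoes a log1p transform (the targets appear to be stored in log space). A plain NumPy sketch of the same arithmetic with made-up values:

import numpy as np

true_log1p = np.log1p(np.array([3.0, 0.0, 10.0]))  # targets stored as log1p(counts)
pred_log1p = np.log1p(np.array([4.0, 0.0, 8.0]))   # predictions in the same space
weights = np.array([1.0, 1.0, 1.0])

true_o = np.round(np.expm1(true_log1p))
pred_o = np.maximum(np.round(np.expm1(pred_log1p)), 0.0)
summ = np.abs(true_o) + np.abs(pred_o)
smape = np.where(summ < 0.01, 0.0, 2.0 * np.abs(pred_o - true_o) / summ)
print(np.sum(smape * weights) / np.sum(weights))   # ≈ 0.169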


Note: The tensorflow.round examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs; the snippets were selected from open-source projects contributed by their respective authors. Copyright of the source code remains with the original authors; refer to each project's license before redistributing or reusing the code. Please do not reproduce this article without permission.