

Python tensorflow.floor Function Code Examples

This article collects typical usage examples of the tensorflow.floor function in Python. If you have been wondering what floor does, how to call it, or what real-world usage looks like, the curated examples below should help.


The following presents 15 code examples of the floor function, sorted by popularity by default.
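
As a warm-up, here is a minimal, self-contained sketch of what tf.floor itself computes: the element-wise floor, i.e. rounding toward negative infinity. It assumes the TF 1.x graph API, which all of the examples below use.

import tensorflow as tf

x = tf.constant([-1.7, -0.5, 0.0, 0.3, 2.9])
y = tf.floor(x)  # element-wise floor: rounds toward negative infinity

with tf.Session() as sess:
    print(sess.run(y))  # [-2. -1.  0.  0.  2.]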

Example 1: test_forward_floor

def test_forward_floor():
    ishape = (1, 3, 10, 10)
    inp_array = np.random.uniform(size=ishape).astype(np.float32)
    with tf.Graph().as_default():
        in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype)
        tf.floor(in1)
        compare_tf_with_tvm(inp_array, 'Placeholder:0', 'Floor:0')
Developer: LANHUIYING, Project: tvm, Lines: 7, Source: test_forward.py

Example 2: process_reals

def process_reals(x, lod, mirror_augment, drange_data, drange_net):
    with tf.name_scope('ProcessReals'):
        with tf.name_scope('DynamicRange'):
            x = tf.cast(x, tf.float32)
            x = misc.adjust_dynamic_range(x, drange_data, drange_net)
        if mirror_augment:
            with tf.name_scope('MirrorAugment'):
                s = tf.shape(x)
                mask = tf.random_uniform([s[0], 1, 1, 1], 0.0, 1.0)
                mask = tf.tile(mask, [1, s[1], s[2], s[3]])
                x = tf.where(mask < 0.5, x, tf.reverse(x, axis=[3]))
        with tf.name_scope('FadeLOD'): # Smooth crossfade between consecutive levels-of-detail.
            s = tf.shape(x)
            y = tf.reshape(x, [-1, s[1], s[2]//2, 2, s[3]//2, 2])
            y = tf.reduce_mean(y, axis=[3, 5], keepdims=True)
            y = tf.tile(y, [1, 1, 1, 2, 1, 2])
            y = tf.reshape(y, [-1, s[1], s[2], s[3]])
            x = tfutil.lerp(x, y, lod - tf.floor(lod))
        with tf.name_scope('UpscaleLOD'): # Upscale to match the expected input/output size of the networks.
            s = tf.shape(x)
            factor = tf.cast(2 ** tf.floor(lod), tf.int32)
            x = tf.reshape(x, [-1, s[1], s[2], 1, s[3], 1])
            x = tf.tile(x, [1, 1, 1, factor, 1, factor])
            x = tf.reshape(x, [-1, s[1], s[2] * factor, s[3] * factor])
        return x
Developer: Gavin666Github, Project: progressive_growing_of_gans, Lines: 25, Source: train.py
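
Both tf.floor calls here split the fractional level-of-detail into its integer and fractional parts: in FadeLOD, lod - tf.floor(lod) is the crossfade weight passed to tfutil.lerp, while in UpscaleLOD, 2 ** tf.floor(lod) turns the integer part into the nearest-neighbour upscaling factor. For example, at lod = 2.3 the batch is blended with its 2x box-downsampled version at weight 0.3 and then upscaled by a factor of 2**2 = 4.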

Example 3: _interpolate2d

def _interpolate2d(imgs, x, y):
    n_batch = tf.shape(imgs)[0]
    xlen = tf.shape(imgs)[1]
    ylen = tf.shape(imgs)[2]
    n_channel = tf.shape(imgs)[3]

    x = tf.to_float(x)
    y = tf.to_float(y)
    xlen_f = tf.to_float(xlen)
    ylen_f = tf.to_float(ylen)
    zero = tf.zeros([], dtype='int32')
    max_x = tf.cast(xlen - 1, 'int32')
    max_y = tf.cast(ylen - 1, 'int32')

    # scale indices from [-1, 1] to [0, xlen - 1] and [0, ylen - 1]
    x = (x + 1.) * (xlen_f - 1.) * 0.5
    y = (y + 1.) * (ylen_f - 1.) * 0.5

    # do sampling
    x0 = tf.cast(tf.floor(x), 'int32')
    x1 = x0 + 1
    y0 = tf.cast(tf.floor(y), 'int32')
    y1 = y0 + 1

    x0 = tf.clip_by_value(x0, zero, max_x)
    x1 = tf.clip_by_value(x1, zero, max_x)
    y0 = tf.clip_by_value(y0, zero, max_y)
    y1 = tf.clip_by_value(y1, zero, max_y)
    base = _repeat(tf.range(n_batch) * xlen * ylen, ylen * xlen)  # _repeat is a helper defined elsewhere in warp.py
    base_x0 = base + x0 * ylen
    base_x1 = base + x1 * ylen
    index00 = base_x0 + y0
    index01 = base_x0 + y1
    index10 = base_x1 + y0
    index11 = base_x1 + y1

    # use indices to lookup pixels in the flat image and restore
    # n_channel dim
    imgs_flat = tf.reshape(imgs, [-1, n_channel])
    imgs_flat = tf.to_float(imgs_flat)
    I00 = tf.gather(imgs_flat, index00)
    I01 = tf.gather(imgs_flat, index01)
    I10 = tf.gather(imgs_flat, index10)
    I11 = tf.gather(imgs_flat, index11)

    # and finally calculate interpolated values
    dx = x - tf.to_float(x0)
    dy = y - tf.to_float(y0)
    w00 = tf.expand_dims((1. - dx) * (1. - dy), 1)
    w01 = tf.expand_dims((1. - dx) * dy, 1)
    w10 = tf.expand_dims(dx * (1. - dy), 1)
    w11 = tf.expand_dims(dx * dy, 1)
    output = tf.add_n([w00*I00, w01*I01, w10*I10, w11*I11])

    # reshape
    output = tf.reshape(output, [n_batch, xlen, ylen, n_channel])

    return output
Developer: Ryo-Ito, Project: spatial_transformer_network, Lines: 58, Source: warp.py

Example 4: staircase_loss

def staircase_loss(y_true, y_pred, var_a=16.0, cnst=1.0/255.0):
    """ Keras Staircase Loss """
    height = cnst
    width = cnst
    var_x = K.clip(K.abs(y_true - y_pred) - 0.5 * cnst, 0.0, 1.0)
    loss = height*(K.tanh(var_a*((var_x/width)-tf.floor(var_x/width)-0.5)) /
                   (2.0*K.tanh(var_a/2.0)) + 0.5 + tf.floor(var_x/width))
    loss += 1e-10
    return K.mean(loss, axis=-1)
Developer: stonezuohui, Project: faceswap, Lines: 9, Source: losses.py
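
The two tf.floor calls implement a periodic staircase: var_x/width - tf.floor(var_x/width) is the fractional part of the scaled error, so the tanh term repeats with period width = 1/255, and adding tf.floor(var_x/width) back stacks the repeats into steps of height 1/255. Below is a hedged numeric sketch of just the staircase term, with the clipping and mean omitted and the helper name invented for illustration (TF 1.x assumed):

import numpy as np
import tensorflow as tf

def staircase(var_x, var_a=16.0, cnst=1.0/255.0):
    # Same expression as the loss body above, minus clipping and averaging.
    height, width = cnst, cnst
    return height * (tf.tanh(var_a * ((var_x / width) - tf.floor(var_x / width) - 0.5)) /
                     (2.0 * tf.tanh(var_a / 2.0)) + 0.5 + tf.floor(var_x / width))

x = tf.constant(np.linspace(0.0, 4.0 / 255.0, 9), dtype=tf.float32)
with tf.Session() as sess:
    print(sess.run(staircase(x)))  # climbs in ~1/255 steps with smoothed edges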

Example 5: get_output_shape_tensor

 def get_output_shape_tensor(self, flatten=None):
     if flatten is None:
         flatten = self.flatten
     with tf.name_scope(self.layer_name):
         if self.conv_padding.lower() == 'same':
             if self.pool:
                 if self.pool_type.lower() == 'same':
                     out_shape = (self.input_shape[0],
                                  tf.to_int32(tf.ceil(tf.ceil(tf.to_float(self.input_shape[1]) /
                                                              self.conv_stride[1]) / self.pool_size[0])),
                                  tf.to_int32(tf.ceil(tf.ceil(tf.to_float(self.input_shape[2]) /
                                                              self.conv_stride[2]) / self.pool_size[1])),
                                  self.filter_shape[3])
                 elif self.pool_type.lower() == 'valid':
                     out_shape = (self.input_shape[0],
                                  tf.to_int32(tf.floor(tf.ceil(tf.to_float(self.input_shape[1]) /
                                                               self.conv_stride[1]) / self.pool_size[0])),
                                  tf.to_int32(tf.floor(tf.ceil(tf.to_float(self.input_shape[2]) /
                                                               self.conv_stride[2]) / self.pool_size[1])),
                                  self.filter_shape[3])
             else:
                 out_shape = (self.input_shape[0],
                              tf.to_int32(tf.ceil(tf.to_float(self.input_shape[1]) / self.conv_stride[1])),
                              tf.to_int32(tf.ceil(tf.to_float(self.input_shape[2]) / self.conv_stride[2])),
                              self.filter_shape[3])
         elif self.conv_padding.lower() == 'valid':
             if self.pool:
                 if self.pool_type.lower() == 'same':
                     out_shape = (self.input_shape[0],
                                  tf.to_int32(tf.ceil(tf.ceil(
                                      tf.to_float(self.input_shape[1] - self.filter_shape[0] + 1) /
                                      self.conv_stride[1]) / self.pool_size[0])),
                                  tf.to_int32(tf.ceil(tf.ceil(
                                      tf.to_float(self.input_shape[2] - self.filter_shape[1] + 1) /
                                      self.conv_stride[2]) / self.pool_size[1])),
                                  self.filter_shape[3])
                 elif self.pool_type.lower() == 'valid':
                     out_shape = (self.input_shape[0],
                                  tf.to_int32(tf.floor(tf.ceil(
                                      tf.to_float(self.input_shape[1] - self.filter_shape[0] + 1) /
                                      self.conv_stride[1]) / self.pool_size[0])),
                                  tf.to_int32(tf.floor(tf.ceil(
                                      tf.to_float(self.input_shape[2] - self.filter_shape[1] + 1) /
                                      self.conv_stride[2]) / self.pool_size[1])),
                                  self.filter_shape[3])
             else:
                 out_shape = (self.input_shape[0],
                              tf.to_int32(
                                  tf.ceil(tf.to_float(self.input_shape[1] - self.filter_shape[0] + 1) /
                                          self.conv_stride[1])),
                              tf.to_int32(
                                  tf.ceil(tf.to_float(self.input_shape[2] - self.filter_shape[1] + 1) /
                                          self.conv_stride[2])),
                              self.filter_shape[3])
     return (out_shape[0], out_shape[1] * out_shape[2] * out_shape[3]) if flatten else out_shape
Developer: savourylie, Project: fucos-tensorflow, Lines: 56, Source: layers.py

Example 6: sample_img

def sample_img(img, n_samples):
    sx = tf.random_uniform((n_samples,), 0, 1) * 27
    sy = tf.random_uniform((n_samples,), 0, 1) * 27
    sx_lower = tf.cast(tf.floor(sx), tf.int32)
    sx_upper = tf.cast(tf.ceil(sx), tf.int32)

    sy_lower = tf.cast(tf.floor(sy), tf.int32)
    sy_upper = tf.cast(tf.ceil(sy), tf.int32)

    sx_nearest = tf.cast(tf.round(sx), tf.int32)
    sy_nearest = tf.cast(tf.round(sy), tf.int32)
    inds = tf.pack([sx_nearest, sy_nearest])  # tf.pack is the pre-1.0 name of tf.stack; inds is unused below
    samples = tf.gather(tf.reshape(img, (-1,)), sx_nearest + sy_nearest*28)  # nearest-neighbour lookup in the flat 28x28 image
    return sx/27, sy/27, samples
Developer: lukemetz, Project: cppn, Lines: 14, Source: model.py

Example 7: _log_unnormalized_prob

 def _log_unnormalized_prob(self, x):
   safe_x = tf.maximum(x if self.interpolate_nondiscrete else tf.floor(x), 1.)
   y = -self.power * tf.log(safe_x)
   is_supported = tf.broadcast_to(tf.equal(x, safe_x), tf.shape(y))
   neg_inf = tf.fill(
       tf.shape(y), value=np.array(-np.inf, dtype=y.dtype.as_numpy_dtype))
   return tf.where(is_supported, y, neg_inf)
Developer: asudomoeva, Project: probability, Lines: 7, Source: zipf.py

Example 8: dropout_sparse

def dropout_sparse(x, keep_prob, num_nonzero_elems):
    noise_shape = [num_nonzero_elems]
    random_tensor = keep_prob
    random_tensor += tf.random_uniform(noise_shape)
    dropout_mask = tf.cast(tf.floor(random_tensor), dtype=tf.bool)
    pre_out = tf.sparse_retain(x, dropout_mask)
    return pre_out * (1./keep_prob)
Developer: burakbayramli, Project: classnotes, Lines: 7, Source: util.py
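
The mask construction relies on a floor trick: for U ~ Uniform[0, 1), keep_prob + U lies in [keep_prob, 1 + keep_prob), so tf.floor of it equals 1 exactly when U >= 1 - keep_prob, i.e. with probability keep_prob. A minimal dense sketch of the same idea (TF 1.x assumed; dropout_sparse itself additionally needs a tf.SparseTensor and its nonzero count):

import tensorflow as tf

keep_prob = 0.8
random_tensor = keep_prob + tf.random_uniform([10])
mask = tf.floor(random_tensor)       # 1.0 with probability keep_prob, else 0.0
x = tf.ones([10])
out = x * mask * (1.0 / keep_prob)   # inverted-dropout rescaling, as in pre_out above

with tf.Session() as sess:
    print(sess.run(out))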

Example 9: generate_dropout_masks

def generate_dropout_masks(keep_prob, shape, amount):
  masks = []
  for _ in range(amount):
    dropout_mask = tf.random_uniform(shape) + (keep_prob)
    dropout_mask = tf.floor(dropout_mask) / (keep_prob)
    masks.append(dropout_mask)
  return masks
Developer: ALISCIFP, Project: models, Lines: 7, Source: variational_dropout.py

Example 10: count_sketch

def count_sketch(probs, project_size):
    """ Calculates count-min sketch of a tensor.
    Args:
      probs: A `Tensor`
      project_size: output size (`int`)

    Returns:
      A projected count-min sketch `Tensor` with shape [batch_size, project_size].
    """
    with tf.variable_scope('CountSketch_'+probs.name.replace(':', '_')) as scope:
        input_size = int(probs.get_shape()[1])

        # h, s must be sampled once
        history = tf.get_collection('__countsketch')
        if scope.name in history: scope.reuse_variables()
        tf.add_to_collection('__countsketch', scope.name)

        h = tf.get_variable('h', [input_size], initializer=tf.random_uniform_initializer(0, project_size), trainable=False)
        s = tf.get_variable('s', [input_size], initializer=tf.random_uniform_initializer(0, 2), trainable=False)

        h = tf.cast(h, 'int32')
        s = tf.cast(tf.floor(s) * 2 - 1, 'int32') # 1 or -1

        sk = _sketch_op.count_sketch(probs, h, s, project_size)
        sk.set_shape([probs.get_shape()[0], project_size])
        return sk
Developer: ml-ai-nlp-ir, Project: compact-bilinear-pooling-tf, Lines: 26, Source: count_sketch.py

Example 11: rnn_decoder

def rnn_decoder(cell, inputs, initial_state, embedding_size, embedding_length, sequence_length,
                name='RNNDecoder', reuse=False, use_inputs_prob=0.0, static_input=None):
    with tf.variable_scope(name, reuse=reuse):
        # print(tf.get_variable_scope().reuse, tf.get_variable_scope().name)
        with tf.name_scope("embedding"):
            batch_size = tf.shape(initial_state)[0]
            embedding_table = tf.get_variable(
                name='embedding_table',
                shape=[embedding_length, embedding_size],
                initializer=tf.truncated_normal_initializer(stddev=glorot_mul(embedding_length, embedding_size)),
            )
            # 0 is index for _SOS_ (start of sentence symbol)
            initial_embedding = tf.gather(embedding_table, tf.zeros(tf.pack([batch_size]), tf.int32))

        states = [initial_state]
        outputs = []
        outputs_softmax = []
        decoder_outputs_argmax_embedding = []

        for j in range(sequence_length):
            with tf.variable_scope(tf.get_variable_scope(), reuse=True if j > 0 else None):
                # get input:
                #   either feed back the previous decoder argmax output,
                #   or use the provided input (note that you have to use the previous input; the index is therefore j - 1)
                input = initial_embedding
                if j > 0:
                    true_input = tf.gather(embedding_table, inputs[j - 1])
                    decoded_input = decoder_outputs_argmax_embedding[-1]
                    choice = tf.floor(tf.random_uniform([1], use_inputs_prob, 1 + use_inputs_prob, tf.float32))
                    input = choice * true_input + (1.0 - choice) * decoded_input

                if static_input is not None:
                    input = tf.concat(1, [input, static_input])

                # print(tf.get_variable_scope().reuse, tf.get_variable_scope().name)
                output, state = cell(input, states[-1])

                projection = linear(
                    input=output,
                    input_size=cell.output_size,
                    output_size=embedding_length,
                    name='output_linear_projection'
                )

                outputs.append(projection)
                states.append(state)

                softmax = tf.nn.softmax(projection, name="output_softmax")
                # we do not compute the gradient through argmax
                output_argmax = tf.stop_gradient(tf.argmax(softmax, 1))
                # we do not compute the gradient for embeddings when used with noisy argmax outputs
                output_argmax_embedding = tf.stop_gradient(tf.gather(embedding_table, output_argmax))
                decoder_outputs_argmax_embedding.append(output_argmax_embedding)

                outputs_softmax.append(tf.expand_dims(softmax, 1))

    # remove the initial state
    states = states[1:]

    return states, outputs, outputs_softmax
Developer: jurcicek, Project: ndm, Lines: 60, Source: bricks.py
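
The choice tensor reuses the floor trick from Examples 8 and 9: tf.floor of a uniform sample on [use_inputs_prob, 1 + use_inputs_prob) equals 1 with probability use_inputs_prob, so each decoding step feeds back the ground-truth token with that probability and the model's own argmax embedding otherwise, a scheduled-sampling-style training scheme.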

Example 12: _cdf

  def _cdf(self, y):
    low = self._low
    high = self._high

    # Recall the promise:
    # cdf(y) := P[Y <= y]
    #         = 1, if y >= high,
    #         = 0, if y < low,
    #         = P[X <= y], otherwise.

    # P[Y <= j] = P[floor(Y) <= j] since mass is only at integers, not in
    # between.
    j = tf.floor(y)

    # P[X <= j], used when low < X < high.
    result_so_far = self.distribution.cdf(j)

    # Broadcast, because it's possible that this is a single distribution being
    # evaluated on a number of samples, or something like that.
    j += tf.zeros_like(result_so_far)

    # Re-define values at the cutoffs.
    if low is not None:
      result_so_far = tf.where(j < low, tf.zeros_like(result_so_far),
                               result_so_far)
    if high is not None:
      result_so_far = tf.where(j >= high, tf.ones_like(result_so_far),
                               result_so_far)

    return result_so_far
Developer: lewisKit, Project: probability, Lines: 30, Source: quantized_distribution.py
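
Since all probability mass sits on integers, P[Y <= y] = P[Y <= floor(y)]: with low = 0 and high = 10, for example, cdf(2.3) reduces to distribution.cdf(2.0), while the cutoff rewrites force cdf(-0.5) to 0 and cdf(11.7) to 1.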

Example 13: fpn_map_rois_to_levels

def fpn_map_rois_to_levels(boxes):
    """
    Assign boxes to levels 2-5.

    Args:
        boxes (nx4):

    Returns:
        [tf.Tensor]: 4 tensors for level 2-5. Each tensor is a vector of indices of boxes in its level.
        [tf.Tensor]: 4 tensors, the gathered boxes in each level.

    Be careful that the returned tensor could be empty.
    """
    sqrtarea = tf.sqrt(tf_area(boxes))
    level = tf.to_int32(tf.floor(
        4 + tf.log(sqrtarea * (1. / 224) + 1e-6) * (1.0 / np.log(2))))

    # RoI levels range from 2 to 5 (not 6)
    level_ids = [
        tf.where(level <= 2),
        tf.where(tf.equal(level, 3)),   # == is not supported
        tf.where(tf.equal(level, 4)),
        tf.where(level >= 5)]
    level_ids = [tf.reshape(x, [-1], name='roi_level{}_id'.format(i + 2))
                 for i, x in enumerate(level_ids)]
    num_in_levels = [tf.size(x, name='num_roi_level{}'.format(i + 2))
                     for i, x in enumerate(level_ids)]
    add_moving_summary(*num_in_levels)

    level_boxes = [tf.gather(boxes, ids) for ids in level_ids]
    return level_ids, level_boxes
Developer: tobyma, Project: tensorpack, Lines: 31, Source: model.py
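
The tf.floor expression is the FPN assignment rule level = floor(4 + log2(sqrt(area) / 224)); the tensor code computes log2 as tf.log(...) * (1 / log 2) and adds 1e-6 for numerical safety. A hedged numpy check of the rule (the 1e-6 term is dropped here):

import numpy as np

def roi_level(box_side):
    # box_side is sqrt(area), i.e. the side length of a square box.
    return int(np.floor(4 + np.log2(box_side / 224.0)))

print(roi_level(112.0))  # 3
print(roi_level(224.0))  # 4
print(roi_level(448.0))  # 5 (the level_ids buckets clamp everything to 2-5)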

Example 14: loop_body

    def loop_body(should_continue, k):
      """Resample the non-accepted points."""
      # The range of U is chosen so that the resulting sample K lies in
      # [0, tf.int64.max). The final sample, if accepted, is K + 1.
      u = tf.random_uniform(
          shape,
          minval=minval_u,
          maxval=maxval_u,
          dtype=self.power.dtype,
          seed=seed())

      # Sample the point X from the continuous density h(x) \propto x^(-power).
      x = self._hat_integral_inverse(u)

      # Rejection-inversion requires a `hat` function, h(x) such that
      # \int_{k - .5}^{k + .5} h(x) dx >= pmf(k + 1) for points k in the
      # support. A natural hat function for us is h(x) = x^(-power).
      #
      # After sampling X from h(x), suppose it lies in the interval
      # (K - .5, K + .5) for integer K. Then the corresponding K is accepted
      # if it lies to the left of x_K, where x_K is defined by:
      #   \int_{x_K}^{K + .5} h(x) dx = H(x_K) - H(K + .5) = pmf(K + 1),
      # where H(x) = \int_x^inf h(x) dx.

      # Solving for x_K, we find that x_K = H_inverse(H(K + .5) + pmf(K + 1)).
      # Or, the acceptance condition is X <= H_inverse(H(K + .5) + pmf(K + 1)).
      # Since X = H_inverse(U), this simplifies to U <= H(K + .5) + pmf(K + 1).

      # Update the non-accepted points.
      # Since X \in (K - .5, K + .5), the sample K is chosen as floor(X + 0.5).
      k = tf.where(should_continue, tf.floor(x + 0.5), k)
      accept = (u <= self._hat_integral(k + .5) + tf.exp(self._log_prob(k + 1)))

      return [should_continue & (~accept), k]
Developer: asudomoeva, Project: probability, Lines: 34, Source: zipf.py

Example 15: quantize

def quantize(t, quant_scale, max_value=1.0):
  """Quantize a tensor t with each element in [-max_value, max_value]."""
  t = tf.minimum(max_value, tf.maximum(t, -max_value))
  big = quant_scale * (t + max_value) + 0.5
  with tf.get_default_graph().gradient_override_map({"Floor": "CustomIdG"}):
    res = (tf.floor(big) / quant_scale) - max_value
  return res
Developer: Jmq14, Project: models, Lines: 7, Source: neural_gpu.py
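
quantize snaps each element of t onto a uniform grid of step 1/quant_scale inside [-max_value, max_value], and the gradient_override_map swaps tf.floor's zero gradient for a custom one named CustomIdG, registered elsewhere in neural_gpu.py. Below is a hedged usage sketch that assumes CustomIdG is a straight-through (identity) gradient; the registration shown is illustrative, not the project's actual definition:

import tensorflow as tf

@tf.RegisterGradient("CustomIdG")
def _custom_id_grad(op, grad):
    return grad  # straight-through: pass gradients through tf.floor unchanged

t = tf.constant([-0.9, -0.2, 0.1, 0.7])
q = quantize(t, quant_scale=8.0)  # quantize from Example 15; grid step is 1/8

with tf.Session() as sess:
    print(sess.run(q))  # [-0.875 -0.25   0.125  0.75 ]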


Note: The tensorflow.floor examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright in the source code remains with the original authors. Please consult each project's license before distributing or using the code; do not reproduce without permission.