This page collects typical usage examples of the Python tensorflow.ceil function. If you are wondering what exactly tf.ceil does, how to call it, or what real uses of it look like, the hand-picked examples below should help.
Below, 15 code examples of the ceil function are shown, sorted by popularity by default.
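Before the collected examples, a minimal sketch of the op itself, assuming the TF 1.x API used throughout this page (in TF 2.x the same op is tf.math.ceil):

import tensorflow as tf

# tf.ceil rounds each element up to the nearest integer, elementwise.
x = tf.constant([0.2, 1.0, -1.7, 2.5])
y = tf.ceil(x)
with tf.Session() as sess:
    print(sess.run(y))  # [ 1.  1. -1.  3.]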
Example 1: test_forward_ceil
def test_forward_ceil():
    ishape = (1, 3, 10, 10)
    inp_array = np.random.uniform(size=ishape).astype(np.float32)
    with tf.Graph().as_default():
        in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype)
        # The return value is unused: the op is looked up by name ('Ceil:0') below.
        tf.ceil(in1)
        compare_tf_with_tvm(inp_array, 'Placeholder:0', 'Ceil:0')
Example 2: pad_to_multiple
def pad_to_multiple(tensor, multiple):
    """Returns the tensor zero padded to the specified multiple.

    Appends 0s to the end of the first and second dimension (height and width)
    of the tensor until both dimensions are a multiple of the input argument
    'multiple'. E.g. given an input tensor of shape [1, 3, 5, 1] and an input
    multiple of 4, PadToMultiple will append 0s so that the resulting tensor
    will be of shape [1, 4, 8, 1].

    Args:
        tensor: rank 4 float32 tensor, where
            tensor -> [batch_size, height, width, channels].
        multiple: the multiple to pad to.

    Returns:
        padded_tensor: the tensor zero padded to the specified multiple.
    """
    tensor_shape = tensor.get_shape()
    batch_size = static_shape.get_batch_size(tensor_shape)
    tensor_height = static_shape.get_height(tensor_shape)
    tensor_width = static_shape.get_width(tensor_shape)
    tensor_depth = static_shape.get_depth(tensor_shape)
    if batch_size is None:
        batch_size = tf.shape(tensor)[0]
    if tensor_height is None:
        tensor_height = tf.shape(tensor)[1]
        padded_tensor_height = tf.to_int32(
            tf.ceil(tf.to_float(tensor_height) / tf.to_float(multiple))) * multiple
    else:
        padded_tensor_height = int(
            math.ceil(float(tensor_height) / multiple) * multiple)
    if tensor_width is None:
        tensor_width = tf.shape(tensor)[2]
        padded_tensor_width = tf.to_int32(
            tf.ceil(tf.to_float(tensor_width) / tf.to_float(multiple))) * multiple
    else:
        padded_tensor_width = int(
            math.ceil(float(tensor_width) / multiple) * multiple)
    if tensor_depth is None:
        tensor_depth = tf.shape(tensor)[3]
    # Use tf.concat instead of tf.pad to preserve static shape
    if padded_tensor_height != tensor_height:
        height_pad = tf.zeros([
            batch_size, padded_tensor_height - tensor_height, tensor_width,
            tensor_depth
        ])
        tensor = tf.concat([tensor, height_pad], 1)
    if padded_tensor_width != tensor_width:
        width_pad = tf.zeros([
            batch_size, padded_tensor_height, padded_tensor_width - tensor_width,
            tensor_depth
        ])
        tensor = tf.concat([tensor, width_pad], 2)
    return tensor
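As a sanity check, a hedged sketch of the docstring's own example, assuming TF 1.x and that the static_shape helpers resolve the fully static input shape:

x = tf.zeros([1, 3, 5, 1], dtype=tf.float32)
padded = pad_to_multiple(x, 4)
# height: ceil(3 / 4) * 4 = 4; width: ceil(5 / 4) * 4 = 8
print(padded.get_shape())  # (1, 4, 8, 1); the appended padding is all zeros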
Example 3: _update_lipschitz
def _update_lipschitz(self, v, i):
    config = self.config
    if len(v.shape) > 1:
        k = self.config.weight_constraint_k or 100.0
        wi_hat = v
        if len(v.shape) == 4:
            #fij = tf.reduce_sum(tf.abs(wi_hat), axis=[0,1])
            fij = wi_hat
            fij = tf.reduce_sum(tf.abs(fij), axis=[1])
            fij = tf.reduce_max(fij, axis=[0])
        else:
            fij = wi_hat

        if self.config.ortho_pnorm == "inf":
            wp = tf.reduce_max(tf.reduce_sum(tf.abs(fij), axis=0), axis=0)
        else:
            # conv
            wp = tf.reduce_max(tf.reduce_sum(tf.abs(fij), axis=1), axis=0)
        ratio = (1.0 / tf.maximum(1.0, wp / k))

        if self.config.weight_bounce:
            bounce = tf.minimum(1.0, tf.ceil(wp / k - 0.999))
            ratio -= tf.maximum(0.0, bounce) * 0.2

        if self.config.weight_scaleup:
            up = tf.minimum(1.0, tf.ceil(0.02 - wp / k))
            ratio += tf.maximum(0.0, up) * k / wp * 0.2

        wi = ratio * wi_hat
        #self.gan.metrics['wi'+str(i)]=wp
        #self.gan.metrics['wk'+str(i)]=ratio
        #self.gan.metrics['bouce'+str(i)]=bounce
        return tf.assign(v, wi)
    return None
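Note how tf.ceil doubles as a step-function indicator in the two branches above. A minimal sketch of the weight_bounce trigger with made-up ratios, assuming TF 1.x:

wp_over_k = tf.constant([0.5, 1.2, 3.0])
bounce = tf.maximum(0.0, tf.minimum(1.0, tf.ceil(wp_over_k - 0.999)))
# -> [0., 1., 1.]: the penalty fires only once wp/k exceeds ~0.999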
Example 4: _anchor_component_tf
def _anchor_component_tf(self):
    print('Use TF anchors')
    with tf.variable_scope('ANCHOR_' + self._tag) as scope:
        # just to get the shape right
        height = tf.to_int32(tf.ceil(self._im_info[0, 0] / np.float32(self._feat_stride[0])))
        width = tf.to_int32(tf.ceil(self._im_info[0, 1] / np.float32(self._feat_stride[0])))
        self._anchors, self._anchor_length = generate_anchors_pre_tf(
            height, width, self._feat_stride[0], self._anchor_scales,
            self._anchor_ratios)
Example 5: _anchor_component
def _anchor_component(self):
    with tf.variable_scope('ANCHOR_' + self._tag) as scope:
        # just to get the shape right
        height = tf.to_int32(tf.ceil(self._im_info[0, 0] / np.float32(self._feat_stride[0])))
        width = tf.to_int32(tf.ceil(self._im_info[0, 1] / np.float32(self._feat_stride[0])))
        anchors, anchor_length = tf.py_func(
            generate_anchors_pre,
            [height, width,
             self._feat_stride, self._anchor_scales, self._anchor_ratios],
            [tf.float32, tf.int32], name="generate_anchors")
        anchors.set_shape([None, 4])
        anchor_length.set_shape([])
        self._anchors = anchors
        self._anchor_length = anchor_length
Example 6: sample_img
def sample_img(img, n_samples):
    sx = tf.random_uniform((n_samples,), 0, 1) * 27
    sy = tf.random_uniform((n_samples,), 0, 1) * 27
    sx_lower = tf.cast(tf.floor(sx), tf.int32)
    sx_upper = tf.cast(tf.ceil(sx), tf.int32)
    sy_lower = tf.cast(tf.floor(sy), tf.int32)
    sy_upper = tf.cast(tf.ceil(sy), tf.int32)
    sx_nearest = tf.cast(tf.round(sx), tf.int32)
    sy_nearest = tf.cast(tf.round(sy), tf.int32)
    inds = tf.stack([sx_nearest, sy_nearest])  # tf.pack in the original; renamed tf.stack in TF 1.0
    samples = tf.gather(tf.reshape(img, (-1,)), sx_nearest + sy_nearest * 28)
    return sx / 27, sy / 27, samples
Example 7: _survival_function
def _survival_function(self, y):
    low = self._low
    high = self._high
    # Recall the promise:
    # survival_function(y) := P[Y > y]
    #                       = 0, if y >= high,
    #                       = 1, if y < low,
    #                       = P[X > y], otherwise.

    # P[Y > j] = P[ceiling(Y) > j] since mass is only at integers, not in
    # between.
    j = tf.ceil(y)

    # P[X > j], used when low < X < high.
    result_so_far = self.distribution.survival_function(j)

    # Broadcast, because it's possible that this is a single distribution being
    # evaluated on a number of samples, or something like that.
    j += tf.zeros_like(result_so_far)

    # Re-define values at the cutoffs.
    if low is not None:
        result_so_far = tf.where(j < low, tf.ones_like(result_so_far),
                                 result_so_far)
    if high is not None:
        result_so_far = tf.where(j >= high, tf.zeros_like(result_so_far),
                                 result_so_far)

    return result_so_far
Example 8: resnet_fpn_backbone
def resnet_fpn_backbone(image, num_blocks, freeze_c2=True):
    shape2d = tf.shape(image)[2:]
    mult = float(cfg.FPN.RESOLUTION_REQUIREMENT)
    new_shape2d = tf.to_int32(tf.ceil(tf.to_float(shape2d) / mult) * mult)
    pad_shape2d = new_shape2d - shape2d
    assert len(num_blocks) == 4, num_blocks
    with resnet_argscope():
        chan = image.shape[1]
        pad_base = maybe_reverse_pad(2, 3)
        l = tf.pad(image, tf.stack(
            [[0, 0], [0, 0],
             [pad_base[0], pad_base[1] + pad_shape2d[0]],
             [pad_base[0], pad_base[1] + pad_shape2d[1]]]))
        l.set_shape([None, chan, None, None])
        l = Conv2D('conv0', l, 64, 7, strides=2, activation=BNReLU, padding='VALID')
        l = tf.pad(l, [[0, 0], [0, 0], maybe_reverse_pad(0, 1), maybe_reverse_pad(0, 1)])
        l = MaxPooling('pool0', l, 3, strides=2, padding='VALID')
        c2 = resnet_group('group0', l, resnet_bottleneck, 64, num_blocks[0], 1)
        if freeze_c2:
            c2 = tf.stop_gradient(c2)
        c3 = resnet_group('group1', c2, resnet_bottleneck, 128, num_blocks[1], 2)
        c4 = resnet_group('group2', c3, resnet_bottleneck, 256, num_blocks[2], 2)
        c5 = resnet_group('group3', c4, resnet_bottleneck, 512, num_blocks[3], 2)
        # 32x downsampling up to now
        # size of c5: ceil(input/32)
        return c2, c3, c4, c5
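The only ceil use here is rounding the input resolution up to the multiple FPN requires. A hedged sketch of just that line, with a made-up input shape and assuming cfg.FPN.RESOLUTION_REQUIREMENT = 32:

shape2d = tf.constant([37, 50])
mult = 32.0
new_shape2d = tf.to_int32(tf.ceil(tf.to_float(shape2d) / mult) * mult)
# -> [64, 64]: both spatial dims rounded up to the next multiple of 32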
Example 9: crop_or_pad
def crop_or_pad(waves, length, channels):
    """Crop or pad wave to have shape [N, length, channels].

    Args:
        waves: A 3D `Tensor` of NLC format.
        length: A Python scalar. The output wave size.
        channels: Number of output waves channels.

    Returns:
        A 3D `Tensor` of NLC format with shape [N, length, channels].
    """
    waves = tf.convert_to_tensor(waves)
    batch_size = waves.shape[0].value
    waves_shape = tf.shape(waves)

    # Force audio length.
    pad = tf.maximum(0, length - waves_shape[1])
    right_pad = tf.to_int32(tf.to_float(pad) / 2.0)
    left_pad = pad - right_pad
    waves = tf.pad(waves, [[0, 0], [left_pad, right_pad], [0, 0]])
    waves = waves[:, :length, :]

    # Force number of channels.
    num_repeats = tf.to_int32(
        tf.ceil(tf.to_float(channels) / tf.to_float(waves_shape[2])))
    waves = tf.tile(waves, [1, 1, num_repeats])[:, :, :channels]

    waves.set_shape([batch_size, length, channels])
    return waves
Example 10: non_zero_tokens
def non_zero_tokens(tokens):
    """Receives a vector of tokens (float) which are zero-padded. Returns a
    vector of the same size, which has the value 1.0 in positions with actual
    tokens and 0.0 in positions with zero-padding.

    :param tokens:
    :return:
    """
    return tf.ceil(tokens / tf.reduce_max(tokens, [1], keep_dims=True))
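The trick: for positive, zero-padded inputs, dividing each row by its maximum maps real tokens into (0, 1] and padding to exactly 0, and tf.ceil then snaps those to 1.0 and 0.0. A minimal sketch, assuming TF 1.x and strictly positive token ids:

tokens = tf.constant([[5.0, 12.0, 3.0, 0.0, 0.0]])
mask = non_zero_tokens(tokens)
with tf.Session() as sess:
    print(sess.run(mask))  # [[1. 1. 1. 0. 0.]]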
Example 11: reshape_seqs
def reshape_seqs(x, avg_window_size=3, **kwargs):
    B = tf.shape(x)[0]
    L = tf.cast(tf.shape(x)[1], tf.float32)
    D = x.get_shape().as_list()[-1]
    b = tf.transpose(x, [0, 2, 1])
    extra_pads = tf.cast(tf.ceil(L / avg_window_size) * avg_window_size - L, tf.int32)
    c = tf.pad(b, tf.concat([tf.zeros([2, 2], dtype=tf.int32), [[0, extra_pads]]], axis=0))
    return tf.reshape(c, [B, D, avg_window_size, -1])
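A quick shape trace under assumed inputs (TF 1.x): with x of shape [2, 7, 16] and avg_window_size=3, tf.ceil pads the sequence length 7 up to 9, so the result reshapes cleanly:

x = tf.zeros([2, 7, 16])
out = reshape_seqs(x, avg_window_size=3)
# extra_pads = ceil(7 / 3) * 3 - 7 = 2; output shape -> [2, 16, 3, 3]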
Example 12: imageWarpIm
def imageWarpIm(imageBatch, pMtrxBatch, opt, name=None):
    with tf.name_scope("ImWarp"):
        imageBatch = tf.expand_dims(imageBatch, -1)
        batchSize = tf.shape(imageBatch)[0]
        imageH, imageW = opt.H, opt.H
        H, W = opt.H, opt.W
        warpGTmtrxBatch = tf.tile(tf.expand_dims(opt.warpGTmtrx, 0), [batchSize, 1, 1])
        transMtrxBatch = tf.matmul(warpGTmtrxBatch, pMtrxBatch)
        # warp the canonical coordinates
        X, Y = np.meshgrid(np.linspace(-1, 1, W), np.linspace(-1, 1, H))
        XYhom = tf.transpose(tf.stack([X.reshape([-1]), Y.reshape([-1]), np.ones([X.size])], axis=1))
        XYhomBatch = tf.tile(tf.expand_dims(XYhom, 0), [batchSize, 1, 1])
        XYwarpHomBatch = tf.matmul(transMtrxBatch, tf.to_float(XYhomBatch))
        XwarpHom, YwarpHom, ZwarpHom = tf.split(XYwarpHomBatch, 3, 1)
        Xwarp = tf.reshape(XwarpHom / ZwarpHom, [batchSize, H, W])
        Ywarp = tf.reshape(YwarpHom / ZwarpHom, [batchSize, H, W])
        # get the integer sampling coordinates
        Xfloor, Xceil = tf.floor(Xwarp), tf.ceil(Xwarp)
        Yfloor, Yceil = tf.floor(Ywarp), tf.ceil(Ywarp)
        XfloorInt, XceilInt = tf.to_int32(Xfloor), tf.to_int32(Xceil)
        YfloorInt, YceilInt = tf.to_int32(Yfloor), tf.to_int32(Yceil)
        imageIdx = tf.tile(tf.reshape(tf.range(batchSize), [batchSize, 1, 1]), [1, H, W])
        imageVec = tf.reshape(imageBatch, [-1, tf.shape(imageBatch)[3]])
        imageVecOutside = tf.concat([imageVec, tf.zeros([1, tf.shape(imageBatch)[3]])], 0)
        idxUL = (imageIdx * imageH + YfloorInt) * imageW + XfloorInt
        idxUR = (imageIdx * imageH + YfloorInt) * imageW + XceilInt
        idxBL = (imageIdx * imageH + YceilInt) * imageW + XfloorInt
        idxBR = (imageIdx * imageH + YceilInt) * imageW + XceilInt
        idxOutside = tf.fill([batchSize, H, W], batchSize * imageH * imageW)

        def insideIm(Xint, Yint):
            return (Xint >= 0) & (Xint < imageW) & (Yint >= 0) & (Yint < imageH)

        idxUL = tf.where(insideIm(XfloorInt, YfloorInt), idxUL, idxOutside)
        idxUR = tf.where(insideIm(XceilInt, YfloorInt), idxUR, idxOutside)
        idxBL = tf.where(insideIm(XfloorInt, YceilInt), idxBL, idxOutside)
        idxBR = tf.where(insideIm(XceilInt, YceilInt), idxBR, idxOutside)
        # bilinear interpolation
        Xratio = tf.reshape(Xwarp - Xfloor, [batchSize, H, W, 1])
        Yratio = tf.reshape(Ywarp - Yfloor, [batchSize, H, W, 1])
        ImUL = tf.to_float(tf.gather(imageVecOutside, idxUL)) * (1 - Xratio) * (1 - Yratio)
        ImUR = tf.to_float(tf.gather(imageVecOutside, idxUR)) * (Xratio) * (1 - Yratio)
        ImBL = tf.to_float(tf.gather(imageVecOutside, idxBL)) * (1 - Xratio) * (Yratio)
        ImBR = tf.to_float(tf.gather(imageVecOutside, idxBR)) * (Xratio) * (Yratio)
        ImWarpBatch = ImUL + ImUR + ImBL + ImBR
        ImWarpBatch = tf.identity(ImWarpBatch, name=name)
    return ImWarpBatch
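For reference, the final block is standard bilinear interpolation. Writing $a = x - \lfloor x \rfloor$ (Xratio) and $b = y - \lfloor y \rfloor$ (Yratio):

$$I(x, y) = (1-a)(1-b)\,I_{UL} + a(1-b)\,I_{UR} + (1-a)b\,I_{BL} + ab\,I_{BR}$$

with out-of-image corner indices redirected to the appended zero row (imageVecOutside), so samples outside the image contribute 0.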
Example 13: cnn
def cnn(model, config, scope, connect=None):
    with tf.variable_scope(scope), tf.name_scope(scope):
        with tf.variable_scope('inputs'), tf.name_scope('inputs'):
            sizes = {size: config.getint(scope, '%s_size' % size)
                     for size in ['clength', 'cstep', 'plength', 'pstep']}
            if connect is None:
                model['%s_in0length' % scope] = config.getint('global', 'batch_size')
                model['%s_in1length' % scope] = config.getint('global', 'input_size')
                model['%s_in2length' % scope] = tf.placeholder(tf.int32, [model['%s_in0length' % scope]], '%s_in2length' % scope)
                model['%s_maxin2length' % scope] = config.getint('global', 'time_size')
                model['%s_inputs' % scope] = tf.placeholder(tf.float32, [model['%s_maxin2length' % scope], model['%s_in0length' % scope], model['%s_in1length' % scope]], '%s_inputs' % scope)
            else:
                model['%s_in0length' % scope] = model['%s_out0length' % connect]
                model['%s_in1length' % scope] = model['%s_out1length' % connect]
                model['%s_in2length' % scope] = model['%s_out2length' % connect]
                model['%s_maxin2length' % scope] = model['%s_maxout2length' % connect]
                model['%s_inputs' % scope] = model['%s_outputs' % connect]
            model['%s_transform' % scope] = tf.transpose(tf.reshape(model['%s_inputs' % scope], [model['%s_maxin2length' % scope], model['%s_in0length' % scope], model['%s_in1length' % scope], 1]), [1, 0, 2, 3], '%s_transform' % scope)
            model['%s_out0length' % scope] = model['%s_in0length' % scope]
            model['%s_out1length' % scope] = model['%s_in1length' % scope]
            model['%s_out2length' % scope] = model['%s_in2length' % scope]
            model['%s_maxout2length' % scope] = model['%s_maxin2length' % scope]

        for _ in xrange(config.getint(scope, 'layer_size')):
            if _ == 0:
                model['%s_transform%i' % (scope, _)] = model['%s_transform' % scope]
            else:
                model['%s_transform%i' % (scope, _)] = model['%s_pooling%i' % (scope, _ - 1)]
            with tf.variable_scope('filter%i' % _), tf.name_scope('filter%s' % _):
                model['%s_filter%i' % (scope, _)] = tf.Variable(tf.truncated_normal([sizes['clength'], sizes['clength'], 1, 1]))
                model['%s_stride%i' % (scope, _)] = [1, sizes['cstep'], sizes['cstep'], 1]
            with tf.variable_scope('convolution%i' % _), tf.name_scope('convolution%i' % _):
                model['%s_convolution%i' % (scope, _)] = tf.nn.conv2d(model['%s_transform%i' % (scope, _)], model['%s_filter%i' % (scope, _)], model['%s_stride%i' % (scope, _)], 'VALID')
                model['%s_out1length' % scope] = int(math.ceil(float(model['%s_out1length' % scope] - sizes['clength'] + 1) / float(sizes['cstep'])))
                model['%s_out2length' % scope] = tf.to_int32(tf.ceil(tf.div(tf.to_float(tf.subtract(model['%s_out2length' % scope], sizes['clength'] - 1)), tf.to_float(sizes['cstep']))))
                model['%s_maxout2length' % scope] = int(math.ceil(float(model['%s_maxout2length' % scope] - sizes['clength'] + 1) / float(sizes['cstep'])))
                model['%s_pooling%i' % (scope, _)] = getattr(tf.nn, '%s_pool' % config.get(scope, 'pool'))(model['%s_convolution%i' % (scope, _)], [1, sizes['plength'], sizes['plength'], 1], [1, sizes['pstep'], sizes['pstep'], 1], 'VALID')
                model['%s_out1length' % scope] = int(math.ceil(float(model['%s_out1length' % scope] - sizes['plength'] + 1) / float(sizes['pstep'])))
                model['%s_out2length' % scope] = tf.to_int32(tf.ceil(tf.div(tf.to_float(tf.subtract(model['%s_out2length' % scope], sizes['plength'] - 1)), tf.to_float(sizes['pstep']))))
                model['%s_maxout2length' % scope] = int(math.ceil(float(model['%s_maxout2length' % scope] - sizes['plength'] + 1) / float(sizes['pstep'])))

        with tf.variable_scope('outputs'), tf.name_scope('outputs'):
            model['%s_outputs' % scope] = tf.transpose(tf.squeeze(model['%s_pooling%i' % (scope, _)], [3], '%s_outputs' % scope), [1, 0, 2])
    return model
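All six length updates per layer are the same VALID-padding output-length formula, ceil((L - k + 1) / s), computed with math.ceil for static sizes and with tf.ceil for the dynamic per-sequence lengths. A standalone sketch of the static version:

import math

def valid_out_len(L, k, s):
    # Output length of a VALID conv/pool with kernel size k and stride s.
    return int(math.ceil(float(L - k + 1) / s))

print(valid_out_len(100, 5, 2))  # 48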
Example 14: clampSlice
def clampSlice(self, shouldCeil, transformedCoordinates, index):
    coordinateSlice = tf.slice(transformedCoordinates, [0, index], [tf.shape(transformedCoordinates)[0], 1])
    if not shouldCeil:
        result = tf.floor(coordinateSlice)
    else:
        result = tf.ceil(coordinateSlice)
    return result
Example 15: _compare
def _compare(self, x, use_gpu):
    np_floor, np_ceil = np.floor(x), np.ceil(x)
    with self.test_session(use_gpu=use_gpu) as sess:
        inx = tf.convert_to_tensor(x)
        ofloor, oceil = tf.floor(inx), tf.ceil(inx)
        tf_floor, tf_ceil = sess.run([ofloor, oceil])
    self.assertAllEqual(np_floor, tf_floor)
    self.assertAllEqual(np_ceil, tf_ceil)
    self.assertShapeEqual(np_floor, ofloor)
    self.assertShapeEqual(np_ceil, oceil)