This article collects typical usage examples of the math_ops.ceil method from the Python module tensorflow.python.ops. If you are wondering what math_ops.ceil does, how to call it, or want to see it used in context, the curated code examples below may help. You can also read more about the containing module, tensorflow.python.ops.math_ops, for further usage examples.
The following shows 15 code examples of math_ops.ceil, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
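Before the collected examples, here is a minimal standalone sketch of what math_ops.ceil computes. It assumes a TF 1.x graph-mode session, and the input values are purely illustrative:

import tensorflow as tf
from tensorflow.python.ops import math_ops

# math_ops.ceil rounds each element up to the nearest integer and returns a
# float tensor of the same dtype; in TF 1.x it is also exposed as tf.ceil.
x = tf.constant([-1.7, 0.2, 2.0, 2.3])
y = math_ops.ceil(x)

with tf.Session() as sess:
    print(sess.run(y))  # [-1.  1.  2.  3.]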
Example 1: _sample_n
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import ceil [as alias]
def _sample_n(self, n, seed=None):
  low = self._low
  high = self._high
  with ops.name_scope("transform"):
    n = ops.convert_to_tensor(n, name="n")
    x_samps = self.distribution.sample(n, seed=seed)
    ones = array_ops.ones_like(x_samps)

    # Snap values to the intervals (j - 1, j].
    result_so_far = math_ops.ceil(x_samps)

    if low is not None:
      result_so_far = array_ops.where(result_so_far < low,
                                      low * ones, result_so_far)
    if high is not None:
      result_so_far = array_ops.where(result_so_far > high,
                                      high * ones, result_so_far)

    return result_so_far
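To see what the ceil-then-clip snapping above produces, here is a plain-NumPy re-creation of just that step; the sample values and the low/high cutoffs are made up for illustration and are not part of the original code:

import numpy as np

x_samps = np.array([-1.2, 0.4, 3.7])
low, high = 0.0, 3.0

result = np.ceil(x_samps)                       # snap to intervals (j - 1, j] -> [-1., 1., 4.]
result = np.where(result < low, low, result)    # clip from below              -> [ 0., 1., 4.]
result = np.where(result > high, high, result)  # clip from above              -> [ 0., 1., 3.]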
Example 2: _sample_n
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import ceil [as alias]
def _sample_n(self, n, seed=None):
  lower_cutoff = self._lower_cutoff
  upper_cutoff = self._upper_cutoff
  with ops.name_scope("transform"):
    n = ops.convert_to_tensor(n, name="n")
    x_samps = self.distribution.sample(n, seed=seed)
    ones = array_ops.ones_like(x_samps)

    # Snap values to the intervals (j - 1, j].
    result_so_far = math_ops.ceil(x_samps)

    if lower_cutoff is not None:
      result_so_far = array_ops.where(result_so_far < lower_cutoff,
                                      lower_cutoff * ones, result_so_far)
    if upper_cutoff is not None:
      result_so_far = array_ops.where(result_so_far > upper_cutoff,
                                      upper_cutoff * ones, result_so_far)

    return result_so_far
Example 3: _sample_n
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import ceil [as alias]
def _sample_n(self, n, seed=None):
  lower_cutoff = self._lower_cutoff
  upper_cutoff = self._upper_cutoff
  with ops.name_scope("transform"):
    n = ops.convert_to_tensor(n, name="n")
    x_samps = self.distribution.sample_n(n=n, seed=seed)
    ones = array_ops.ones_like(x_samps)

    # Snap values to the intervals (j - 1, j].
    result_so_far = math_ops.ceil(x_samps)

    if lower_cutoff is not None:
      result_so_far = math_ops.select(result_so_far < lower_cutoff,
                                      lower_cutoff * ones, result_so_far)
    if upper_cutoff is not None:
      result_so_far = math_ops.select(result_so_far > upper_cutoff,
                                      upper_cutoff * ones, result_so_far)

    return result_so_far
Example 4: test_all_unary_elemwise
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import ceil [as alias]
def test_all_unary_elemwise():
    _test_forward_unary_elemwise(_test_abs)
    _test_forward_unary_elemwise(_test_floor)
    _test_forward_unary_elemwise(_test_exp)
    _test_forward_unary_elemwise(_test_log)
    _test_forward_unary_elemwise(_test_sin)
    _test_forward_unary_elemwise(_test_sqrt)
    _test_forward_unary_elemwise(_test_rsqrt)
    _test_forward_unary_elemwise(_test_neg)
    _test_forward_unary_elemwise(_test_square)
    # ceil and cos come with TFLite 1.14.0.post1 fbs schema
    if package_version.parse(tf.VERSION) >= package_version.parse('1.14.0'):
        _test_forward_unary_elemwise(_test_ceil)
        _test_forward_unary_elemwise(_test_cos)
        _test_forward_unary_elemwise(_test_round)
        # This fails with TF and TFLite 1.15.2, so it could not have been
        # tested in CI or anywhere else. The failure mode is a backtrace from
        # the converter saying that a custom Tan operator implementation must
        # be provided.
        # _test_forward_unary_elemwise(_test_tan)
        _test_forward_unary_elemwise(_test_elu)
#######################################################################
# Element-wise
# ------------
Example 5: _next_array_size
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import ceil [as alias]
def _next_array_size(required_size, growth_factor=1.5):
  """Calculate the next size for reallocating a dynamic array.

  Args:
    required_size: number or tf.Tensor specifying required array capacity.
    growth_factor: optional number or tf.Tensor specifying the growth factor
      between subsequent allocations.

  Returns:
    tf.Tensor with dtype=int32 giving the next array size.
  """
  exponent = math_ops.ceil(
      math_ops.log(math_ops.cast(required_size, dtypes.float32))
      / math_ops.log(math_ops.cast(growth_factor, dtypes.float32)))
  return math_ops.cast(math_ops.ceil(growth_factor ** exponent), dtypes.int32)
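The two ceil calls implement next_size = ceil(growth_factor ** ceil(log(required_size) / log(growth_factor))), i.e. the smallest power of the growth factor that covers the requested capacity, rounded up to an integer. A quick plain-Python check with illustrative numbers:

import math

required_size, growth_factor = 10, 1.5
exponent = math.ceil(math.log(required_size) / math.log(growth_factor))  # ceil(5.679...) = 6
next_size = math.ceil(growth_factor ** exponent)                         # ceil(11.390625) = 12
assert next_size >= required_size                                        # 12 >= 10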
Example 6: _log_survival_function
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import ceil [as alias]
def _log_survival_function(self, y):
  low = self._low
  high = self._high
  # Recall the promise:
  # survival_function(y) := P[Y > y]
  #                       = 0, if y >= high,
  #                       = 1, if y < low,
  #                       = P[X > y], otherwise.

  # P[Y > j] = P[ceiling(Y) > j] since mass is only at integers, not in
  # between.
  j = math_ops.ceil(y)

  # P[X > j], used when low < X < high.
  result_so_far = self.distribution.log_survival_function(j)

  # Broadcast, because it's possible that this is a single distribution being
  # evaluated on a number of samples, or something like that.
  j += array_ops.zeros_like(result_so_far)

  # Re-define values at the cutoffs.
  if low is not None:
    result_so_far = array_ops.where(j < low,
                                    array_ops.zeros_like(result_so_far),
                                    result_so_far)
  if high is not None:
    neg_inf = -np.inf * array_ops.ones_like(result_so_far)
    result_so_far = array_ops.where(j >= high, neg_inf, result_so_far)

  return result_so_far
Example 7: _survival_function
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import ceil [as alias]
def _survival_function(self, y):
  low = self._low
  high = self._high
  # Recall the promise:
  # survival_function(y) := P[Y > y]
  #                       = 0, if y >= high,
  #                       = 1, if y < low,
  #                       = P[X > y], otherwise.

  # P[Y > j] = P[ceiling(Y) > j] since mass is only at integers, not in
  # between.
  j = math_ops.ceil(y)

  # P[X > j], used when low < X < high.
  result_so_far = self.distribution.survival_function(j)

  # Broadcast, because it's possible that this is a single distribution being
  # evaluated on a number of samples, or something like that.
  j += array_ops.zeros_like(result_so_far)

  # Re-define values at the cutoffs.
  if low is not None:
    result_so_far = array_ops.where(j < low,
                                    array_ops.ones_like(result_so_far),
                                    result_so_far)
  if high is not None:
    result_so_far = array_ops.where(j >= high,
                                    array_ops.zeros_like(result_so_far),
                                    result_so_far)

  return result_so_far
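The following plain NumPy/SciPy sketch mirrors just the cutoff handling of the method above: evaluate the base distribution's survival function at j = ceil(y), then force the result to 1 below the lower cutoff and to 0 at or above the upper cutoff. The normal base distribution and the cutoff values are illustrative assumptions, not part of the original class:

import numpy as np
from scipy import stats

low, high = 0.0, 5.0
base = stats.norm(loc=2.0, scale=1.5)        # stand-in for self.distribution

y = np.array([-1.0, 1.3, 4.0, 6.0])
j = np.ceil(y)                               # [-1., 2., 4., 6.]
result = base.sf(j)                          # P[X > j]; e.g. sf(2.0) = 0.5 here
result = np.where(j < low, 1.0, result)      # below low: survival is 1
result = np.where(j >= high, 0.0, result)    # at/above high: survival is 0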
Example 8: _log_survival_function
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import ceil [as alias]
def _log_survival_function(self, y):
  lower_cutoff = self._lower_cutoff
  upper_cutoff = self._upper_cutoff
  # Recall the promise:
  # survival_function(y) := P[Y > y]
  #                       = 0, if y >= upper_cutoff,
  #                       = 1, if y < lower_cutoff,
  #                       = P[X > y], otherwise.

  # P[Y > j] = P[ceiling(Y) > j] since mass is only at integers, not in
  # between.
  j = math_ops.ceil(y)

  # P[X > j], used when lower_cutoff < X < upper_cutoff.
  result_so_far = self.distribution.log_survival_function(j)

  # Broadcast, because it's possible that this is a single distribution being
  # evaluated on a number of samples, or something like that.
  j += array_ops.zeros_like(result_so_far)

  # Re-define values at the cutoffs.
  if lower_cutoff is not None:
    result_so_far = array_ops.where(j < lower_cutoff,
                                    array_ops.zeros_like(result_so_far),
                                    result_so_far)
  if upper_cutoff is not None:
    neg_inf = -np.inf * array_ops.ones_like(result_so_far)
    result_so_far = array_ops.where(j >= upper_cutoff, neg_inf, result_so_far)

  return result_so_far
Example 9: _survival_function
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import ceil [as alias]
def _survival_function(self, y):
  lower_cutoff = self._lower_cutoff
  upper_cutoff = self._upper_cutoff
  # Recall the promise:
  # survival_function(y) := P[Y > y]
  #                       = 0, if y >= upper_cutoff,
  #                       = 1, if y < lower_cutoff,
  #                       = P[X > y], otherwise.

  # P[Y > j] = P[ceiling(Y) > j] since mass is only at integers, not in
  # between.
  j = math_ops.ceil(y)

  # P[X > j], used when lower_cutoff < X < upper_cutoff.
  result_so_far = self.distribution.survival_function(j)

  # Broadcast, because it's possible that this is a single distribution being
  # evaluated on a number of samples, or something like that.
  j += array_ops.zeros_like(result_so_far)

  # Re-define values at the cutoffs.
  if lower_cutoff is not None:
    result_so_far = array_ops.where(j < lower_cutoff,
                                    array_ops.ones_like(result_so_far),
                                    result_so_far)
  if upper_cutoff is not None:
    result_so_far = array_ops.where(j >= upper_cutoff,
                                    array_ops.zeros_like(result_so_far),
                                    result_so_far)

  return result_so_far
Example 10: setUp
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import ceil [as alias]
def setUp(self):
  super(CoreUnaryOpsTest, self).setUp()

  self.ops = [
      ('abs', operator.abs, math_ops.abs, core.abs_function),
      ('neg', operator.neg, math_ops.negative, core.neg),
      # TODO(shoyer): add unary + to core TensorFlow
      ('pos', None, None, None),
      ('sign', None, math_ops.sign, core.sign),
      ('reciprocal', None, math_ops.reciprocal, core.reciprocal),
      ('square', None, math_ops.square, core.square),
      ('round', None, math_ops.round, core.round_function),
      ('sqrt', None, math_ops.sqrt, core.sqrt),
      ('rsqrt', None, math_ops.rsqrt, core.rsqrt),
      ('log', None, math_ops.log, core.log),
      ('exp', None, math_ops.exp, core.exp),
      ('log', None, math_ops.log, core.log),
      ('ceil', None, math_ops.ceil, core.ceil),
      ('floor', None, math_ops.floor, core.floor),
      ('cos', None, math_ops.cos, core.cos),
      ('sin', None, math_ops.sin, core.sin),
      ('tan', None, math_ops.tan, core.tan),
      ('acos', None, math_ops.acos, core.acos),
      ('asin', None, math_ops.asin, core.asin),
      ('atan', None, math_ops.atan, core.atan),
      ('lgamma', None, math_ops.lgamma, core.lgamma),
      ('digamma', None, math_ops.digamma, core.digamma),
      ('erf', None, math_ops.erf, core.erf),
      ('erfc', None, math_ops.erfc, core.erfc),
      ('lgamma', None, math_ops.lgamma, core.lgamma),
  ]

  total_size = np.prod([v.size for v in self.original_lt.axes.values()])
  self.test_lt = core.LabeledTensor(
      math_ops.cast(self.original_lt, dtypes.float32) / total_size,
      self.original_lt.axes)
Example 11: _next_array_size
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import ceil [as alias]
def _next_array_size(required_size, growth_factor=1.5):
  """Calculate the next size for reallocating a dynamic array.

  Args:
    required_size: number or tf.Tensor specifying required array capacity.
    growth_factor: optional number or tf.Tensor specifying the growth factor
      between subsequent allocations.

  Returns:
    tf.Tensor with dtype=int32 giving the next array size.
  """
  exponent = math_ops.ceil(
      math_ops.log(math_ops.cast(required_size, dtypes.float32)) /
      math_ops.log(math_ops.cast(growth_factor, dtypes.float32)))
  return math_ops.cast(math_ops.ceil(growth_factor**exponent), dtypes.int32)
Example 12: _log_survival_function
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import ceil [as alias]
def _log_survival_function(self, y):
  lower_cutoff = self._lower_cutoff
  upper_cutoff = self._upper_cutoff
  # Recall the promise:
  # survival_function(y) := P[Y > y]
  #                       = 0, if y >= upper_cutoff,
  #                       = 1, if y < lower_cutoff,
  #                       = P[X > y], otherwise.

  # P[Y > j] = P[ceiling(Y) > j] since mass is only at integers, not in
  # between.
  j = math_ops.ceil(y)

  # P[X > j], used when lower_cutoff < X < upper_cutoff.
  result_so_far = self.distribution.log_survival_function(j)

  # Broadcast, because it's possible that this is a single distribution being
  # evaluated on a number of samples, or something like that.
  j += array_ops.zeros_like(result_so_far)

  # Re-define values at the cutoffs.
  if lower_cutoff is not None:
    result_so_far = math_ops.select(j < lower_cutoff,
                                    array_ops.zeros_like(result_so_far),
                                    result_so_far)
  if upper_cutoff is not None:
    neg_inf = -np.inf * array_ops.ones_like(result_so_far)
    result_so_far = math_ops.select(j >= upper_cutoff, neg_inf, result_so_far)

  return result_so_far
Example 13: _survival_function
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import ceil [as alias]
def _survival_function(self, y):
  lower_cutoff = self._lower_cutoff
  upper_cutoff = self._upper_cutoff
  # Recall the promise:
  # survival_function(y) := P[Y > y]
  #                       = 0, if y >= upper_cutoff,
  #                       = 1, if y < lower_cutoff,
  #                       = P[X > y], otherwise.

  # P[Y > j] = P[ceiling(Y) > j] since mass is only at integers, not in
  # between.
  j = math_ops.ceil(y)

  # P[X > j], used when lower_cutoff < X < upper_cutoff.
  result_so_far = self.distribution.survival_function(j)

  # Broadcast, because it's possible that this is a single distribution being
  # evaluated on a number of samples, or something like that.
  j += array_ops.zeros_like(result_so_far)

  # Re-define values at the cutoffs.
  if lower_cutoff is not None:
    result_so_far = math_ops.select(j < lower_cutoff,
                                    array_ops.ones_like(result_so_far),
                                    result_so_far)
  if upper_cutoff is not None:
    result_so_far = math_ops.select(j >= upper_cutoff,
                                    array_ops.zeros_like(result_so_far),
                                    result_so_far)

  return result_so_far
Example 14: _test_ceil
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import ceil [as alias]
def _test_ceil(data):
    """ One iteration of ceil """
    return _test_unary_elemwise(math_ops.ceil, data)
#######################################################################
# Floor
# -----
Example 15: frames
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import ceil [as alias]
def frames(signal, frame_length, frame_step, name=None):
  """Frame a signal into overlapping frames.

  May be used in front of spectral functions.

  For example:

  ```python
  pcm = tf.placeholder(tf.float32, [None, 9152])
  frames = tf.contrib.signal.frames(pcm, 512, 180)
  magspec = tf.abs(tf.spectral.rfft(frames, [512]))
  image = tf.expand_dims(magspec, 3)
  ```

  Args:
    signal: A `Tensor` of shape `[batch_size, signal_length]`.
    frame_length: An `int32` or `int64` `Tensor`. The length of each frame.
    frame_step: An `int32` or `int64` `Tensor`. The step between frames.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of frames with shape `[batch_size, num_frames, frame_length]`.

  Raises:
    ValueError: if signal does not have rank 2.
  """
  with ops.name_scope(name, "frames", [signal, frame_length, frame_step]):
    signal = ops.convert_to_tensor(signal, name="signal")
    frame_length = ops.convert_to_tensor(frame_length, name="frame_length")
    frame_step = ops.convert_to_tensor(frame_step, name="frame_step")

    signal_rank = signal.shape.ndims
    if signal_rank != 2:
      # Cast the rank to str so the message builds without a TypeError.
      raise ValueError("expected signal to have rank 2 but was " +
                       str(signal_rank))

    signal_length = array_ops.shape(signal)[1]

    num_frames = math_ops.ceil((signal_length - frame_length) / frame_step)
    num_frames = 1 + math_ops.cast(num_frames, dtypes.int32)

    pad_length = (num_frames - 1) * frame_step + frame_length
    pad_signal = array_ops.pad(signal,
                               [[0, 0], [0, pad_length - signal_length]])

    indices_frame = array_ops.expand_dims(math_ops.range(frame_length), 0)
    indices_frames = array_ops.tile(indices_frame, [num_frames, 1])

    indices_step = array_ops.expand_dims(
        math_ops.range(num_frames) * frame_step, 1)
    indices_steps = array_ops.tile(indices_step, [1, frame_length])

    indices = indices_frames + indices_steps

    # TODO(androbin): remove `transpose` when `gather` gets `axis` support
    pad_signal = array_ops.transpose(pad_signal)
    signal_frames = array_ops.gather(pad_signal, indices)
    signal_frames = array_ops.transpose(signal_frames, perm=[2, 0, 1])

    return signal_frames
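To make the ceil-based bookkeeping concrete, here is the frame and padding arithmetic in plain Python for one illustrative configuration (the numbers are not taken from the docstring example):

import math

signal_length, frame_length, frame_step = 1000, 512, 180

num_frames = 1 + math.ceil((signal_length - frame_length) / frame_step)  # 1 + ceil(2.71...) = 4
pad_length = (num_frames - 1) * frame_step + frame_length                # 3 * 180 + 512 = 1052
padding = pad_length - signal_length                                     # 52 zero samples appended
# The framed output would then have shape [batch_size, 4, 512].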