This article collects typical usage examples of the tensorflow.python.ops.math_ops.ceil function in Python. If you are unsure what the ceil function does, how to call it, or want to see it in real code, the curated code examples below should help.
The following shows 15 code examples of the ceil function, ordered by popularity by default.
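Before the examples, a minimal sketch of what the function computes may help: math_ops.ceil (exposed publicly as tf.math.ceil) rounds each element of a floating-point tensor up to the nearest integer. The snippet below is illustrative only and assumes an eager (TF 2.x-style) environment:

```python
import tensorflow as tf

x = tf.constant([-1.7, -0.2, 0.0, 0.2, 1.7])
y = tf.math.ceil(x)  # element-wise ceiling
# y -> [-1., -0., 0., 1., 2.]
```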
Example 1: _log_survival_function
def _log_survival_function(self, y):
  low = self._low
  high = self._high
  # Recall the promise:
  # survival_function(y) := P[Y > y]
  #                       = 0, if y >= high,
  #                       = 1, if y < low,
  #                       = P[X > y], otherwise.

  # P[Y > j] = P[ceiling(Y) > j] since mass is only at integers, not in
  # between.
  j = math_ops.ceil(y)

  # P[X > j], used when low < X < high.
  result_so_far = self.distribution.log_survival_function(j)

  # Broadcast, because it's possible that this is a single distribution being
  # evaluated on a number of samples, or something like that.
  j += array_ops.zeros_like(result_so_far)

  # Re-define values at the cutoffs.
  if low is not None:
    result_so_far = array_ops.where(j < low,
                                    array_ops.zeros_like(result_so_far),
                                    result_so_far)
  if high is not None:
    neg_inf = -np.inf * array_ops.ones_like(result_so_far)
    result_so_far = array_ops.where(j >= high, neg_inf, result_so_far)

  return result_so_far
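The cutoff handling can be traced with plain NumPy: ceil moves y onto the integer grid, and values outside [low, high) get the constant overrides log(1) = 0 and log(0) = -inf. The values below are made up purely for illustration:

```python
import numpy as np

low, high = 0.0, 5.0
y = np.array([-2.3, 0.4, 4.0, 6.1])
j = np.ceil(y)                                 # [-2., 1., 4., 7.]

log_sf = np.full_like(j, -0.5)                 # stand-in for distribution.log_survival_function(j)
log_sf = np.where(j < low, 0.0, log_sf)        # P[Y > y] = 1  -> log 1 = 0
log_sf = np.where(j >= high, -np.inf, log_sf)  # P[Y > y] = 0  -> log 0 = -inf
```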
Example 2: __call__
def __call__(self, step):
  with ops.name_scope(
      self.name, "PolynomialDecay",
      [self.initial_learning_rate, step, self.decay_steps,
       self.end_learning_rate, self.power]
  ) as name:
    initial_learning_rate = ops.convert_to_tensor(
        self.initial_learning_rate, name="initial_learning_rate")
    dtype = initial_learning_rate.dtype
    end_learning_rate = math_ops.cast(self.end_learning_rate, dtype)
    power = math_ops.cast(self.power, dtype)

    global_step_recomp = math_ops.cast(step, dtype)
    decay_steps_recomp = math_ops.cast(self.decay_steps, dtype)
    if self.cycle:
      # Find the first multiple of decay_steps that is bigger than
      # global_step. If global_step is zero set the multiplier to 1
      multiplier = control_flow_ops.cond(
          math_ops.equal(global_step_recomp, 0), lambda: 1.0,
          lambda: math_ops.ceil(global_step_recomp / self.decay_steps))
      decay_steps_recomp = math_ops.multiply(decay_steps_recomp, multiplier)
    else:
      # Make sure that the global_step used is not bigger than decay_steps.
      global_step_recomp = math_ops.minimum(global_step_recomp,
                                            self.decay_steps)

    p = math_ops.div(global_step_recomp, decay_steps_recomp)
    return math_ops.add(
        math_ops.multiply(initial_learning_rate - end_learning_rate,
                          math_ops.pow(1 - p, power)),
        end_learning_rate,
        name=name)
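In the cycle branch, ceil lifts decay_steps to the first multiple that covers the current step, so the schedule restarts its polynomial instead of staying pinned at end_learning_rate. The same arithmetic in plain Python, as a minimal sketch with made-up values:

```python
import math

def poly_decay(initial_lr, end_lr, step, decay_steps, power=1.0, cycle=False):
    if cycle:
        # First multiple of decay_steps covering `step` (multiplier 1 at step 0).
        multiplier = 1.0 if step == 0 else math.ceil(step / decay_steps)
        decay_steps = decay_steps * multiplier
    else:
        step = min(step, decay_steps)
    p = step / decay_steps
    return (initial_lr - end_lr) * (1 - p) ** power + end_lr

poly_decay(0.1, 0.01, step=15000, decay_steps=10000, cycle=True)  # decays within [10000, 20000)
```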
Example 3: _survival_function
def _survival_function(self, y):
  lower_cutoff = self._lower_cutoff
  upper_cutoff = self._upper_cutoff
  # Recall the promise:
  # survival_function(y) := P[Y > y]
  #                       = 0, if y >= upper_cutoff,
  #                       = 1, if y < lower_cutoff,
  #                       = P[X > y], otherwise.

  # P[Y > j] = P[ceiling(Y) > j] since mass is only at integers, not in
  # between.
  j = math_ops.ceil(y)

  # P[X > j], used when lower_cutoff < X < upper_cutoff.
  result_so_far = self.distribution.survival_function(j)

  # Broadcast, because it's possible that this is a single distribution being
  # evaluated on a number of samples, or something like that.
  j += array_ops.zeros_like(result_so_far)

  # Re-define values at the cutoffs.
  if lower_cutoff is not None:
    result_so_far = math_ops.select(j < lower_cutoff,
                                    array_ops.ones_like(result_so_far),
                                    result_so_far)
  if upper_cutoff is not None:
    result_so_far = math_ops.select(j >= upper_cutoff,
                                    array_ops.zeros_like(result_so_far),
                                    result_so_far)

  return result_so_far
Example 4: decayed_lr
def decayed_lr(learning_rate, global_step, decay_steps, end_learning_rate,
               power, cycle, name):
  """Helper to recompute learning rate; most helpful in eager-mode."""
  with ops.name_scope(
      name, "PolynomialDecay",
      [learning_rate, global_step, decay_steps, end_learning_rate, power]
  ) as name:
    learning_rate = ops.convert_to_tensor(learning_rate, name="learning_rate")
    dtype = learning_rate.dtype
    end_learning_rate = math_ops.cast(end_learning_rate, dtype)
    power = math_ops.cast(power, dtype)

    global_step_recomp = math_ops.cast(global_step, dtype)
    decay_steps_recomp = math_ops.cast(decay_steps, dtype)
    if cycle:
      # Find the first multiple of decay_steps that is bigger than
      # global_step. If global_step is zero set the multiplier to 1
      multiplier = control_flow_ops.cond(
          math_ops.equal(global_step_recomp, 0), lambda: 1.0,
          lambda: math_ops.ceil(global_step_recomp / decay_steps))
      decay_steps_recomp = math_ops.multiply(decay_steps_recomp, multiplier)
    else:
      # Make sure that the global_step used is not bigger than decay_steps.
      global_step_recomp = math_ops.minimum(global_step_recomp, decay_steps)

    p = math_ops.div(global_step_recomp, decay_steps_recomp)
    return math_ops.add(
        math_ops.multiply(learning_rate - end_learning_rate,
                          math_ops.pow(1 - p, power)),
        end_learning_rate,
        name=name)
Example 5: frames
def frames(signal, frame_length, frame_step, name=None):
  """Frame a signal into overlapping frames.

  May be used in front of spectral functions.

  For example:

  ```python
  pcm = tf.placeholder(tf.float32, [None, 9152])
  frames = tf.contrib.signal.frames(pcm, 512, 180)
  magspec = tf.abs(tf.spectral.rfft(frames, [512]))
  image = tf.expand_dims(magspec, 3)
  ```

  Args:
    signal: A `Tensor` of shape `[batch_size, signal_length]`.
    frame_length: An `int32` or `int64` `Tensor`. The length of each frame.
    frame_step: An `int32` or `int64` `Tensor`. The step between frames.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of frames with shape `[batch_size, num_frames, frame_length]`.

  Raises:
    ValueError: if signal does not have rank 2.
  """
  with ops.name_scope(name, "frames", [signal, frame_length, frame_step]):
    signal = ops.convert_to_tensor(signal, name="signal")
    frame_length = ops.convert_to_tensor(frame_length, name="frame_length")
    frame_step = ops.convert_to_tensor(frame_step, name="frame_step")

    signal_rank = signal.shape.ndims
    if signal_rank != 2:
      raise ValueError("expected signal to have rank 2 but was " +
                       str(signal_rank))

    signal_length = array_ops.shape(signal)[1]

    num_frames = math_ops.ceil((signal_length - frame_length) / frame_step)
    num_frames = 1 + math_ops.cast(num_frames, dtypes.int32)

    pad_length = (num_frames - 1) * frame_step + frame_length
    pad_signal = array_ops.pad(signal,
                               [[0, 0], [0, pad_length - signal_length]])

    indices_frame = array_ops.expand_dims(math_ops.range(frame_length), 0)
    indices_frames = array_ops.tile(indices_frame, [num_frames, 1])

    indices_step = array_ops.expand_dims(
        math_ops.range(num_frames) * frame_step, 1)
    indices_steps = array_ops.tile(indices_step, [1, frame_length])

    indices = indices_frames + indices_steps

    # TODO(androbin): remove `transpose` when `gather` gets `axis` support
    pad_signal = array_ops.transpose(pad_signal)
    signal_frames = array_ops.gather(pad_signal, indices)
    signal_frames = array_ops.transpose(signal_frames, perm=[2, 0, 1])

    return signal_frames
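The only role of ceil here is frame counting: the number of hops that fit after the first frame is rounded up, and the signal is padded so the final frame is complete. A rough NumPy equivalent of that bookkeeping, using the 9152/512/180 values from the docstring example:

```python
import numpy as np

signal_length, frame_length, frame_step = 9152, 512, 180

num_frames = 1 + int(np.ceil((signal_length - frame_length) / frame_step))  # 49
pad_length = (num_frames - 1) * frame_step + frame_length                   # 9152
pad_amount = pad_length - signal_length                                     # 0, already aligned
```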
Example 6: _enclosing_power_of_two
def _enclosing_power_of_two(value):
  """Return 2**N for integer N such that 2**N >= value."""
  value_static = tensor_util.constant_value(value)
  if value_static is not None:
    return constant_op.constant(
        int(2**np.ceil(np.log(value_static) / np.log(2.0))), value.dtype)
  return math_ops.cast(
      math_ops.pow(2.0, math_ops.ceil(
          math_ops.log(math_ops.to_float(value)) / math_ops.log(2.0))),
      value.dtype)
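For intuition, the same rounding-up-to-a-power-of-two can be written with plain NumPy (valid for positive inputs; the TensorFlow version above uses natural logs, which is equivalent):

```python
import numpy as np

def enclosing_power_of_two(value):
    # ceil(log2(value)) is the smallest exponent N with 2**N >= value.
    return int(2 ** np.ceil(np.log2(value)))

[enclosing_power_of_two(v) for v in (1, 3, 256, 300)]  # [1, 4, 256, 512]
```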
Example 7: _compare
def _compare(self, x):
  np_floor, np_ceil = np.floor(x), np.ceil(x)

  with self.cached_session() as sess:
    inx = ops.convert_to_tensor(x)
    ofloor, oceil = math_ops.floor(inx), math_ops.ceil(inx)
    tf_floor, tf_ceil = sess.run([ofloor, oceil])

  self.assertAllEqual(np_floor, tf_floor)
  self.assertAllEqual(np_ceil, tf_ceil)
  self.assertShapeEqual(np_floor, ofloor)
  self.assertShapeEqual(np_ceil, oceil)
Example 8: _compare
def _compare(self, x):
  np_floor, np_ceil = np.floor(x), np.ceil(x)

  inx = ops.convert_to_tensor(x)
  ofloor, oceil = math_ops.floor(inx), math_ops.ceil(inx)
  tf_floor, tf_ceil = self.evaluate([ofloor, oceil])

  self.assertAllEqual(np_floor, tf_floor)
  self.assertAllEqual(np_ceil, tf_ceil)
  self.assertShapeEqual(np_floor, ofloor)
  self.assertShapeEqual(np_ceil, oceil)
Example 9: testProbAndGradGivesFiniteResultsForCommonEvents
def testProbAndGradGivesFiniteResultsForCommonEvents(self):
  with self.test_session():
    mu = variables.Variable(0.0, name="mu")
    sigma = variables.Variable(1.0, name="sigma")
    qdist = distributions.QuantizedDistribution(
        distribution=distributions.Normal(mu=mu, sigma=sigma))
    x = math_ops.ceil(4 * rng.rand(100).astype(np.float32) - 2)

    variables.global_variables_initializer().run()

    proba = qdist.prob(x)
    self._assert_all_finite(proba.eval())

    grads = gradients_impl.gradients(proba, [mu, sigma])
    self._assert_all_finite(grads[0].eval())
    self._assert_all_finite(grads[1].eval())
Example 10: _sample_n
def _sample_n(self, n, seed=None):
  lower_cutoff = self._lower_cutoff
  upper_cutoff = self._upper_cutoff
  with ops.name_scope("transform"):
    n = ops.convert_to_tensor(n, name="n")
    x_samps = self.base_distribution.sample_n(n=n, seed=seed)
    ones = array_ops.ones_like(x_samps)

    # Snap values to the intervals (j - 1, j].
    result_so_far = math_ops.ceil(x_samps)

    if lower_cutoff is not None:
      result_so_far = math_ops.select(result_so_far < lower_cutoff,
                                      lower_cutoff * ones, result_so_far)

    if upper_cutoff is not None:
      result_so_far = math_ops.select(result_so_far > upper_cutoff,
                                      upper_cutoff * ones, result_so_far)

    return result_so_far
Example 11: _sample_n
def _sample_n(self, n, seed=None):
  low = self._low
  high = self._high
  with ops.name_scope("transform"):
    n = ops.convert_to_tensor(n, name="n")
    x_samps = self.distribution.sample(n, seed=seed)
    ones = array_ops.ones_like(x_samps)

    # Snap values to the intervals (j - 1, j].
    result_so_far = math_ops.ceil(x_samps)

    if low is not None:
      result_so_far = array_ops.where(result_so_far < low,
                                      low * ones, result_so_far)

    if high is not None:
      result_so_far = array_ops.where(result_so_far > high,
                                      high * ones, result_so_far)

    return result_so_far
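Examples 10 and 11 are two API generations of the same sampling transform: continuous draws are snapped up to the integer grid with ceil, so each sample lands in an interval (j - 1, j], and then clamped to the cutoffs. A small NumPy illustration with made-up cutoffs:

```python
import numpy as np

low, high = -1.0, 2.0
x_samps = np.array([-2.6, -0.3, 0.0, 1.2, 3.7])

samples = np.ceil(x_samps)                       # [-2., -0., 0., 2., 4.]
samples = np.where(samples < low, low, samples)  # clamp from below
samples = np.where(samples > high, high, samples)
# samples == [-1., -0., 0., 2., 2.]
```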
Example 12: decayed_lr
def decayed_lr():
  """Helper to recompute learning rate; most helpful in eager-mode."""
  global_step_recomp = math_ops.cast(global_step, dtype)
  decay_steps_recomp = math_ops.cast(decay_steps, dtype)
  if cycle:
    # Find the first multiple of decay_steps that is bigger than
    # global_step. If global_step is zero set the multiplier to 1
    multiplier = control_flow_ops.cond(
        math_ops.equal(global_step_recomp, 0), lambda: 1.0,
        lambda: math_ops.ceil(global_step_recomp / decay_steps))
    decay_steps_recomp = math_ops.multiply(decay_steps_recomp, multiplier)
  else:
    # Make sure that the global_step used is not bigger than decay_steps.
    global_step_recomp = math_ops.minimum(global_step_recomp, decay_steps)

  p = math_ops.div(global_step_recomp, decay_steps_recomp)
  return math_ops.add(
      math_ops.multiply(learning_rate - end_learning_rate,
                        math_ops.pow(1 - p, power)),
      end_learning_rate,
      name=name)
Example 13: sequence_length_from_sparse_tensor
def sequence_length_from_sparse_tensor(sp_tensor, num_elements=1):
  """Returns a [batch_size] Tensor with per-example sequence length."""
  with ops.name_scope(None, 'sequence_length') as name_scope:
    row_ids = sp_tensor.indices[:, 0]
    column_ids = sp_tensor.indices[:, 1]
    # Add one to convert column indices to element length
    column_ids += array_ops.ones_like(column_ids)
    # Get the number of elements we will have per example/row
    seq_length = math_ops.segment_max(column_ids, segment_ids=row_ids)

    # The raw values are grouped according to num_elements;
    # how many entities will we have after grouping?
    # Example: orig tensor [[1, 2], [3]], col_ids = (0, 1, 1),
    # row_ids = (0, 0, 1), seq_length = [2, 1]. If num_elements = 2,
    # these will get grouped, and the final seq_length is [1, 1]
    seq_length = math_ops.cast(
        math_ops.ceil(seq_length / num_elements), dtypes.int64)

    # If the last n rows do not have ids, seq_length will have shape
    # [batch_size - n]. Pad the remaining values with zeros.
    n_pad = array_ops.shape(sp_tensor)[:1] - array_ops.shape(seq_length)[:1]
    padding = array_ops.zeros(n_pad, dtype=seq_length.dtype)
    return array_ops.concat([seq_length, padding], axis=0, name=name_scope)
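The ceil at the end is just a ceiling division: raw per-row value counts become counts of num_elements-sized groups. Tracing the comment's example in plain Python:

```python
import math

seq_length = [2, 1]   # raw values per row, as produced by segment_max
num_elements = 2

grouped = [math.ceil(n / num_elements) for n in seq_length]  # [1, 1]
```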
Example 14: polynomial_decay
def polynomial_decay(exploration_rate, timestep, decay_steps,
                     end_exploration_rate=0.0001, power=1.0,
                     cycle=False, name=None):
  """Applies a polynomial decay to the exploration rate.

  It is commonly observed that a monotonically decreasing exploration rate, whose
  degree of change is carefully chosen, results in a better performing model.
  This function applies a polynomial decay function to a provided initial
  `exploration_rate` to reach an `end_exploration_rate` in the given `decay_steps`.

  It requires a `timestep` value to compute the decayed exploration rate. You
  can just pass a TensorFlow variable that you increment at each training step.

  The function returns the decayed exploration rate. It is computed as:

  ```python
  >>> timestep = min(timestep, decay_steps)
  >>> decayed_exploration_rate = (exploration_rate - end_exploration_rate) *
  ...                            (1 - timestep / decay_steps) ^ (power) + end_exploration_rate
  ```

  If `cycle` is True then a multiple of `decay_steps` is used, the first one
  that is bigger than `timesteps`.

  ```python
  >>> decay_steps = decay_steps * ceil(timestep / decay_steps)
  >>> decayed_exploration_rate = (exploration_rate - end_exploration_rate) *
  ...                            (1 - timestep / decay_steps) ^ (power) +
  ...                            end_exploration_rate
  ```

  Example: decay from 0.1 to 0.01 in 10000 steps using sqrt (i.e. power=0.5):

  ```python
  >>> timestep = tf.Variable(0, trainable=False)
  >>> starter_exploration_rate = 0.1
  >>> end_exploration_rate = 0.01
  >>> decay_steps = 10000
  >>> exploration_rate = tf.train.polynomial_decay(starter_exploration_rate, timestep,
  ...                                              decay_steps, end_exploration_rate, power=0.5)
  >>> # Passing timestep to minimize() will increment it at each step.
  >>> learning_step = (
  ...     tf.train.GradientDescentOptimizer(exploration_rate)
  ...     .minimize(...my loss..., timestep=timestep)
  ... )
  ```

  Args:
    exploration_rate: A scalar `float32` or `float64` `Tensor` or a
      Python number. The initial exploration rate.
    timestep: A scalar `int32` or `int64` `Tensor` or a Python number.
      Global step to use for the decay computation. Must not be negative.
    decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.
      Must be positive. See the decay computation above.
    end_exploration_rate: A scalar `float32` or `float64` `Tensor` or a
      Python number. The minimal end exploration rate.
    power: A scalar `float32` or `float64` `Tensor` or a
      Python number. The power of the polynomial. Defaults to linear, 1.0.
    cycle: A boolean, whether or not it should cycle beyond decay_steps.
    name: String. Optional name of the operation. Defaults to
      'PolynomialDecay'.

  Returns:
    A scalar `Tensor` of the same type as `exploration_rate`. The decayed exploration rate.

  Raises:
    ValueError: if `timestep` is not supplied.
  """
  if timestep is None:
    raise ValueError("timestep is required for polynomial_decay.")

  with get_name_scope(name=name, scope="PolynomialDecay",
                      values=[exploration_rate, timestep,
                              decay_steps, end_exploration_rate, power]) as name:
    exploration_rate = ops.convert_to_tensor(exploration_rate, name="exploration_rate")
    dtype = exploration_rate.dtype
    timestep = math_ops.cast(timestep, dtype)
    decay_steps = math_ops.cast(decay_steps, dtype)
    end_exploration_rate = math_ops.cast(end_exploration_rate, dtype)
    power = math_ops.cast(power, dtype)
    if cycle:
      # Find the first multiple of decay_steps that is bigger than timestep.
      decay_steps = math_ops.multiply(decay_steps,
                                      math_ops.ceil(timestep / decay_steps))
    else:
      # Make sure that the timestep used is not bigger than decay_steps.
      timestep = math_ops.minimum(timestep, decay_steps)

    p = math_ops.div(timestep, decay_steps)
    return math_ops.add(math_ops.multiply(exploration_rate - end_exploration_rate,
                                          math_ops.pow(1 - p, power)),
                        end_exploration_rate, name=name)
Example 15: odeint_fixed
def odeint_fixed(func, y0, t, dt=None, method='rk4', name=None):
  """ODE integration on a fixed grid (with no step size control).

  Useful in certain scenarios to avoid the overhead of adaptive step size
  control, e.g. when differentiation of the integration result is desired and/or
  the time grid is known a priori to be sufficient.

  Args:
    func: Function that maps a Tensor holding the state `y` and a scalar Tensor
      `t` into a Tensor of state derivatives with respect to time.
    y0: N-D Tensor giving starting value of `y` at time point `t[0]`.
    t: 1-D Tensor holding a sequence of time points for which to solve for
      `y`. The initial time point should be the first element of this sequence,
      and each time must be larger than the previous time. May have any floating
      point dtype.
    dt: 0-D or 1-D Tensor providing time step suggestion to be used on time
      integration intervals in `t`. 1-D Tensor should provide values
      for all intervals, must have 1 less element than that of `t`.
      If given a 0-D Tensor, the value is interpreted as time step suggestion
      same for all intervals. If passed None, then time step is set to be the
      t[1:] - t[:-1]. Defaults to None. The actual step size is obtained by
      insuring an integer number of steps per interval, potentially reducing the
      time step.
    method: One of 'midpoint' or 'rk4'.
    name: Optional name for the resulting operation.

  Returns:
    y: (N+1)-D tensor, where the first dimension corresponds to different
      time points. Contains the solved value of y for each desired time point in
      `t`, with the initial value `y0` being the first element along the first
      dimension.

  Raises:
    ValueError: Upon caller errors.
  """
  with ops.name_scope(name, 'odeint_fixed', [y0, t, dt]):
    t = ops.convert_to_tensor(t, preferred_dtype=dtypes.float64, name='t')
    y0 = ops.convert_to_tensor(y0, name='y0')

    intervals = t[1:] - t[:-1]
    if dt is None:
      dt = intervals
    dt = ops.convert_to_tensor(dt, preferred_dtype=dtypes.float64, name='dt')

    steps_on_intervals = math_ops.ceil(intervals / dt)
    dt = intervals / steps_on_intervals
    steps_on_intervals = math_ops.cast(steps_on_intervals, dtype=dtypes.int32)

    _check_input_types(y0, t, dt)
    _check_input_sizes(t, dt)

    with _assert_increasing(t):
      with ops.name_scope(method):
        if method == 'midpoint':
          return _MidpointFixedGridIntegrator().integrate(func, y0, t, dt,
                                                          steps_on_intervals)
        elif method == 'rk4':
          return _RK4FixedGridIntegrator().integrate(func, y0, t, dt,
                                                     steps_on_intervals)
        else:
          raise ValueError('method not supported: {!s}'.format(method))
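Here ceil guarantees an integer number of steps per interval: the suggested dt is only ever reduced, never stretched past a requested time point. A small NumPy sketch of that adjustment, with time points and dt chosen purely for illustration:

```python
import numpy as np

t = np.array([0.0, 1.0, 2.5])
dt_suggest = 0.4

intervals = t[1:] - t[:-1]               # [1.0, 1.5]
steps = np.ceil(intervals / dt_suggest)  # [3., 4.]
dt_actual = intervals / steps            # [0.333..., 0.375], each <= 0.4
```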