This page collects typical usage examples of the tensorflow.floor method in Python. If you have been wondering what exactly tensorflow.floor does, how to call it, or what working code looks like, the curated samples below may help. You can also explore further usage examples of the tensorflow module in which this method is defined.
Fifteen code examples of tensorflow.floor are presented below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system surface better Python code samples.
Example 1: _quantize
# Required import: import tensorflow [as alias]
# Or: from tensorflow import floor [as alias]
def _quantize(x, params, randomize=True):
  """Quantize x according to params, optionally randomizing the rounding."""
  if not params.quantize:
    return x
  if not randomize:
    return tf.bitcast(
        tf.cast(x / params.quantization_scale, tf.int16), tf.float16)
  abs_x = tf.abs(x)
  sign_x = tf.sign(x)
  y = abs_x / params.quantization_scale
  y = tf.floor(y + tf.random_uniform(common_layers.shape_list(x)))
  y = tf.minimum(y, tf.int16.max) * sign_x
  q = tf.bitcast(tf.cast(y, tf.int16), tf.float16)
  return q
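The core of this snippet is stochastic rounding: tf.floor(y + U) with U ~ Uniform[0, 1) rounds y up with probability equal to its fractional part, so the quantization is unbiased in expectation. A minimal sketch of just that trick, assuming TensorFlow 1.x:

import tensorflow as tf

# Adding uniform noise in [0, 1) before flooring rounds each value up with
# probability equal to its fractional part (e.g. 2.7 -> 3.0 w.p. 0.7).
y = tf.constant([0.2, 0.5, 2.7])
rounded = tf.floor(y + tf.random_uniform(tf.shape(y)))

with tf.Session() as sess:
    print(sess.run(rounded))  # e.g. [0., 1., 3.] -- varies run to run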
Example 2: drop_path
# Required import: import tensorflow [as alias]
# Or: from tensorflow import floor [as alias]
def drop_path(inputs, keep_prob, is_training=True, scope=None):
  """Drops out a whole example's hidden state with the specified probability."""
  with tf.name_scope(scope, 'drop_path', [inputs]):
    net = inputs
    if is_training:
      batch_size = tf.shape(net)[0]
      noise_shape = [batch_size, 1, 1, 1]
      random_tensor = keep_prob
      random_tensor += tf.random_uniform(noise_shape, dtype=tf.float32)
      binary_tensor = tf.floor(random_tensor)
      net = tf.div(net, keep_prob) * binary_tensor
    return net
# =========================================================================== #
# Useful methods
# =========================================================================== #
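A hedged usage sketch of drop_path, assuming TensorFlow 1.x and a 4-D NHWC activation tensor: with keep_prob=0.9 each example in the batch is zeroed entirely with probability 0.1, and survivors are rescaled by 1/keep_prob so the expected activation is unchanged.

import tensorflow as tf

net = tf.random_normal([8, 32, 32, 64])   # hypothetical NHWC activations
net = drop_path(net, keep_prob=0.9, is_training=True)

with tf.Session() as sess:
    out = sess.run(net)   # on average ~10% of the 8 examples are all-zero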
Example 3: dropout
# Required import: import tensorflow [as alias]
# Or: from tensorflow import floor [as alias]
def dropout(x, pdrop, *, do_dropout, stateless=True, seed=None, name):
  """Like tf.nn.dropout but stateless."""
  if stateless:
    assert seed is not None
  def _dropout():
    with tf.name_scope(name):
      noise_shape = tf.shape(x)
      if stateless:
        r = tf.random.stateless_uniform(noise_shape, seed, dtype=x.dtype)
        # floor of a uniform draw in [keep_prob, 1.0 + keep_prob) gives a 0/1 mask
        mask = tf.floor(1 - pdrop + r)
        return x * (mask * (1 / (1 - pdrop)))
      else:
        return tf.nn.dropout(x, rate=pdrop, noise_shape=noise_shape)
  if pdrop == 0 or not do_dropout:
    return x
  else:
    return _dropout()
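A usage sketch, assuming a TensorFlow version where tf.random.stateless_uniform is available (1.13+). The point of the two-element integer seed is reproducibility: the same seed yields the same dropout mask on every evaluation.

import tensorflow as tf

x = tf.ones([4, 16])
# Same seed => identical mask every time the graph is evaluated.
y = dropout(x, pdrop=0.1, do_dropout=True, stateless=True,
            seed=[1, 42], name='attn_dropout')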
Example 4: bi_linear_sample
# Required import: import tensorflow [as alias]
# Or: from tensorflow import floor [as alias]
def bi_linear_sample(self, img_feat, n, x, y):
  """Bilinearly sample img_feat at fractional coordinates (x, y) for batch indices n."""
  x1 = tf.floor(x)
  x2 = tf.ceil(x)
  y1 = tf.floor(y)
  y2 = tf.ceil(y)
  # Gather the four neighboring feature vectors around each (x, y).
  Q11 = tf.gather_nd(img_feat, tf.stack([n, tf.cast(x1, tf.int32), tf.cast(y1, tf.int32)], 1))
  Q12 = tf.gather_nd(img_feat, tf.stack([n, tf.cast(x1, tf.int32), tf.cast(y2, tf.int32)], 1))
  Q21 = tf.gather_nd(img_feat, tf.stack([n, tf.cast(x2, tf.int32), tf.cast(y1, tf.int32)], 1))
  Q22 = tf.gather_nd(img_feat, tf.stack([n, tf.cast(x2, tf.int32), tf.cast(y2, tf.int32)], 1))
  # Weight each neighbor by the area of the opposite sub-rectangle.
  weights = tf.multiply(tf.subtract(x2, x), tf.subtract(y2, y))
  Q11 = tf.multiply(tf.expand_dims(weights, 1), Q11)
  weights = tf.multiply(tf.subtract(x, x1), tf.subtract(y2, y))
  Q21 = tf.multiply(tf.expand_dims(weights, 1), Q21)
  weights = tf.multiply(tf.subtract(x2, x), tf.subtract(y, y1))
  Q12 = tf.multiply(tf.expand_dims(weights, 1), Q12)
  weights = tf.multiply(tf.subtract(x, x1), tf.subtract(y, y1))
  Q22 = tf.multiply(tf.expand_dims(weights, 1), Q22)
  outputs = tf.add_n([Q11, Q21, Q12, Q22])
  return outputs
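A sketch of invoking this sampler, assuming img_feat is a [batch, H, W, C] feature map, n holds a batch index per query point, and (x, y) are fractional pixel coordinates (all shapes here are assumptions, not from the source). Note one caveat of this formulation: if a coordinate is exactly integral, tf.floor and tf.ceil coincide and all four weights vanish, yielding a zero output.

import tensorflow as tf

img_feat = tf.random_normal([2, 56, 56, 128])   # hypothetical [N, H, W, C]
n = tf.constant([0, 0, 1])                      # batch index per point
x = tf.constant([10.3, 20.7, 5.5])              # fractional row coordinates
y = tf.constant([4.2, 33.9, 12.1])              # fractional column coordinates
feats = model.bi_linear_sample(img_feat, n, x, y)  # -> [3, 128]; `model` is a placeholder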
Example 5: gaussian_diag
# Required import: import tensorflow [as alias]
# Or: from tensorflow import floor [as alias]
def gaussian_diag(mean, logsd):
  class o(object):
    pass
  o.mean = mean
  o.logsd = logsd
  o.eps = tf.random_normal(tf.shape(mean))
  o.sample = mean + tf.exp(logsd) * o.eps
  o.sample2 = lambda eps: mean + tf.exp(logsd) * eps
  o.logps = lambda x: -0.5 * \
      (np.log(2 * np.pi) + 2. * logsd + (x - mean) ** 2 / tf.exp(2. * logsd))
  o.logp = lambda x: flatten_sum(o.logps(x))
  o.get_eps = lambda x: (x - mean) / tf.exp(logsd)
  return o
# def discretized_logistic_old(mean, logscale, binsize=1 / 256.0, sample=None):
#   scale = tf.exp(logscale)
#   sample = (tf.floor(sample / binsize) * binsize - mean) / scale
#   logp = tf.log(tf.sigmoid(sample + binsize / scale) - tf.sigmoid(sample) + 1e-7)
#   return tf.reduce_sum(logp, [1, 2, 3])
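A sketch of the returned namespace object in use, assuming TF 1.x with numpy imported as np; o.logp depends on a flatten_sum helper defined elsewhere in the source repo, so only o.logps is exercised here.

import tensorflow as tf

mean = tf.zeros([8, 4])
logsd = tf.zeros([8, 4])        # log-std 0 => unit standard deviation
pz = gaussian_diag(mean, logsd)
z = pz.sample                   # reparameterized draw: mean + exp(logsd) * eps
log_density = pz.logps(z)       # elementwise Gaussian log-density of z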
Example 6: drop_connect
# Required import: import tensorflow [as alias]
# Or: from tensorflow import floor [as alias]
def drop_connect(inputs, is_training, drop_connect_rate):
  """Apply drop connect."""
  if not is_training:
    return inputs
  # Compute keep_prob
  # TODO(tanmingxing): add support for training progress.
  keep_prob = 1.0 - drop_connect_rate
  # Compute drop_connect tensor
  batch_size = tf.shape(inputs)[0]
  random_tensor = keep_prob
  random_tensor += tf.random_uniform([batch_size, 1, 1, 1], dtype=inputs.dtype)
  binary_tensor = tf.floor(random_tensor)
  output = tf.div(inputs, keep_prob) * binary_tensor
  return output
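The mask construction is worth spelling out: keep_prob + U with U ~ Uniform[0, 1) lies in [keep_prob, 1 + keep_prob), so tf.floor yields 1. with probability keep_prob and 0. otherwise, i.e. a per-example Bernoulli(keep_prob) mask. A usage sketch, assuming TF 1.x:

import tensorflow as tf

x = tf.random_normal([16, 7, 7, 320])   # hypothetical block output
y = drop_connect(x, is_training=True, drop_connect_rate=0.2)
# With is_training=False the input passes through unchanged; the 1/keep_prob
# scaling during training keeps the expected activation identical.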
Example 7: drop_connect
# Required import: import tensorflow [as alias]
# Or: from tensorflow import floor [as alias]
def drop_connect(inputs, is_training, survival_prob):
  """Drop the entire conv with the given survival probability."""
  # "Deep Networks with Stochastic Depth", https://arxiv.org/pdf/1603.09382.pdf
  if not is_training:
    return inputs
  # Compute tensor.
  batch_size = tf.shape(inputs)[0]
  random_tensor = survival_prob
  random_tensor += tf.random_uniform([batch_size, 1, 1, 1], dtype=inputs.dtype)
  binary_tensor = tf.floor(random_tensor)
  # Unlike the conventional approach of multiplying by survival_prob at test
  # time, we divide by survival_prob at training time, so no additional
  # compute is needed at test time.
  output = tf.div(inputs, survival_prob) * binary_tensor
  return output
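In stochastic depth the dropped tensor is typically the residual branch, so the skip connection keeps information flowing when the block is dropped. A sketch of that wiring, assuming TF 1.x (block_fn is a placeholder for any shape-preserving sub-network):

import tensorflow as tf

def residual_block(x, block_fn, is_training, survival_prob=0.8):
    y = block_fn(x)                                 # assumed shape-preserving
    y = drop_connect(y, is_training, survival_prob)
    return x + y   # the skip path survives even when the branch is dropped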
Example 8: __call__
# Required import: import tensorflow [as alias]
# Or: from tensorflow import floor [as alias]
def __call__(self, step):
  with tf.name_scope(self.name or "CyclicalLearningRate"):
    initial_learning_rate = tf.convert_to_tensor(
        self.initial_learning_rate, name="initial_learning_rate"
    )
    dtype = initial_learning_rate.dtype
    maximal_learning_rate = tf.cast(self.maximal_learning_rate, dtype)
    step_size = tf.cast(self.step_size, dtype)
    cycle = tf.floor(1 + step / (2 * step_size))
    x = tf.abs(step / step_size - 2 * cycle + 1)
    mode_step = cycle if self.scale_mode == "cycle" else step
    return initial_learning_rate + (
        maximal_learning_rate - initial_learning_rate
    ) * tf.maximum(tf.cast(0, dtype), (1 - x)) * self.scale_fn(mode_step)
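A sketch of evaluating this schedule, assuming the surrounding class follows the tf.keras LearningRateSchedule pattern with initial_learning_rate, maximal_learning_rate, step_size, scale_fn, and scale_mode attributes (the snippet matches the shape of TensorFlow Addons' CyclicalLearningRate):

schedule = CyclicalLearningRate(
    initial_learning_rate=1e-4,
    maximal_learning_rate=1e-2,
    step_size=2000,                 # half-cycle length in steps
    scale_fn=lambda x: 1.0,         # constant amplitude: the triangular policy
    scale_mode="cycle",
)
# The rate ramps linearly up to 1e-2 over 2000 steps, then back down over the
# next 2000; at step 1000 it is halfway up the first ramp, ~5.05e-3.
lr_at_1000 = schedule(1000)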
Example 9: preprocess
# Required import: import tensorflow [as alias]
# Or: from tensorflow import floor [as alias]
def preprocess(self, x):
  """Normalize x.

  Args:
    x: 4-D Tensor.

  Returns:
    x: Scaled such that x lies between -0.5 and 0.5.
  """
  n_bits_x = self.hparams.n_bits_x
  n_bins = 2**n_bits_x
  x = tf.cast(x, dtype=tf.float32)
  if n_bits_x < 8:
    x = tf.floor(x / 2 ** (8 - n_bits_x))
  x = x / n_bins - 0.5
  return x
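Worked example of the bit-depth reduction: with n_bits_x = 5, an 8-bit pixel is first floor-divided by 2^3 = 8, collapsing 256 levels to 32 bins, then mapped into [-0.5, 0.5). A standalone sketch of the same arithmetic, assuming TF 1.x:

import tensorflow as tf

# pixel 200 -> floor(200 / 8) = 25 -> 25 / 32 - 0.5 = 0.28125
x = tf.constant([[[[200.]]]])
n_bits_x, n_bins = 5, 2 ** 5
x = tf.floor(x / 2 ** (8 - n_bits_x)) / n_bins - 0.5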
Example 10: _compute_one_image_loss
# Required import: import tensorflow [as alias]
# Or: from tensorflow import floor [as alias]
def _compute_one_image_loss(self, keypoints, offset, size, ground_truth,
                            meshgrid_y, meshgrid_x, stride, pshape):
  slice_index = tf.argmin(ground_truth, axis=0)[0]
  ground_truth = tf.gather(ground_truth, tf.range(0, slice_index, dtype=tf.int64))
  ngbbox_y = ground_truth[..., 0] / stride
  ngbbox_x = ground_truth[..., 1] / stride
  ngbbox_h = ground_truth[..., 2] / stride
  ngbbox_w = ground_truth[..., 3] / stride
  class_id = tf.cast(ground_truth[..., 4], dtype=tf.int32)
  ngbbox_yx = ground_truth[..., 0:2] / stride
  ngbbox_yx_round = tf.floor(ngbbox_yx)
  offset_gt = ngbbox_yx - ngbbox_yx_round
  size_gt = ground_truth[..., 2:4] / stride
  ngbbox_yx_round_int = tf.cast(ngbbox_yx_round, tf.int64)
  keypoints_loss = self._keypoints_loss(keypoints, ngbbox_yx_round_int, ngbbox_y,
                                        ngbbox_x, ngbbox_h, ngbbox_w, class_id,
                                        meshgrid_y, meshgrid_x, pshape)
  offset = tf.gather_nd(offset, ngbbox_yx_round_int)
  size = tf.gather_nd(size, ngbbox_yx_round_int)
  offset_loss = tf.reduce_mean(tf.abs(offset_gt - offset))
  size_loss = tf.reduce_mean(tf.abs(size_gt - size))
  total_loss = keypoints_loss + 0.1 * size_loss + offset_loss
  return total_loss
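The tf.floor here splits each downsampled box center into an integer heatmap cell plus the fractional offset that the offset head must regress. A minimal numeric sketch of that decomposition (the values are made up):

import tensorflow as tf

center_yx = tf.constant([[100.0, 60.0]])
stride = 4.0
cell = tf.floor(center_yx / stride)       # [[25., 15.]] -- heatmap cell index
offset_gt = center_yx / stride - cell     # [[0., 0.]]; a center y of 102 would give 0.5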
Example 11: __call__
# Required import: import tensorflow [as alias]
# Or: from tensorflow import floor [as alias]
def __call__(self, inputs, state, scope=None):
  output, new_state = self._cell(inputs, state, scope)
  if not isinstance(self._cell.state_size, tuple):
    new_state = tf.split(value=new_state, num_or_size_splits=2, axis=1)
    state = tf.split(value=state, num_or_size_splits=2, axis=1)
  final_new_state = [new_state[0], new_state[1]]
  if self._is_training:
    for i, state_element in enumerate(state):
      random_tensor = 1 - self._zoneout_prob  # keep probability
      random_tensor += tf.random_uniform(tf.shape(state_element))
      # random_tensor lies in [1 - zoneout_prob, 2 - zoneout_prob), so the
      # floor is 0. with probability zoneout_prob and 1. otherwise.
      binary_tensor = tf.floor(random_tensor)
      final_new_state[i] = (
          new_state[i] - state_element) * binary_tensor + state_element
  else:
    for i, state_element in enumerate(state):
      final_new_state[i] = state_element * self._zoneout_prob + new_state[i] * (
          1 - self._zoneout_prob)
  if isinstance(self._cell.state_size, tuple):
    return output, tf.contrib.rnn.LSTMStateTuple(
        final_new_state[0], final_new_state[1])
  return output, tf.concat([final_new_state[0], final_new_state[1]], 1)
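A sketch of wrapping an LSTM cell with this zoneout wrapper, assuming TF 1.x with tf.contrib available; the wrapper's class name and constructor signature below are assumptions, since only its __call__ is shown in the example.

import tensorflow as tf

inputs = tf.random_normal([8, 50, 128])              # [batch, time, features]
cell = tf.contrib.rnn.LSTMCell(256)
cell = ZoneoutWrapper(cell, zoneout_prob=0.1,        # hypothetical name/signature
                      is_training=True)
outputs, state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)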
Example 12: fixed_dropout
# Required import: import tensorflow [as alias]
# Or: from tensorflow import floor [as alias]
def fixed_dropout(xs, keep_prob, noise_shape, seed=None):
  """Apply dropout with the same mask over all inputs.

  Args:
    xs: list of tensors
    keep_prob: probability of keeping each unit
    noise_shape: shape of the shared dropout mask
    seed: optional random seed

  Returns:
    list of dropped inputs
  """
  with tf.name_scope("dropout", values=xs):
    # uniform [keep_prob, 1.0 + keep_prob)
    random_tensor = keep_prob
    random_tensor += tf.random_uniform(noise_shape, seed=seed, dtype=xs[0].dtype)
    # 0. if [keep_prob, 1.0) and 1. if [1.0, 1.0 + keep_prob)
    binary_tensor = tf.floor(random_tensor)
    outputs = []
    for x in xs:
      ret = tf.div(x, keep_prob) * binary_tensor
      ret.set_shape(x.get_shape())
      outputs.append(ret)
    return outputs
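A usage sketch, assuming TF 1.x. The point of the shared mask is that corresponding positions are kept or dropped together across every tensor in the list, and a broadcastable noise_shape reuses the mask along some axes:

import tensorflow as tf

fw = tf.random_normal([32, 100, 128])   # e.g. forward RNN states (hypothetical)
bw = tf.random_normal([32, 100, 128])   # e.g. backward RNN states
# One [32, 1, 128] mask is broadcast over the time axis and shared by both.
fw_d, bw_d = fixed_dropout([fw, bw], keep_prob=0.8, noise_shape=[32, 1, 128])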
Example 13: _apply_func_with_prob
# Required import: import tensorflow [as alias]
# Or: from tensorflow import floor [as alias]
def _apply_func_with_prob(func, image, args, prob, bboxes):
  """Apply `func` to image w/ `args` as input with probability `prob`."""
  assert isinstance(args, tuple)
  assert 'bboxes' == inspect.getargspec(func)[0][1]
  # If prob is a function argument, then this randomness is being handled
  # inside the function, so make sure it is always called.
  if 'prob' in inspect.getargspec(func)[0]:
    prob = 1.0
  # Apply the function with probability `prob`.
  should_apply_op = tf.cast(
      tf.floor(tf.random_uniform([], dtype=tf.float32) + prob), tf.bool)
  augmented_image, augmented_bboxes = tf.cond(
      should_apply_op,
      lambda: func(image, bboxes, *args),
      lambda: (image, bboxes))
  return augmented_image, augmented_bboxes
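The gating expression deserves a note: tf.random_uniform([]) + prob lies in [prob, 1 + prob), so its floor is 1 with probability prob, and the cast to bool turns that into the coin flip driving tf.cond. A standalone sketch of the same pattern, assuming TF 1.x:

import tensorflow as tf

prob = 0.3
coin = tf.cast(tf.floor(tf.random_uniform([], dtype=tf.float32) + prob), tf.bool)
result = tf.cond(coin,
                 lambda: tf.constant('applied'),   # taken with probability 0.3
                 lambda: tf.constant('skipped'))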
Example 14: drop_connect
# Required import: import tensorflow [as alias]
# Or: from tensorflow import floor [as alias]
def drop_connect(inputs, is_training, drop_connect_rate):
  """Apply drop connect."""
  if not is_training:
    return inputs
  # Compute keep_prob
  # TODO(tanmingxing): add support for training progress.
  keep_prob = 1.0 - drop_connect_rate
  # Compute drop_connect tensor
  batch_size = tf.shape(inputs)[0]
  random_tensor = keep_prob
  random_tensor += tf.random_uniform([batch_size, 1, 1, 1], dtype=inputs.dtype)
  binary_tensor = tf.floor(random_tensor)
  output = tf.div(inputs, keep_prob) * binary_tensor
  return output
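This helper is byte-for-byte the same as Example 6. As a cross-check on the trick itself, the floor construction draws from the same Bernoulli(keep_prob) distribution as a direct threshold comparison, though the two masks differ for any particular uniform draw (a sketch, assuming TF 1.x):

import tensorflow as tf

keep_prob = 0.8
u = tf.random_uniform([16, 1, 1, 1])
mask_floor = tf.floor(keep_prob + u)             # 1. iff u >= 1 - keep_prob
mask_cmp = tf.cast(u < keep_prob, tf.float32)    # 1. iff u < keep_prob
# Both are Bernoulli(keep_prob) per example; only the distributions match.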
Example 15: call
# Required import: import tensorflow [as alias]
# Or: from tensorflow import floor [as alias]
def call(self, inputs, training=None):
  def drop_connect():
    keep_prob = 1.0 - self.drop_connect_rate
    # Compute drop_connect tensor
    batch_size = tf.shape(inputs)[0]
    random_tensor = keep_prob
    random_tensor += tf.random_uniform([batch_size, 1, 1, 1], dtype=inputs.dtype)
    binary_tensor = tf.floor(random_tensor)
    output = (inputs / keep_prob) * binary_tensor
    return output
  return K.in_train_phase(drop_connect, inputs, training=training)
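A sketch of exercising this as a Keras layer, assuming the enclosing class is a keras.layers.Layer whose constructor stores drop_connect_rate (the class name DropConnect below is hypothetical). K.in_train_phase routes to drop_connect() during training and to the identity at inference time.

import tensorflow as tf

x = tf.keras.layers.Input([7, 7, 320])
y = DropConnect(drop_connect_rate=0.2)(x)   # hypothetical constructor
model = tf.keras.Model(x, y)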