This page collects typical usage examples of the Python method tensorflow.rint. If you are unsure what tensorflow.rint does, how to call it, or where it is used in practice, the curated code samples below may help. You can also browse the other methods of the tensorflow module for more examples.
Eleven code examples of tensorflow.rint are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
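Before the examples, a minimal sketch of the op itself may help (the constant below is purely illustrative): tf.rint rounds element-wise to the nearest integer, with halfway cases rounded to the nearest even value (the same convention as numpy.rint), and it keeps the input's floating dtype.

import tensorflow as tf

# Halfway cases round to even: -0.5 -> -0., 0.5 -> 0., 1.5 -> 2.
x = tf.constant([-1.7, -0.5, 0.5, 1.5, 2.3])
y = tf.rint(x)  # [-2., -0., 0., 2., 2.]

Under TF 1.x graph mode (which most examples below assume) you would evaluate y inside a tf.Session; under eager execution it evaluates immediately.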
Example 1: build_accuracy
# Required module import: import tensorflow [as alias]
# Or: from tensorflow import rint [as alias]
def build_accuracy(logits, labels, mask, loss_type):
    mask = tf.cast(mask, tf.float32)
    if loss_type == 'contrastive_loss':
        temp_sim = tf.subtract(tf.ones_like(logits), tf.rint(logits), name="temp_sim")  # auto threshold 0.5
        correct = tf.equal(
            tf.cast(temp_sim, tf.float32),
            tf.cast(labels, tf.float32)
        )
        accuracy = tf.reduce_sum(tf.cast(correct, tf.float32) * mask) / (1e-10 + tf.reduce_sum(mask))
    elif loss_type == 'exponent_neg_manhattan_distance_mse':
        temp_sim = tf.rint(logits)
        correct = tf.equal(
            tf.cast(temp_sim, tf.float32),
            tf.cast(labels, tf.float32)
        )
        accuracy = tf.reduce_sum(tf.cast(correct, tf.float32) * mask) / (1e-10 + tf.reduce_sum(mask))
    return accuracy
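A hedged usage sketch (the logits, labels and mask below are made up, not from the source repository): with the 'exponent_neg_manhattan_distance_mse' branch, tf.rint turns scores in [0, 1] into hard 0/1 predictions, and the mask drops padded positions from the average.

logits = tf.constant([0.1, 0.8, 0.6, 0.4])   # hypothetical model scores in [0, 1]
labels = tf.constant([0.0, 1.0, 1.0, 1.0])
mask   = tf.constant([1.0, 1.0, 1.0, 0.0])   # last position is padding and ignored

acc = build_accuracy(logits, labels, mask,
                     loss_type='exponent_neg_manhattan_distance_mse')
# rint(logits) -> [0., 1., 1., 0.]; all three unmasked positions match the
# labels, so acc evaluates to approximately 1.0.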
Example 2: opencv_wrapper
# Required module import: import tensorflow [as alias]
# Or: from tensorflow import rint [as alias]
def opencv_wrapper(imgs, opencv_func, argv):
    ret_imgs = []
    imgs_copy = imgs

    if imgs.shape[3] == 1:
        imgs_copy = np.squeeze(imgs)

    for img in imgs_copy:
        img_uint8 = np.clip(np.rint(img * 255), 0, 255).astype(np.uint8)
        ret_img = opencv_func(*[img_uint8] + argv)
        if type(ret_img) == tuple:
            ret_img = ret_img[1]
        ret_img = ret_img.astype(np.float32) / 255.
        ret_imgs.append(ret_img)
    ret_imgs = np.stack(ret_imgs)

    if imgs.shape[3] == 1:
        ret_imgs = np.expand_dims(ret_imgs, axis=3)
    return ret_imgs

# Binary filters.
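A hedged usage sketch for opencv_wrapper (the filter choice and image shapes are assumptions, not from the source): opencv_func is called with each uint8 image followed by the positional arguments in argv, so any OpenCV filter with that calling convention can be applied batch-wise.

import numpy as np
import cv2  # assumed to be installed

# Hypothetical batch of 4 grayscale images in [0, 1], shape (N, H, W, 1).
imgs = np.random.rand(4, 28, 28, 1).astype(np.float32)

# cv2.GaussianBlur(img, ksize, sigmaX) returns a single image (not a tuple),
# so the wrapper rescales it back to [0, 1] and restores the channel axis.
blurred = opencv_wrapper(imgs, cv2.GaussianBlur, [(3, 3), 0])
print(blurred.shape)  # (4, 28, 28, 1)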
Example 3: _smallest_size_at_least
# Required module import: import tensorflow [as alias]
# Or: from tensorflow import rint [as alias]
def _smallest_size_at_least(height, width, smallest_side):
    """Computes new shape with the smallest side equal to `smallest_side`.

    Computes new shape with the smallest side equal to `smallest_side` while
    preserving the original aspect ratio.

    Args:
        height: an int32 scalar tensor indicating the current height.
        width: an int32 scalar tensor indicating the current width.
        smallest_side: A python integer or scalar `Tensor` indicating the size of
            the smallest side after resize.

    Returns:
        new_height: an int32 scalar tensor indicating the new height.
        new_width: an int32 scalar tensor indicating the new width.
    """
    smallest_side = tf.convert_to_tensor(smallest_side, dtype=tf.int32)

    height = tf.to_float(height)
    width = tf.to_float(width)
    smallest_side = tf.to_float(smallest_side)

    scale = tf.cond(tf.greater(height, width),
                    lambda: smallest_side / width,
                    lambda: smallest_side / height)
    new_height = tf.to_int32(tf.rint(height * scale))
    new_width = tf.to_int32(tf.rint(width * scale))
    return new_height, new_width
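For intuition, a concrete (purely illustrative) input: a 480x640 image resized so that its smaller side becomes 256 keeps its aspect ratio, with tf.rint snapping the scaled lengths to whole pixels.

# height = 480, width = 640, smallest_side = 256:
#   height > width is False, so scale = 256 / 480 ≈ 0.5333
#   new_height = rint(480 * scale) = 256
#   new_width  = rint(640 * scale) = rint(341.33) = 341
new_h, new_w = _smallest_size_at_least(tf.constant(480), tf.constant(640), 256)
# new_h -> 256, new_w -> 341 once evaluated (tf.to_float / tf.to_int32 are TF 1.x ops,
# so this sketch assumes a TF 1.x session).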
Example 4: __init__
# Required module import: import tensorflow [as alias]
# Or: from tensorflow import rint [as alias]
def __init__(
        self, sequence_length, vocab_size, embedding_size, hidden_units,
        l2_reg_lambda, batch_size, trainableEmbeddings):
    # Placeholders for input, output and dropout
    self.input_x1 = tf.placeholder(tf.int32, [None, sequence_length], name="input_x1")
    self.input_x2 = tf.placeholder(tf.int32, [None, sequence_length], name="input_x2")
    self.input_y = tf.placeholder(tf.float32, [None], name="input_y")
    self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")

    # Keeping track of l2 regularization loss (optional)
    l2_loss = tf.constant(0.0, name="l2_loss")

    # Embedding layer
    with tf.name_scope("embedding"):
        self.W = tf.Variable(
            tf.constant(0.0, shape=[vocab_size, embedding_size]),
            trainable=trainableEmbeddings, name="W")
        self.embedded_words1 = tf.nn.embedding_lookup(self.W, self.input_x1)
        self.embedded_words2 = tf.nn.embedding_lookup(self.W, self.input_x2)
    print(self.embedded_words1)

    # Encode both sides with a stacked RNN sharing the same hyperparameters
    with tf.name_scope("output"):
        self.out1 = self.stackedRNN(self.embedded_words1, self.dropout_keep_prob, "side1",
                                    embedding_size, sequence_length, hidden_units)
        self.out2 = self.stackedRNN(self.embedded_words2, self.dropout_keep_prob, "side2",
                                    embedding_size, sequence_length, hidden_units)
        self.distance = tf.sqrt(tf.reduce_sum(tf.square(tf.subtract(self.out1, self.out2)), 1, keep_dims=True))
        self.distance = tf.div(self.distance,
                               tf.add(tf.sqrt(tf.reduce_sum(tf.square(self.out1), 1, keep_dims=True)),
                                      tf.sqrt(tf.reduce_sum(tf.square(self.out2), 1, keep_dims=True))))
        self.distance = tf.reshape(self.distance, [-1], name="distance")
    with tf.name_scope("loss"):
        self.loss = self.contrastive_loss(self.input_y, self.distance, batch_size)
    #### Accuracy computation is outside of this class.
    with tf.name_scope("accuracy"):
        self.temp_sim = tf.subtract(tf.ones_like(self.distance), tf.rint(self.distance),
                                    name="temp_sim")  # auto threshold 0.5
        correct_predictions = tf.equal(self.temp_sim, self.input_y)
        self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
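The accuracy scope above uses tf.rint as an implicit 0.5 threshold: the normalized distance lies in [0, 1], rint maps it to 0 or 1, and subtracting from 1 turns "small distance" into "predicted similar". A tiny numeric sketch with made-up distances:

distance = tf.constant([0.12, 0.47, 0.51, 0.93])   # hypothetical normalized distances
temp_sim = tf.subtract(tf.ones_like(distance), tf.rint(distance))
# rint(distance) -> [0., 0., 1., 1.], so temp_sim -> [1., 1., 0., 0.]:
# pairs whose distance is below 0.5 are predicted similar (label 1).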
Example 5: __init__
# Required module import: import tensorflow [as alias]
# Or: from tensorflow import rint [as alias]
def __init__(
        self, sequence_length, vocab_size, embedding_size, hidden_units,
        l2_reg_lambda, batch_size):
    # Placeholders for input, output and dropout
    self.input_x1 = tf.placeholder(tf.int32, [None, sequence_length], name="input_x1")
    self.input_x2 = tf.placeholder(tf.int32, [None, sequence_length], name="input_x2")
    self.input_y = tf.placeholder(tf.float32, [None], name="input_y")
    self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")

    # Keeping track of l2 regularization loss (optional)
    l2_loss = tf.constant(0.0, name="l2_loss")

    # Embedding layer
    with tf.name_scope("embedding"):
        self.W = tf.Variable(
            tf.random_uniform([vocab_size, embedding_size], -1.0, 1.0),
            trainable=True, name="W")
        self.embedded_chars1 = tf.nn.embedding_lookup(self.W, self.input_x1)
        # self.embedded_chars_expanded1 = tf.expand_dims(self.embedded_chars1, -1)
        self.embedded_chars2 = tf.nn.embedding_lookup(self.W, self.input_x2)
        # self.embedded_chars_expanded2 = tf.expand_dims(self.embedded_chars2, -1)

    # Encode both sides with a bidirectional RNN sharing the same hyperparameters
    with tf.name_scope("output"):
        self.out1 = self.BiRNN(self.embedded_chars1, self.dropout_keep_prob, "side1",
                               embedding_size, sequence_length, hidden_units)
        self.out2 = self.BiRNN(self.embedded_chars2, self.dropout_keep_prob, "side2",
                               embedding_size, sequence_length, hidden_units)
        self.distance = tf.sqrt(tf.reduce_sum(tf.square(tf.subtract(self.out1, self.out2)), 1, keep_dims=True))
        self.distance = tf.div(self.distance,
                               tf.add(tf.sqrt(tf.reduce_sum(tf.square(self.out1), 1, keep_dims=True)),
                                      tf.sqrt(tf.reduce_sum(tf.square(self.out2), 1, keep_dims=True))))
        self.distance = tf.reshape(self.distance, [-1], name="distance")
    with tf.name_scope("loss"):
        self.loss = self.contrastive_loss(self.input_y, self.distance, batch_size)
    #### Accuracy computation is outside of this class.
    with tf.name_scope("accuracy"):
        self.temp_sim = tf.subtract(tf.ones_like(self.distance), tf.rint(self.distance),
                                    name="temp_sim")  # auto threshold 0.5
        correct_predictions = tf.equal(self.temp_sim, self.input_y)
        self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
Example 6: feature_squeeze
# Required module import: import tensorflow [as alias]
# Or: from tensorflow import rint [as alias]
def feature_squeeze(images, dataset='cifar'):
    # color depth reduction
    if dataset == 'cifar':
        npp = 2 ** 5
    elif dataset == 'mnist':
        npp = 2 ** 3

    npp_int = npp - 1
    images = images / 255.
    x_int = tf.rint(tf.multiply(images, npp_int))
    x_float = tf.div(x_int, npp_int)
    return median_filtering_2x2(x_float, dataset=dataset)
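To make the color-depth reduction concrete (a sketch with made-up pixel values, written with the numpy equivalents of the graph ops above): the function first divides the raw [0, 255] inputs by 255, then npp = 2**5 snaps every CIFAR channel value onto a grid of 32 levels.

import numpy as np

npp_int = 2 ** 5 - 1                           # 31 steps above zero for CIFAR
pixels = np.array([0.00, 0.33, 0.505, 1.00])   # values already scaled to [0, 1]
squeezed = np.rint(pixels * npp_int) / npp_int
# -> [0.0, 0.3226, 0.5161, 1.0]: every value lands on a k/31 grid point.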
Example 7: build_accuracy
# Required module import: import tensorflow [as alias]
# Or: from tensorflow import rint [as alias]
def build_accuracy(logits, labels, mask):
    temp_sim = tf.rint(logits)
    correct = tf.equal(
        tf.cast(temp_sim, tf.float32),
        tf.cast(labels, tf.float32)
    )
    accuracy = tf.reduce_sum(tf.cast(correct, tf.float32) * mask) / (1e-10 + tf.reduce_sum(mask))
    return accuracy
Example 8: quantize
# Required module import: import tensorflow [as alias]
# Or: from tensorflow import rint [as alias]
def quantize(x):
    abs_value = tf.abs(x)
    vmax = tf.reduce_max(abs_value)
    s = tf.divide(vmax, 127.)
    x = tf.divide(x, s)
    x = tf.rint(x)
    return x, s
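This is a symmetric max-abs quantizer: the scale s maps the largest magnitude to 127, and tf.rint snaps the rescaled values to integers. A hedged round-trip sketch (the tensor values are made up); multiplying back by s recovers an approximation of the input:

w = tf.constant([-0.8, 0.1, 0.5])
q, s = quantize(w)
# vmax = 0.8, s = 0.8 / 127 ≈ 0.0063
# q = rint(w / s) -> [-127., 16., 79.]
w_restored = q * s  # ≈ [-0.8, 0.1008, 0.4976]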
Example 9: reduce_precision_py
# Required module import: import tensorflow [as alias]
# Or: from tensorflow import rint [as alias]
def reduce_precision_py(x, npp):
    """
    Reduce the precision of image, the numpy version.
    :param x: a float tensor, which has been scaled to [0, 1].
    :param npp: number of possible values per pixel. E.g. it's 256 for 8-bit gray-scale image, and 2 for binarized image.
    :return: a tensor representing image(s) with lower precision.
    """
    # Note: 0 is a possible value too.
    npp_int = npp - 1
    x_int = np.rint(x * npp_int)
    x_float = x_int / npp_int
    return x_float
Example 10: reduce_precision_tf
# Required module import: import tensorflow [as alias]
# Or: from tensorflow import rint [as alias]
def reduce_precision_tf(x, npp):
    """
    Reduce the precision of image, the tensorflow version.
    """
    npp_int = npp - 1
    x_int = tf.rint(tf.multiply(x, npp_int))
    x_float = tf.div(x_int, npp_int)
    return x_float
Example 11: reduce_precision_np
# Required module import: import tensorflow [as alias]
# Or: from tensorflow import rint [as alias]
def reduce_precision_np(x, npp):
    """
    Reduce the precision of image, the numpy version.
    :param x: a float tensor, which has been scaled to [0, 1].
    :param npp: number of possible values per pixel. E.g. it's 256 for 8-bit gray-scale image, and 2 for binarized image.
    :return: a tensor representing image(s) with lower precision.
    """
    # Note: 0 is a possible value too.
    npp_int = npp - 1
    x_int = np.rint(x * npp_int)
    x_float = x_int / npp_int
    return x_float
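Examples 9-11 implement the same precision reduction in numpy and TensorFlow; with npp = 2 a pixel is simply binarized at (roughly) 0.5. A quick consistency check with made-up values (the TF version uses 1.x-style ops such as tf.div, so it assumes a tf.Session under TF 1.x):

import numpy as np

x = np.array([0.1, 0.49, 0.51, 0.9], dtype=np.float32)
print(reduce_precision_np(x, npp=2))               # [0. 0. 1. 1.]
x_tf = reduce_precision_tf(tf.constant(x), npp=2)  # same values as a tensor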