This article compiles typical usage examples of the tensorflow.subtract method in Python. If you are wondering what tensorflow.subtract does, how to use it, or what real-world code using it looks like, the curated examples below may help. You can also explore further usage examples from the tensorflow module in which this method is defined.
Shown below are 15 code examples of tensorflow.subtract, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
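For quick orientation before the examples, here is a minimal stand-alone sketch of tf.subtract (TensorFlow 1.x graph mode assumed; the tensors and values are illustrative, not taken from the examples below):

import tensorflow as tf

a = tf.constant([3.0, 5.0])
b = tf.constant([1.0, 2.0])
diff = tf.subtract(a, b)  # element-wise a - b, with the usual broadcasting rules

with tf.Session() as sess:
    print(sess.run(diff))  # [2. 3.]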
Example 1: preprocess_image

# Required module: import tensorflow [as alias]
# Or: from tensorflow import subtract [as alias]
def preprocess_image(image, output_height, output_width, is_training):
    """Preprocesses the given image.

    Args:
      image: A `Tensor` representing an image of arbitrary size.
      output_height: The height of the image after preprocessing.
      output_width: The width of the image after preprocessing.
      is_training: `True` if we're preprocessing the image for training and
        `False` otherwise.

    Returns:
      A preprocessed image.
    """
    image = tf.to_float(image)
    image = tf.image.resize_image_with_crop_or_pad(
        image, output_width, output_height)
    image = tf.subtract(image, 128.0)
    image = tf.div(image, 128.0)
    return image
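Subtracting 128.0 and then dividing by 128.0 maps uint8 pixel values from [0, 255] into roughly [-1, 1], centering the inputs around zero.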
Example 2: flip_boxes

# Required module: import tensorflow [as alias]
# Or: from tensorflow import subtract [as alias]
def flip_boxes(boxes):
    """Left-right flip the boxes.

    Args:
      boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
        Boxes are in normalized form, meaning their coordinates vary
        between [0, 1].
        Each row is in the form of [ymin, xmin, ymax, xmax].

    Returns:
      Flipped boxes.
    """
    # Flip boxes.
    ymin, xmin, ymax, xmax = tf.split(value=boxes, num_or_size_splits=4, axis=1)
    flipped_xmin = tf.subtract(1.0, xmax)
    flipped_xmax = tf.subtract(1.0, xmin)
    flipped_boxes = tf.concat([ymin, flipped_xmin, ymax, flipped_xmax], 1)
    return flipped_boxes
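For example, a normalized box [ymin=0.1, xmin=0.2, ymax=0.4, xmax=0.5] becomes [0.1, 0.5, 0.4, 0.8] after the left-right flip, since the new xmin is 1.0 - 0.5 and the new xmax is 1.0 - 0.2.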
Example 3: __init__

# Required module: import tensorflow [as alias]
# Or: from tensorflow import subtract [as alias]
def __init__(self, n_input, n_hidden, transfer_function=tf.nn.softplus, optimizer=tf.train.AdamOptimizer(),
             scale=0.1):
    self.n_input = n_input
    self.n_hidden = n_hidden
    self.transfer = transfer_function
    self.scale = tf.placeholder(tf.float32)
    self.training_scale = scale
    network_weights = self._initialize_weights()
    self.weights = network_weights

    # model
    self.x = tf.placeholder(tf.float32, [None, self.n_input])
    self.hidden = self.transfer(tf.add(tf.matmul(self.x + scale * tf.random_normal((n_input,)),
                                                 self.weights['w1']),
                                       self.weights['b1']))
    self.reconstruction = tf.add(tf.matmul(self.hidden, self.weights['w2']), self.weights['b2'])

    # cost
    self.cost = 0.5 * tf.reduce_sum(tf.pow(tf.subtract(self.reconstruction, self.x), 2.0))
    self.optimizer = optimizer.minimize(self.cost)

    init = tf.global_variables_initializer()
    self.sess = tf.Session()
    self.sess.run(init)
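A minimal training-step sketch for this constructor (the enclosing class name AdditiveGaussianNoiseAutoencoder and the 784-dimensional, MNIST-style input are assumptions for illustration, not part of the snippet):

# X_batch: a NumPy array of shape [batch_size, 784] (assumed)
autoencoder = AdditiveGaussianNoiseAutoencoder(n_input=784, n_hidden=200, scale=0.01)
cost, _ = autoencoder.sess.run((autoencoder.cost, autoencoder.optimizer),
                               feed_dict={autoencoder.x: X_batch})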
Example 4: _build_qnet

# Required module: import tensorflow [as alias]
# Or: from tensorflow import subtract [as alias]
def _build_qnet(self):
    """
    Build the Q-network.
    """
    with tf.variable_scope(self.scope):
        self.state_input = tf.placeholder(tf.float32, [None, self.state_size])
        self.action = tf.placeholder(tf.int32, [None])
        self.target_q = tf.placeholder(tf.float32, [None])

        fc1 = tf_utils.fc(self.state_input, n_output=self.n_hidden_1, activation_fn=tf.nn.relu)
        fc2 = tf_utils.fc(fc1, n_output=self.n_hidden_2, activation_fn=tf.nn.relu)
        self.q_values = tf_utils.fc(fc2, self.action_size, activation_fn=None)

        action_mask = tf.one_hot(self.action, self.action_size, 1.0, 0.0)
        q_value_pred = tf.reduce_sum(self.q_values * action_mask, 1)

        self.loss = tf.reduce_mean(tf.square(tf.subtract(self.target_q, q_value_pred)))
        self.optimizer = tf.train.AdamOptimizer(self.lr)
        self.train_op = self.optimizer.minimize(self.loss, global_step=tf.contrib.framework.get_global_step())
Example 5: get_value_updater

# Required module: import tensorflow [as alias]
# Or: from tensorflow import subtract [as alias]
def get_value_updater(self, data, new_mean, gamma_weighted, gamma_sum):
    tf_new_differences = tf.subtract(data, tf.expand_dims(new_mean, 0))
    tf_sq_dist_matrix = tf.matmul(tf.expand_dims(tf_new_differences, 2), tf.expand_dims(tf_new_differences, 1))
    tf_new_covariance = tf.reduce_sum(tf_sq_dist_matrix * tf.expand_dims(tf.expand_dims(gamma_weighted, 1), 2), 0)

    if self.has_prior:
        tf_new_covariance = self.get_prior_adjustment(tf_new_covariance, gamma_sum)

    tf_s, tf_u, _ = tf.svd(tf_new_covariance)

    tf_required_eigvals = tf_s[:self.rank]
    tf_required_eigvecs = tf_u[:, :self.rank]

    tf_new_baseline = (tf.trace(tf_new_covariance) - tf.reduce_sum(tf_required_eigvals)) / self.tf_rest
    tf_new_eigvals = tf_required_eigvals - tf_new_baseline
    tf_new_eigvecs = tf.transpose(tf_required_eigvecs)

    return tf.group(
        self.tf_baseline.assign(tf_new_baseline),
        self.tf_eigvals.assign(tf_new_eigvals),
        self.tf_eigvecs.assign(tf_new_eigvecs)
    )
Example 6: _flip_boxes_left_right

# Required module: import tensorflow [as alias]
# Or: from tensorflow import subtract [as alias]
def _flip_boxes_left_right(boxes):
    """Left-right flip the boxes.

    Args:
      boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
        Boxes are in normalized form, meaning their coordinates vary
        between [0, 1].
        Each row is in the form of [ymin, xmin, ymax, xmax].

    Returns:
      Flipped boxes.
    """
    ymin, xmin, ymax, xmax = tf.split(value=boxes, num_or_size_splits=4, axis=1)
    flipped_xmin = tf.subtract(1.0, xmax)
    flipped_xmax = tf.subtract(1.0, xmin)
    flipped_boxes = tf.concat([ymin, flipped_xmin, ymax, flipped_xmax], 1)
    return flipped_boxes
Example 7: _flip_boxes_up_down

# Required module: import tensorflow [as alias]
# Or: from tensorflow import subtract [as alias]
def _flip_boxes_up_down(boxes):
    """Up-down flip the boxes.

    Args:
      boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
        Boxes are in normalized form, meaning their coordinates vary
        between [0, 1].
        Each row is in the form of [ymin, xmin, ymax, xmax].

    Returns:
      Flipped boxes.
    """
    ymin, xmin, ymax, xmax = tf.split(value=boxes, num_or_size_splits=4, axis=1)
    flipped_ymin = tf.subtract(1.0, ymax)
    flipped_ymax = tf.subtract(1.0, ymin)
    flipped_boxes = tf.concat([flipped_ymin, xmin, flipped_ymax, xmax], 1)
    return flipped_boxes
Example 8: _rot90_boxes

# Required module: import tensorflow [as alias]
# Or: from tensorflow import subtract [as alias]
def _rot90_boxes(boxes):
    """Rotate boxes counter-clockwise by 90 degrees.

    Args:
      boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
        Boxes are in normalized form, meaning their coordinates vary
        between [0, 1].
        Each row is in the form of [ymin, xmin, ymax, xmax].

    Returns:
      Rotated boxes.
    """
    ymin, xmin, ymax, xmax = tf.split(value=boxes, num_or_size_splits=4, axis=1)
    rotated_ymin = tf.subtract(1.0, xmax)
    rotated_ymax = tf.subtract(1.0, xmin)
    rotated_xmin = ymin
    rotated_xmax = ymax
    rotated_boxes = tf.concat(
        [rotated_ymin, rotated_xmin, rotated_ymax, rotated_xmax], 1)
    return rotated_boxes
Example 9: normalize

# Required module: import tensorflow [as alias]
# Or: from tensorflow import subtract [as alias]
def normalize(gt_image, gt_binary_image, gt_instance_image):
    """
    Normalize the image data by scaling pixel values into the range [-1, 1].
    :param gt_image:
    :param gt_binary_image:
    :param gt_instance_image:
    :return:
    """
    if gt_image.get_shape().as_list()[-1] != 3 \
            or gt_binary_image.get_shape().as_list()[-1] != 1 \
            or gt_instance_image.get_shape().as_list()[-1] != 1:
        log.error(gt_image.get_shape())
        log.error(gt_binary_image.get_shape())
        log.error(gt_instance_image.get_shape())
        raise ValueError('Input must be of size [height, width, C>0]')

    gt_image = tf.cast(gt_image, dtype=tf.float32)
    gt_image = tf.subtract(tf.divide(gt_image, tf.constant(127.5, dtype=tf.float32)),
                           tf.constant(1.0, dtype=tf.float32))

    return gt_image, gt_binary_image, gt_instance_image
Example 10: _extract_features_batch

# Required module: import tensorflow [as alias]
# Or: from tensorflow import subtract [as alias]
def _extract_features_batch(self, serialized_batch):
    features = tf.parse_example(
        serialized_batch,
        features={'images': tf.FixedLenFeature([], tf.string),
                  'imagepaths': tf.FixedLenFeature([], tf.string),
                  'labels': tf.VarLenFeature(tf.int64),
                  })
    bs = features['images'].shape[0]
    images = tf.decode_raw(features['images'], tf.uint8)
    w, h = tuple(CFG.ARCH.INPUT_SIZE)
    images = tf.cast(x=images, dtype=tf.float32)
    # images = tf.subtract(tf.divide(images, 128.0), 1.0)
    images = tf.reshape(images, [bs, h, -1, CFG.ARCH.INPUT_CHANNELS])

    labels = features['labels']
    labels = tf.cast(labels, tf.int32)

    imagepaths = features['imagepaths']

    return images, labels, imagepaths

Author: Mingtzge | Project: 2019-CCF-BDCI-OCR-MCZJ-OCR-IdentificationIDElement | Lines of code: 23 | Source file: read_tfrecord.py
Example 11: log_coral_loss

# Required module: import tensorflow [as alias]
# Or: from tensorflow import subtract [as alias]
def log_coral_loss(self, h_src, h_trg, gamma=1e-3):
    # Regularized covariances can result in inf or nan.
    # First: subtract the mean from the data matrix.
    batch_size = tf.to_float(tf.shape(h_src)[0])
    h_src = h_src - tf.reduce_mean(h_src, axis=0)
    h_trg = h_trg - tf.reduce_mean(h_trg, axis=0)
    cov_source = (1. / (batch_size - 1)) * tf.matmul(h_src, h_src, transpose_a=True)  # + gamma * tf.eye(self.hidden_repr_size)
    cov_target = (1. / (batch_size - 1)) * tf.matmul(h_trg, h_trg, transpose_a=True)  # + gamma * tf.eye(self.hidden_repr_size)

    # Eigendecomposition of the two covariance matrices.
    eig_source = tf.self_adjoint_eig(cov_source)
    eig_target = tf.self_adjoint_eig(cov_target)
    log_cov_source = tf.matmul(eig_source[1], tf.matmul(tf.diag(tf.log(eig_source[0])), eig_source[1], transpose_b=True))
    log_cov_target = tf.matmul(eig_target[1], tf.matmul(tf.diag(tf.log(eig_target[0])), eig_target[1], transpose_b=True))

    # Return the mean squared element-wise difference (proportional to the squared Frobenius norm).
    return tf.reduce_mean(tf.square(tf.subtract(log_cov_source, log_cov_target)))
    # ~ return tf.reduce_mean(tf.reduce_max(eig_target[0]))
    # ~ return tf.to_float(tf.equal(tf.count_nonzero(h_src), tf.count_nonzero(h_src)))
Example 12: bi_linear_sample

# Required module: import tensorflow [as alias]
# Or: from tensorflow import subtract [as alias]
def bi_linear_sample(self, img_feat, n, x, y):
    x1 = tf.floor(x)
    x2 = tf.ceil(x)
    y1 = tf.floor(y)
    y2 = tf.ceil(y)
    Q11 = tf.gather_nd(img_feat, tf.stack([n, tf.cast(x1, tf.int32), tf.cast(y1, tf.int32)], 1))
    Q12 = tf.gather_nd(img_feat, tf.stack([n, tf.cast(x1, tf.int32), tf.cast(y2, tf.int32)], 1))
    Q21 = tf.gather_nd(img_feat, tf.stack([n, tf.cast(x2, tf.int32), tf.cast(y1, tf.int32)], 1))
    Q22 = tf.gather_nd(img_feat, tf.stack([n, tf.cast(x2, tf.int32), tf.cast(y2, tf.int32)], 1))

    weights = tf.multiply(tf.subtract(x2, x), tf.subtract(y2, y))
    Q11 = tf.multiply(tf.expand_dims(weights, 1), Q11)
    weights = tf.multiply(tf.subtract(x, x1), tf.subtract(y2, y))
    Q21 = tf.multiply(tf.expand_dims(weights, 1), Q21)
    weights = tf.multiply(tf.subtract(x2, x), tf.subtract(y, y1))
    Q12 = tf.multiply(tf.expand_dims(weights, 1), Q12)
    weights = tf.multiply(tf.subtract(x, x1), tf.subtract(y, y1))
    Q22 = tf.multiply(tf.expand_dims(weights, 1), Q22)

    outputs = tf.add_n([Q11, Q21, Q12, Q22])
    return outputs
Example 13: orthogonal_regularizer

# Required module: import tensorflow [as alias]
# Or: from tensorflow import subtract [as alias]
def orthogonal_regularizer(scale):
    """Define the orthogonal regularizer and return the inner function, to be used as a kernel regularizer in a Conv layer."""

    def ortho_reg(w):
        """Reshape the matrix into a 2D tensor for enforcing orthogonality."""
        _, _, _, c = w.get_shape().as_list()
        w = tf.reshape(w, [-1, c])

        """Declare an identity tensor of appropriate size."""
        identity = tf.eye(c)

        """Regularizer: Wt*W - I."""
        w_transpose = tf.transpose(w)
        w_mul = tf.matmul(w_transpose, w)
        reg = tf.subtract(w_mul, identity)

        """Calculate the resulting loss."""
        ortho_loss = tf.nn.l2_loss(reg)

        return scale * ortho_loss

    return ortho_reg
Example 14: orthogonal_regularizer_fully

# Required module: import tensorflow [as alias]
# Or: from tensorflow import subtract [as alias]
def orthogonal_regularizer_fully(scale):
    """Define the orthogonal regularizer and return the inner function, to be used in a fully connected layer."""

    def ortho_reg_fully(w):
        """Reshape the matrix into a 2D tensor for enforcing orthogonality."""
        _, c = w.get_shape().as_list()

        """Declare an identity tensor of appropriate size."""
        identity = tf.eye(c)
        w_transpose = tf.transpose(w)
        w_mul = tf.matmul(w_transpose, w)
        reg = tf.subtract(w_mul, identity)

        """Calculate the loss."""
        ortho_loss = tf.nn.l2_loss(reg)

        return scale * ortho_loss

    return ortho_reg_fully
Example 15: triplet_loss

# Required module: import tensorflow [as alias]
# Or: from tensorflow import subtract [as alias]
def triplet_loss(anchor, positive, negative, alpha):
    """Calculate the triplet loss according to the FaceNet paper.

    Args:
      anchor: the embeddings for the anchor images.
      positive: the embeddings for the positive images.
      negative: the embeddings for the negative images.
      alpha: the margin enforced between the positive and negative distances.

    Returns:
      the triplet loss according to the FaceNet paper as a float tensor.
    """
    with tf.variable_scope('triplet_loss'):
        pos_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, positive)), 1)
        neg_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, negative)), 1)

        basic_loss = tf.add(tf.subtract(pos_dist, neg_dist), alpha)
        loss = tf.reduce_mean(tf.maximum(basic_loss, 0.0), 0)

    return loss
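A minimal usage sketch (the 128-dimensional embeddings and the margin value are hypothetical, chosen only to show the call):

anchor = tf.placeholder(tf.float32, [None, 128])
positive = tf.placeholder(tf.float32, [None, 128])
negative = tf.placeholder(tf.float32, [None, 128])
loss = triplet_loss(anchor, positive, negative, alpha=0.2)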