This article collects typical usage examples of the tensorflow.keras.backend.min method in Python. If you are wondering exactly how backend.min is used, how to call it, or what real-world code that relies on it looks like, the curated examples below may help. You can also look further into other usage examples from the module it belongs to, tensorflow.keras.backend.
Shown below are 13 code examples of the backend.min method, sorted by popularity by default.
Example 1: _get_min_max_exponents
# Required import: from tensorflow.keras import backend [as alias]
# Or alternatively: from tensorflow.keras.backend import min [as alias]
def _get_min_max_exponents(non_sign_bits, need_exponent_sign_bit,
                           quadratic_approximation):
    """Given a bitwidth, gets the min and max exponents that it can represent.

    Args:
        non_sign_bits: An integer representing the bitwidth of the exponent.
        need_exponent_sign_bit: An integer representing whether a sign bit is
            needed in the exponent. (1: sign bit needed. 0: sign bit not needed.)
        quadratic_approximation: A boolean representing whether the quadratic
            approximation method is enforced.

    Returns:
        A tuple of integers: (min_exp, max_exp)
    """
    effect_bits = non_sign_bits - need_exponent_sign_bit
    min_exp = -2**(effect_bits)
    max_exp = 2**(effect_bits) - 1
    if quadratic_approximation:
        max_exp = 2 * (max_exp // 2)
    return min_exp, max_exp
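A quick sanity check of the exponent bounds above (a minimal sketch; the input values are hypothetical, not taken from the original project):

# 4 exponent bits, one of them used as a sign bit -> 3 effective bits.
print(_get_min_max_exponents(4, 1, False))  # (-8, 7)
# The quadratic approximation forces the maximum exponent to be even.
print(_get_min_max_exponents(4, 1, True))   # (-8, 6)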
Example 2: min
# Required import: from tensorflow.keras import backend [as alias]
# Or alternatively: from tensorflow.keras.backend import min [as alias]
def min(self):
    """Get the minimum value that the quantized_bits class can represent."""
    if not self.keep_negative:
        return 0.0
    unsigned_bits = self.bits - self.keep_negative
    if unsigned_bits > 0:
        return -max(1.0, np.power(2.0, self.integer))
    else:
        return -1.0
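This method is defined on a quantizer class that exposes bits, integer and keep_negative attributes (the surrounding class is not shown here). A standalone restatement of the same arithmetic, for illustration only and not part of the original class:

import numpy as np

def quantized_min(bits, integer, keep_negative):
    # Standalone restatement of the method above, for illustration only.
    if not keep_negative:
        return 0.0
    unsigned_bits = bits - keep_negative
    if unsigned_bits > 0:
        return -max(1.0, np.power(2.0, integer))
    return -1.0

print(quantized_min(bits=8, integer=2, keep_negative=1))  # -4.0
print(quantized_min(bits=4, integer=0, keep_negative=0))  #  0.0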
Example 3: pn_loss
# Required import: from tensorflow.keras import backend [as alias]
# Or alternatively: from tensorflow.keras.backend import min [as alias]
def pn_loss(margin=1):
    def _pn_loss(y_true, y_pred):
        anchor, positive, negative = tf.unstack(y_pred)
        anchor_positive_distance = _euclidean_distance(anchor, positive)
        anchor_negative_distance = _euclidean_distance(anchor, negative)
        positive_negative_distance = _euclidean_distance(positive, negative)
        minimum_distance = K.min(K.concatenate([anchor_negative_distance,
                                                positive_negative_distance]),
                                 axis=-1, keepdims=True)
        return K.mean(K.maximum(anchor_positive_distance - minimum_distance + margin, 0))
    return _pn_loss
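The _euclidean_distance helper is not shown on this page. Below is a hedged sketch of a definition consistent with how the loss uses it (a per-sample distance with a trailing axis of size 1), plus a toy invocation; both are assumptions for illustration, not the original project's code:

import tensorflow as tf
from tensorflow.keras import backend as K

def _euclidean_distance(a, b):
    # Assumed helper: per-sample Euclidean distance, shape (batch, 1).
    return K.sqrt(K.maximum(K.sum(K.square(a - b), axis=-1, keepdims=True),
                            K.epsilon()))

# y_pred stacks (anchor, positive, negative) embeddings along axis 0.
embeddings = tf.random.normal((3, 4, 8))  # 3 roles, batch of 4, embedding dim 8
print(float(pn_loss(margin=1)(None, embeddings)))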
Example 4: manhattan_pn_loss
# Required import: from tensorflow.keras import backend [as alias]
# Or alternatively: from tensorflow.keras.backend import min [as alias]
def manhattan_pn_loss(margin=1):
    def _pn_loss(y_true, y_pred):
        anchor, positive, negative = tf.unstack(y_pred)
        anchor_positive_distance = _manhattan_distance(anchor, positive)
        anchor_negative_distance = _manhattan_distance(anchor, negative)
        positive_negative_distance = _manhattan_distance(positive, negative)
        minimum_distance = K.min(K.concatenate([anchor_negative_distance,
                                                positive_negative_distance]),
                                 axis=-1, keepdims=True)
        return K.mean(K.maximum(anchor_positive_distance - minimum_distance + margin, 0))
    return _pn_loss
Example 5: chebyshev_pn_loss
# Required import: from tensorflow.keras import backend [as alias]
# Or alternatively: from tensorflow.keras.backend import min [as alias]
def chebyshev_pn_loss(margin=1):
    def _pn_loss(y_true, y_pred):
        anchor, positive, negative = tf.unstack(y_pred)
        anchor_positive_distance = _chebyshev_distance(anchor, positive)
        anchor_negative_distance = _chebyshev_distance(anchor, negative)
        positive_negative_distance = _chebyshev_distance(positive, negative)
        minimum_distance = K.min(K.concatenate([anchor_negative_distance,
                                                positive_negative_distance]),
                                 axis=-1, keepdims=True)
        return K.mean(K.maximum(anchor_positive_distance - minimum_distance + margin, 0))
    return _pn_loss
Example 6: cosine_pn_loss
# Required import: from tensorflow.keras import backend [as alias]
# Or alternatively: from tensorflow.keras.backend import min [as alias]
def cosine_pn_loss(margin=1):
    def _pn_loss(y_true, y_pred):
        anchor, positive, negative = tf.unstack(y_pred)
        anchor_positive_distance = _cosine_distance(anchor, positive)
        anchor_negative_distance = _cosine_distance(anchor, negative)
        positive_negative_distance = _cosine_distance(positive, negative)
        minimum_distance = K.min(tf.stack([anchor_negative_distance,
                                           positive_negative_distance]),
                                 axis=0, keepdims=True)
        return K.mean(K.maximum(anchor_positive_distance - minimum_distance + margin, 0))
    return _pn_loss
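Examples 4-6 differ from Example 3 only in the distance helper they call; those helpers are also not shown on this page. Plausible definitions are sketched below as assumptions, not as the original authors' code:

from tensorflow.keras import backend as K

def _manhattan_distance(a, b):
    # Assumed: L1 distance per sample, shape (batch, 1).
    return K.sum(K.abs(a - b), axis=-1, keepdims=True)

def _chebyshev_distance(a, b):
    # Assumed: L-infinity distance per sample, shape (batch, 1).
    return K.max(K.abs(a - b), axis=-1, keepdims=True)

def _cosine_distance(a, b):
    # Assumed: 1 - cosine similarity per sample, shape (batch, 1).
    a = K.l2_normalize(a, axis=-1)
    b = K.l2_normalize(b, axis=-1)
    return 1.0 - K.sum(a * b, axis=-1, keepdims=True)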
Example 7: softmax_ratio_pn
# Required import: from tensorflow.keras import backend [as alias]
# Or alternatively: from tensorflow.keras.backend import min [as alias]
def softmax_ratio_pn(y_true, y_pred):
    anchor, positive, negative = tf.unstack(y_pred)
    anchor_positive_distance = _euclidean_distance(anchor, positive)
    anchor_negative_distance = _euclidean_distance(anchor, negative)
    positive_negative_distance = _euclidean_distance(positive, negative)
    minimum_distance = K.min(K.concatenate([anchor_negative_distance,
                                            positive_negative_distance]),
                             axis=-1, keepdims=True)
    softmax = K.softmax(K.concatenate([anchor_positive_distance, minimum_distance]))
    ideal_distance = K.variable([0, 1])
    return K.mean(K.maximum(softmax - ideal_distance, 0))
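The ideal_distance of [0, 1] encodes the goal that, after the softmax over [anchor_positive_distance, minimum_distance], almost all of the mass lands on the second entry, i.e. the positive is much closer to the anchor than the closest negative pair. A small numeric illustration with made-up distances:

import numpy as np

d_ap, d_min = 0.2, 3.0      # hypothetical per-sample distances
z = np.exp([d_ap, d_min])
print(z / z.sum())          # ~[0.057, 0.943], close to the ideal [0, 1]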
Example 8: call
# Required import: from tensorflow.keras import backend [as alias]
# Or alternatively: from tensorflow.keras.backend import min [as alias]
def call(self, inputs, **kwargs):
    return K.min(inputs, axis=1)
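This call method belongs to a custom layer whose class definition is not included in the snippet. A self-contained sketch of how such a layer could look (the name GlobalMinPooling1D is hypothetical):

import tensorflow as tf
from tensorflow.keras import backend as K

class GlobalMinPooling1D(tf.keras.layers.Layer):
    """Reduces a (batch, steps, features) tensor to its per-feature minimum."""

    def call(self, inputs, **kwargs):
        return K.min(inputs, axis=1)

x = tf.random.normal((2, 5, 3))
print(GlobalMinPooling1D()(x).shape)  # (2, 3)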
Example 9: _check_series_length
# Required import: from tensorflow.keras import backend [as alias]
# Or alternatively: from tensorflow.keras.backend import min [as alias]
def _check_series_length(self, X):
    """Ensures that time series in X match the following requirements:

    - their length is greater than the size of the longest shapelet
    - (at predict time) their length is lower than the maximum allowed
      length, as set by self.max_size
    """
    sizes = numpy.array([ts_size(Xi) for Xi in X])
    self._min_sz_fit = sizes.min()

    if self.n_shapelets_per_size is not None:
        max_sz_shp = max(self.n_shapelets_per_size.keys())
        if max_sz_shp > self._min_sz_fit:
            raise ValueError("Sizes in X do not match maximum "
                             "shapelet size: there is at least one "
                             "series in X that is shorter than one of the "
                             "shapelets. Shortest time series is of "
                             "length {} and longest shapelet is of length "
                             "{}".format(self._min_sz_fit, max_sz_shp))

    if hasattr(self, 'model_') or self.max_size is not None:
        # Model is already fitted, or max_size was set explicitly
        max_sz_X = sizes.max()
        if hasattr(self, 'model_'):
            max_size = self._X_fit_dims[1]
        else:
            max_size = self.max_size
        if max_size < max_sz_X:
            raise ValueError("Sizes in X do not match maximum allowed "
                             "size as set by max_size. "
                             "Longest time series is of "
                             "length {} and max_size is "
                             "{}".format(max_sz_X, max_size))
Example 10: _batch_hard_triplet_loss
# Required import: from tensorflow.keras import backend [as alias]
# Or alternatively: from tensorflow.keras.backend import min [as alias]
def _batch_hard_triplet_loss(self, y_true: Tensor, pairwise_dist: Tensor) -> Tensor:
    # Hardest positive: the farthest same-label sample for each anchor.
    mask_anchor_positive = self._get_anchor_positive_triplet_mask(y_true, pairwise_dist)
    anchor_positive_dist = mask_anchor_positive * pairwise_dist
    hardest_positive_dist = K.max(anchor_positive_dist, axis=1, keepdims=True)

    # Hardest (semi-hard) negative: build the mask of valid negatives first.
    mask_anchor_negative = self._get_anchor_negative_triplet_mask(y_true, pairwise_dist)
    anchor_negative_dist = mask_anchor_negative * pairwise_dist
    mask_anchor_negative = self._get_semihard_anchor_negative_triplet_mask(anchor_negative_dist,
                                                                           hardest_positive_dist,
                                                                           mask_anchor_negative)
    # Push masked-out entries up by the row-wise maximum so K.min only sees valid negatives.
    max_anchor_negative_dist = K.max(pairwise_dist, axis=1, keepdims=True)
    anchor_negative_dist = pairwise_dist + max_anchor_negative_dist * (1.0 - mask_anchor_negative)
    hardest_negative_dist = K.min(anchor_negative_dist, axis=1, keepdims=True)

    triplet_loss = K.clip(hardest_positive_dist - hardest_negative_dist + self.margin, 0.0, None)
    triplet_loss = K.mean(triplet_loss)
    return triplet_loss
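The masking trick above is what makes K.min usable for hardest-negative mining: distances for invalid pairs are pushed up by the row-wise maximum before the minimum is taken, so they can never be selected. A self-contained numeric illustration (the distances and mask are made up):

from tensorflow.keras import backend as K

pairwise_dist = K.constant([[0.0, 1.0, 4.0],
                            [1.0, 0.0, 2.0],
                            [4.0, 2.0, 0.0]])
# 1.0 where the column is a valid negative for the row's anchor, else 0.0.
mask_anchor_negative = K.constant([[0.0, 1.0, 1.0],
                                   [1.0, 0.0, 1.0],
                                   [1.0, 1.0, 0.0]])
max_dist = K.max(pairwise_dist, axis=1, keepdims=True)
shifted = pairwise_dist + max_dist * (1.0 - mask_anchor_negative)
print(K.eval(K.min(shifted, axis=1, keepdims=True)))  # [[1.], [1.], [2.]]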
Example 11: yolo3_correct_boxes
# Required import: from tensorflow.keras import backend [as alias]
# Or alternatively: from tensorflow.keras.backend import min [as alias]
def yolo3_correct_boxes(box_xy, box_wh, input_shape, image_shape):
    '''Get corrected boxes'''
    input_shape = K.cast(input_shape, K.dtype(box_xy))
    image_shape = K.cast(image_shape, K.dtype(box_xy))

    # reshape the image_shape tensor to align with boxes dimension
    image_shape = K.reshape(image_shape, [-1, 1, 1, 1, 2])

    new_shape = K.round(image_shape * K.min(input_shape / image_shape))
    offset = (input_shape - new_shape) / 2. / input_shape
    scale = input_shape / new_shape
    # reverse offset/scale to match (w, h) order
    offset = offset[..., ::-1]
    scale = scale[..., ::-1]

    box_xy = (box_xy - offset) * scale
    box_wh *= scale

    box_mins = box_xy - (box_wh / 2.)
    box_maxes = box_xy + (box_wh / 2.)
    boxes = K.concatenate([
        box_mins[..., 0:1],   # x_min
        box_mins[..., 1:2],   # y_min
        box_maxes[..., 0:1],  # x_max
        box_maxes[..., 1:2]   # y_max
    ])

    # Scale boxes back to original image shape.
    image_wh = image_shape[..., ::-1]
    boxes *= K.concatenate([image_wh, image_wh])
    return boxes
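In the function above, K.min(input_shape / image_shape) yields the letterbox scaling factor: the single ratio at which the original image fits inside the network input while keeping its aspect ratio. A small numeric check with a hypothetical 640x480 image and a 416x416 network input (shapes in (height, width) order, matching the later reversal to (w, h)):

import numpy as np

input_shape = np.array([416.0, 416.0])      # network input (h, w)
image_shape = np.array([480.0, 640.0])      # original image (h, w), hypothetical
scale = (input_shape / image_shape).min()   # min(0.867, 0.650) = 0.650
new_shape = np.round(image_shape * scale)   # [312. 416.]: letterboxed content size
offset = (input_shape - new_shape) / 2. / input_shape
print(scale, new_shape, offset)             # 52 px of vertical padding on each side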
Example 12: yolo2_correct_boxes
# Required import: from tensorflow.keras import backend [as alias]
# Or alternatively: from tensorflow.keras.backend import min [as alias]
def yolo2_correct_boxes(box_xy, box_wh, input_shape, image_shape):
    '''Get corrected boxes'''
    input_shape = K.cast(input_shape, K.dtype(box_xy))
    image_shape = K.cast(image_shape, K.dtype(box_xy))

    # reshape the image_shape tensor to align with boxes dimension
    image_shape = K.reshape(image_shape, [-1, 1, 1, 1, 2])

    new_shape = K.round(image_shape * K.min(input_shape / image_shape))
    offset = (input_shape - new_shape) / 2. / input_shape
    scale = input_shape / new_shape
    # reverse offset/scale to match (w, h) order
    offset = offset[..., ::-1]
    scale = scale[..., ::-1]

    box_xy = (box_xy - offset) * scale
    box_wh *= scale

    box_mins = box_xy - (box_wh / 2.)
    box_maxes = box_xy + (box_wh / 2.)
    boxes = K.concatenate([
        box_mins[..., 0:1],   # x_min
        box_mins[..., 1:2],   # y_min
        box_maxes[..., 0:1],  # x_max
        box_maxes[..., 1:2]   # y_max
    ])

    # Scale boxes back to original image shape.
    image_wh = image_shape[..., ::-1]
    boxes *= K.concatenate([image_wh, image_wh])
    return boxes
Example 13: grabocka_params_to_shapelet_size_dict
# Required import: from tensorflow.keras import backend [as alias]
# Or alternatively: from tensorflow.keras.backend import min [as alias]
def grabocka_params_to_shapelet_size_dict(n_ts, ts_sz, n_classes, l, r):
    """Compute number and length of shapelets.

    This function uses the heuristic from [1]_.

    Parameters
    ----------
    n_ts: int
        Number of time series in the dataset
    ts_sz: int
        Length of time series in the dataset
    n_classes: int
        Number of classes in the dataset
    l: float
        Fraction of the length of time series to be used for base shapelet
        length
    r: int
        Number of different shapelet lengths to use

    Returns
    -------
    dict
        Dictionary giving, for each shapelet length, the number of such
        shapelets to be generated

    Examples
    --------
    >>> d = grabocka_params_to_shapelet_size_dict(
    ...         n_ts=100, ts_sz=100, n_classes=3, l=0.1, r=2)
    >>> keys = sorted(d.keys())
    >>> print(keys)
    [10, 20]
    >>> print([d[k] for k in keys])
    [4, 4]

    References
    ----------
    .. [1] J. Grabocka et al. Learning Time-Series Shapelets. SIGKDD 2014.
    """
    base_size = int(l * ts_sz)
    base_size = max(base_size, 1)
    r = min(r, ts_sz)
    d = {}
    for sz_idx in range(r):
        shp_sz = base_size * (sz_idx + 1)
        n_shapelets = int(numpy.log10(n_ts *
                                      (ts_sz - shp_sz + 1) *
                                      (n_classes - 1)))
        n_shapelets = max(1, n_shapelets)
        d[shp_sz] = n_shapelets
    return d
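A quick check of the arithmetic behind the doctest above: with n_ts=100, ts_sz=100, n_classes=3, l=0.1 and r=2, the base shapelet length is int(0.1 * 100) = 10, and for each length the heuristic takes int(log10(n_ts * (ts_sz - shp_sz + 1) * (n_classes - 1))) shapelets:

import numpy

for shp_sz in (10, 20):
    n_shapelets = int(numpy.log10(100 * (100 - shp_sz + 1) * (3 - 1)))
    print(shp_sz, n_shapelets)  # 10 -> 4 (log10(18200) ~ 4.26), 20 -> 4 (log10(16200) ~ 4.21)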