This article collects typical usage examples of the numpy.minimum method from Python's autograd package. If you have been wondering how exactly numpy.minimum is used, or are looking for concrete examples, the curated code samples below may help. You can also explore further usage examples of autograd.numpy, the module in which this method is defined.
The following shows 9 code examples of numpy.minimum, ordered by popularity by default.
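Before the examples, here is a minimal, self-contained sketch (not taken from the examples below; the arrays are arbitrary) showing what autograd.numpy.minimum computes and that gradients flow through it:

import autograd.numpy as np   # thin NumPy wrapper that records operations for autodiff
from autograd import grad

x = np.array([0.2, 1.5, -0.3])
y = np.array([1.0, 1.0, 1.0])
print(np.minimum(x, y))            # element-wise minimum: [0.2, 1.0, -0.3]

# Gradients flow only through the argument that attains the minimum.
f = lambda x: np.sum(np.minimum(x, 1.0))
print(grad(f)(x))                  # [1., 0., 1.]  (zero where x was capped at 1.0)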
Example 1: is_a_scores_vector_batch
# Required import: from autograd import numpy [as an alias]
# Or: from autograd.numpy import minimum [as an alias]
def is_a_scores_vector_batch(self, K, parent_vectors, other_vectors, rel_reversed):
    norm_parents = np.linalg.norm(parent_vectors, axis=1)
    norms_other = np.linalg.norm(other_vectors, axis=1)
    # Floor the distance at 1e-6: the parent can equal the child in the reconstruction
    # experiment, which would otherwise cause a division by zero below.
    euclidean_dists = np.maximum(np.linalg.norm(parent_vectors - other_vectors, axis=1), 1e-6)
    if not rel_reversed:
        cos_angles_child = (norms_other**2 - norm_parents**2 - euclidean_dists**2) / (2 * euclidean_dists * norm_parents)  # 1 + neg_size
        angles_psi_parent = np.arcsin(K / norm_parents)  # scalar
    else:
        cos_angles_child = (norm_parents**2 - norms_other**2 - euclidean_dists**2) / (2 * euclidean_dists * norms_other)  # 1 + neg_size
        angles_psi_parent = np.arcsin(K / norms_other)  # 1 + neg_size
    assert not np.isnan(cos_angles_child).any()
    # Clip the cosine into [-1 + EPS, 1 - EPS] before arccos to avoid numerical errors
    clipped_cos_angle_child = np.maximum(cos_angles_child, -1 + EPS)
    clipped_cos_angle_child = np.minimum(clipped_cos_angle_child, 1 - EPS)
    angles_child = np.arccos(clipped_cos_angle_child)  # (1 + neg_size, batch_size)
    return np.maximum(0, angles_child - angles_psi_parent)
Example 2: bound_by_data
# Required import: from autograd import numpy [as an alias]
# Or: from autograd.numpy import minimum [as an alias]
def bound_by_data(Z, Data):
    """
    Determine a lower and an upper bound for each dimension from Data, and project
    Z so that all points in Z lie within those bounds.

    Z: m x d
    Data: n x d

    Return a projected Z of size m x d.
    """
    n, d = Z.shape  # note: n here is the number of rows of Z (m in the docstring)
    Low = np.min(Data, 0)
    Up = np.max(Data, 0)
    LowMat = np.repeat(Low[np.newaxis, :], n, axis=0)
    UpMat = np.repeat(Up[np.newaxis, :], n, axis=0)
    Z = np.maximum(LowMat, Z)
    Z = np.minimum(UpMat, Z)
    return Z
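A quick usage sketch of the pattern above, with hypothetical data and assuming bound_by_data from Example 2 is in scope: candidate points that fall outside the per-dimension bounding box of the data are clipped back onto it.

import numpy as onp

onp.random.seed(0)
Data = onp.random.randn(100, 2)        # n x d reference sample
Z = 5.0 * onp.random.randn(10, 2)      # m x d candidates, some far outside the data
Z_proj = bound_by_data(Z, Data)
assert (Z_proj >= Data.min(0)).all() and (Z_proj <= Data.max(0)).all()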
Example 3: constraint_c2
# Required import: from autograd import numpy [as an alias]
# Or: from autograd.numpy import minimum [as an alias]
def constraint_c2(f, r):
    n_obj = f.shape[1]

    v1 = anp.inf * anp.ones(f.shape[0])
    for i in range(n_obj):
        temp = (f[:, i] - 1) ** 2 + (anp.sum(f ** 2, axis=1) - f[:, i] ** 2) - r ** 2
        v1 = anp.minimum(temp.flatten(), v1)

    a = 1 / anp.sqrt(n_obj)
    v2 = anp.sum((f - a) ** 2, axis=1) - r ** 2
    g = anp.minimum(v1, v2.flatten())

    return g
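The loop above folds several constraint values into a single row-wise worst case by repeatedly applying anp.minimum. A stripped-down, self-contained sketch of that fold pattern (the arrays here are arbitrary, not part of the original code):

import autograd.numpy as anp

terms = [anp.array([0.4, -0.1, 2.0]),
         anp.array([1.0,  0.3, -0.5]),
         anp.array([-2.0, 0.7, 0.1])]

g = anp.inf * anp.ones(3)      # start from +inf, as constraint_c2 does
for t in terms:
    g = anp.minimum(t, g)      # keep the smallest value seen so far, element-wise
print(g)                       # [-2.  -0.1 -0.5]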
Example 4: _compute_loss
# Required import: from autograd import numpy [as an alias]
# Or: from autograd.numpy import minimum [as an alias]
def _compute_loss(self):
    """Compute and store the loss value for the given batch of examples."""
    if self._loss_computed:
        return
    self._loss_computed = True

    self.euclidean_dists = np.linalg.norm(self.vectors_u - self.vectors_v, axis=1)  # (1 + neg_size, batch_size)
    euclidean_dists_sq = self.euclidean_dists ** 2

    if not self.rels_reversed:
        # (1 + neg_size, batch_size)
        child_numerator = self.norms_v_sq - self.norms_u_sq - euclidean_dists_sq
        self.child_numitor = 2 * self.euclidean_dists * self.norms_u  # denominator of the cosine
        self.angles_psi_parent = np.arcsin(self.K / self.norms_u)  # (1, batch_size)
    else:
        # (1 + neg_size, batch_size)
        child_numerator = self.norms_u_sq - self.norms_v_sq - euclidean_dists_sq
        self.child_numitor = 2 * self.euclidean_dists * self.norms_v  # denominator of the cosine
        self.angles_psi_parent = np.arcsin(self.K / self.norms_v)  # (1 + neg_size, batch_size)

    self.cos_angles_child = child_numerator / self.child_numitor
    # Clip into [-1 + EPS, 1 - EPS] to avoid numerical errors in arccos
    self.clipped_cos_angle_child = np.maximum(self.cos_angles_child, -1 + EPS)
    self.clipped_cos_angle_child = np.minimum(self.clipped_cos_angle_child, 1 - EPS)
    self.angles_child = np.arccos(self.clipped_cos_angle_child)  # (1 + neg_size, batch_size)

    self.angle_diff = self.angles_child - self.angles_psi_parent
    self.energy_vec = np.maximum(0, self.angle_diff)  # (1 + neg_size, batch_size)

    self.pos_loss = self.energy_vec[0].sum()
    self.neg_loss = np.maximum(0, self.margin - self.energy_vec[1:]).sum()
    self.loss = self.pos_loss + self.neg_loss
Example 5: _compute_loss
# Required import: from autograd import numpy [as an alias]
# Or: from autograd.numpy import minimum [as an alias]
def _compute_loss(self):
    """Compute and store the loss value for the given batch of examples."""
    if self._loss_computed:
        return
    self._loss_computed = True

    self.euclidean_dists = np.linalg.norm(self.vectors_u - self.vectors_v, axis=1)  # (1 + neg_size, batch_size)
    self.dot_prods = (self.vectors_u * self.vectors_v).sum(axis=1)  # (1 + neg_size, batch_size)
    self.g = 1 + self.norms_v_sq * self.norms_u_sq - 2 * self.dot_prods
    self.g_sqrt = np.sqrt(self.g)
    self.euclidean_times_sqrt_g = self.euclidean_dists * self.g_sqrt

    if not self.rels_reversed:
        # u is x, v is y
        # (1 + neg_size, batch_size)
        child_numerator = self.dot_prods * (1 + self.norms_u_sq) - self.norms_u_sq * (1 + self.norms_v_sq)
        self.child_numitor = self.euclidean_times_sqrt_g * self.norms_u  # denominator of the cosine
        self.angles_psi_parent = np.arcsin(self.K * self.one_minus_norms_sq_u / self.norms_u)  # (1, batch_size)
    else:
        # v is x, u is y
        # (1 + neg_size, batch_size)
        child_numerator = self.dot_prods * (1 + self.norms_v_sq) - self.norms_v_sq * (1 + self.norms_u_sq)
        self.child_numitor = self.euclidean_times_sqrt_g * self.norms_v  # denominator of the cosine
        self.angles_psi_parent = np.arcsin(self.K * self.one_minus_norms_sq_v / self.norms_v)  # (1, batch_size)

    self.cos_angles_child = child_numerator / self.child_numitor
    # Clip into [-1 + EPS, 1 - EPS] to avoid numerical errors in arccos
    self.clipped_cos_angle_child = np.maximum(self.cos_angles_child, -1 + EPS)
    self.clipped_cos_angle_child = np.minimum(self.clipped_cos_angle_child, 1 - EPS)
    self.angles_child = np.arccos(self.clipped_cos_angle_child)  # (1 + neg_size, batch_size)

    self.angle_diff = self.angles_child - self.angles_psi_parent
    self.energy_vec = np.maximum(0, self.angle_diff)  # (1 + neg_size, batch_size)

    self.pos_loss = self.energy_vec[0].sum()
    self.neg_loss = np.maximum(0, self.margin - self.energy_vec[1:]).sum()
    self.loss = self.pos_loss + self.neg_loss
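Both _compute_loss variants clip the cosine into [-1 + EPS, 1 - EPS] before calling arccos because the derivative of arccos, -1/sqrt(1 - x**2), diverges at plus or minus 1. A small standalone sketch of the effect on autograd gradients (EPS here is an arbitrary choice, not the value used in the original code):

import autograd.numpy as np
from autograd import grad

EPS = 1e-7

def angle_raw(c):
    return np.arccos(c)

def angle_clipped(c):
    c = np.minimum(np.maximum(c, -1 + EPS), 1 - EPS)
    return np.arccos(c)

print(grad(angle_raw)(1.0))      # diverges: inf/nan plus a runtime warning
print(grad(angle_clipped)(1.0))  # 0.0: the clipped branch is flat, so the gradient stays finite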
Example 6: plot_gaussian_mixture
# Required import: from autograd import numpy [as an alias]
# Or: from autograd.numpy import minimum [as an alias]
def plot_gaussian_mixture(params, ax):
    for log_proportion, mean, cov_sqrt in zip(*unpack_gmm_params(params)):
        # Cap the transparency at 1.0 so large mixture weights are drawn fully opaque.
        alpha = np.minimum(1.0, np.exp(log_proportion) * 10)
        plot_ellipse(ax, mean, cov_sqrt, alpha)
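Here np.minimum just caps a scalar, so components with mixture weight above 0.1 get alpha 1.0. The same one-liner in isolation (the log-proportions below are made up for illustration):

import autograd.numpy as np

log_proportions = np.log(np.array([0.02, 0.3, 0.68]))
alphas = np.minimum(1.0, np.exp(log_proportions) * 10)
print(alphas)   # [0.2 1.  1. ]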
Example 7: test_minimum
# Required import: from autograd import numpy [as an alias]
# Or: from autograd.numpy import minimum [as an alias]
def test_minimum():
    combo_check(np.minimum, [0, 1])(
        [R(1), R(1, 4), R(3, 4)],
        [R(1), R(1, 4), R(3, 4)])
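combo_check and R come from autograd's internal test harness; the test exercises gradients of np.minimum with respect to both arguments over every combination of the listed shapes. A more direct, self-contained way to check the same thing with autograd's public test utility (the shapes here are arbitrary):

import autograd.numpy as np
import autograd.numpy.random as npr
from autograd.test_util import check_grads

x = npr.randn(3, 4)
y = npr.randn(3, 4)
# Compare reverse-mode gradients against numerical derivatives.
check_grads(lambda a: np.minimum(a, y), modes=['rev'])(x)   # w.r.t. the first argument
check_grads(lambda b: np.minimum(x, b), modes=['rev'])(y)   # w.r.t. the second argument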
Example 8: _loss_fn
# Required import: from autograd import numpy [as an alias]
# Or: from autograd.numpy import minimum [as an alias]
def _loss_fn(self, matrix, rels_reversed):
    """Given a numpy array with vectors for u, v and negative samples, compute the loss value.

    Parameters
    ----------
    matrix : numpy.array
        Array containing vectors for u, v and negative samples, of shape (2 + negative_size, dim).
    rels_reversed : bool
        Whether the relations in the batch are reversed.

    Returns
    -------
    float
        Computed loss value.

    Warnings
    --------
    Only used for autograd gradients, since autograd requires a specific function signature.
    """
    vector_u = matrix[0]
    vectors_v = matrix[1:]
    norm_u = grad_np.linalg.norm(vector_u)
    norms_v = grad_np.linalg.norm(vectors_v, axis=1)
    euclidean_dists = grad_np.linalg.norm(vector_u - vectors_v, axis=1)

    if not rels_reversed:
        # u is x, v is y
        cos_angle_child = (norms_v**2 - norm_u**2 - euclidean_dists**2) / (2 * euclidean_dists * norm_u)  # 1 + neg_size
        angles_psi_parent = grad_np.arcsin(self.K / norm_u)  # scalar
    else:
        # v is x, u is y
        cos_angle_child = (norm_u**2 - norms_v**2 - euclidean_dists**2) / (2 * euclidean_dists * norms_v)  # 1 + neg_size
        angles_psi_parent = grad_np.arcsin(self.K / norms_v)  # 1 + neg_size

    # Clip into [-1 + EPS, 1 - EPS] to avoid numerical errors in arccos
    clipped_cos_angle_child = grad_np.maximum(cos_angle_child, -1 + EPS)
    clipped_cos_angle_child = grad_np.minimum(clipped_cos_angle_child, 1 - EPS)
    angles_child = grad_np.arccos(clipped_cos_angle_child)  # 1 + neg_size

    energy_vec = grad_np.maximum(0, angles_child - angles_psi_parent)
    positive_term = energy_vec[0]
    negative_terms = energy_vec[1:]
    return positive_term + grad_np.maximum(0, self.margin - negative_terms).sum()
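The Warnings section hints at why the loss is written as a plain function of a single matrix: autograd.grad differentiates with respect to one array argument, so a function shaped like this yields gradients for u, v and all negatives in one call. A hedged sketch of that calling pattern, using a toy standalone loss rather than the class method above (the matrix values are made up):

import autograd.numpy as grad_np
from autograd import grad

def toy_loss_fn(matrix):
    # Hinge-style toy loss over a (2 + neg_size, dim) matrix, mimicking the signature above.
    vector_u = matrix[0]
    vectors_v = matrix[1:]
    dists = grad_np.linalg.norm(vector_u - vectors_v, axis=1)
    return dists[0] + grad_np.maximum(0, 1.0 - dists[1:]).sum()

matrix = grad_np.array([[0.1, 0.2], [0.3, -0.1], [1.5, 0.7], [-0.4, 0.9]])
gradients = grad(toy_loss_fn)(matrix)   # same shape as matrix: one gradient row per vector
print(gradients.shape)                  # (4, 2)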
Example 9: _loss_fn
# Required import: from autograd import numpy [as an alias]
# Or: from autograd.numpy import minimum [as an alias]
def _loss_fn(self, matrix, rels_reversed):
    """Given a numpy array with vectors for u, v and negative samples, compute the loss value.

    Parameters
    ----------
    matrix : numpy.array
        Array containing vectors for u, v and negative samples, of shape (2 + negative_size, dim).
    rels_reversed : bool
        Whether the relations in the batch are reversed.

    Returns
    -------
    float
        Computed loss value.

    Warnings
    --------
    Only used for autograd gradients, since autograd requires a specific function signature.
    """
    vector_u = matrix[0]
    vectors_v = matrix[1:]
    norm_u = grad_np.linalg.norm(vector_u)
    norms_v = grad_np.linalg.norm(vectors_v, axis=1)
    euclidean_dists = grad_np.linalg.norm(vector_u - vectors_v, axis=1)
    dot_prod = (vector_u * vectors_v).sum(axis=1)

    if not rels_reversed:
        # u is x, v is y
        cos_angle_child = (dot_prod * (1 + norm_u ** 2) - norm_u ** 2 * (1 + norms_v ** 2)) / \
            (norm_u * euclidean_dists * grad_np.sqrt(1 + norms_v ** 2 * norm_u ** 2 - 2 * dot_prod))
        angles_psi_parent = grad_np.arcsin(self.K * (1 - norm_u ** 2) / norm_u)  # scalar
    else:
        # v is x, u is y
        cos_angle_child = (dot_prod * (1 + norms_v ** 2) - norms_v ** 2 * (1 + norm_u ** 2)) / \
            (norms_v * euclidean_dists * grad_np.sqrt(1 + norms_v ** 2 * norm_u ** 2 - 2 * dot_prod))
        angles_psi_parent = grad_np.arcsin(self.K * (1 - norms_v ** 2) / norms_v)  # 1 + neg_size

    # Clip into [-1 + EPS, 1 - EPS] to avoid numerical errors in arccos
    clipped_cos_angle_child = grad_np.maximum(cos_angle_child, -1 + EPS)
    clipped_cos_angle_child = grad_np.minimum(clipped_cos_angle_child, 1 - EPS)
    angles_child = grad_np.arccos(clipped_cos_angle_child)  # 1 + neg_size

    energy_vec = grad_np.maximum(0, angles_child - angles_psi_parent)
    positive_term = energy_vec[0]
    negative_terms = energy_vec[1:]
    return positive_term + grad_np.maximum(0, self.margin - negative_terms).sum()