This article collects typical usage examples of the numpy.maximum method from the Python package autograd.numpy. If you have been wondering how numpy.maximum is used in practice, the curated code examples below may help. You can also explore the other methods available in the autograd.numpy module.
The following presents 15 code examples of numpy.maximum, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the site recommend better Python examples.
Example 1: truncate0
# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import maximum [as alias]
def truncate0(x, axis=None, strict=False, tol=1e-13):
'''make sure everything in x is non-negative'''
# the maximum along axis
maxes = np.maximum(np.amax(x, axis=axis), 1e-300)
# the negative part of minimum along axis
mins = np.maximum(-np.amin(x, axis=axis), 0.0)
# assert the negative numbers are small (relative to maxes)
assert np.all(mins <= tol * maxes)
if axis is not None:
        idx = [slice(None)] * x.ndim
        idx[axis] = np.newaxis
        # index with a tuple: multidimensional indexing with a list is no
        # longer supported by numpy
        mins = mins[tuple(idx)]
        maxes = maxes[tuple(idx)]
if strict:
# set everything below the tolerance to 0
return set0(x, x < tol * maxes)
else:
        # zero out anything comparable in magnitude to the most negative entry
return set0(x, x < 2 * mins)
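Before the next example, here is a minimal self-contained sketch of the clamping idea in truncate0, using plain numpy on hypothetical data (set0 is external to the snippet above, so the sketch zeroes entries directly with np.where):

import numpy as np

x = np.array([1.0, 0.5, -1e-15, 2.0])      # tiny negative from round-off error
maxes = np.maximum(np.amax(x), 1e-300)     # guard against an all-zero array
mins = np.maximum(-np.amin(x), 0.0)        # magnitude of the most negative entry
assert np.all(mins <= 1e-13 * maxes)       # negatives must be numerically small
x_clean = np.where(x < 2 * mins, 0.0, x)   # zero everything of that magnitude
print(x_clean)                             # [1.  0.5 0.  2. ]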
Example 2: _composite_log_likelihood
# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import maximum [as alias]
def _composite_log_likelihood(data, demo, mut_rate=None, truncate_probs=0.0, vector=False, p_missing=None, use_pairwise_diffs=False, **kwargs):
try:
sfs = data.sfs
except AttributeError:
sfs = data
sfs_probs = np.maximum(expected_sfs(demo, sfs.configs, normalized=True, **kwargs),
truncate_probs)
log_lik = sfs._integrate_sfs(np.log(sfs_probs), vector=vector)
# add on log likelihood of poisson distribution for total number of SNPs
if mut_rate is not None:
log_lik = log_lik + \
_mut_factor(sfs, demo, mut_rate, vector,
p_missing, use_pairwise_diffs)
if not vector:
log_lik = np.squeeze(log_lik)
return log_lik
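The np.maximum(..., truncate_probs) call above floors the expected SFS probabilities so that np.log never sees an exact zero. A minimal sketch of that flooring pattern with made-up probabilities:

import numpy as np

probs = np.array([0.7, 0.3, 0.0])            # an exact zero would give log = -inf
truncate_probs = 1e-100                      # hypothetical floor value
log_probs = np.log(np.maximum(probs, truncate_probs))
print(log_probs)                             # finite everywhere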
Example 3: get_thickness_at_chord_fraction_legacy
# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import maximum [as alias]
def get_thickness_at_chord_fraction_legacy(self, chord_fraction):
    # Returns the (interpolated) thickness at the given chord fraction(s), as
    # measured from the leading edge. Thickness is nondimensionalized by chord
    # (i.e. this function returns t/c at a given x/c).
chord = np.max(self.coordinates[:, 0]) - np.min(
self.coordinates[:, 0]) # This should always be 1, but this is just coded for robustness.
x = chord_fraction * chord + min(self.coordinates[:, 0])
upperCoors = self.upper_coordinates()
lowerCoors = self.lower_coordinates()
y_upper_func = sp_interp.interp1d(x=upperCoors[:, 0], y=upperCoors[:, 1], copy=False, fill_value='extrapolate')
y_lower_func = sp_interp.interp1d(x=lowerCoors[:, 0], y=lowerCoors[:, 1], copy=False, fill_value='extrapolate')
y_upper = y_upper_func(x)
y_lower = y_lower_func(x)
thickness = np.maximum(y_upper - y_lower, 0)
return thickness
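The np.maximum(y_upper - y_lower, 0) line clamps slightly crossed surfaces to zero thickness. A hedged, self-contained sketch of the same interpolate-then-clamp pattern with toy (non-airfoil) coordinates:

import numpy as np
from scipy import interpolate as sp_interp

x_pts = np.array([0.0, 0.5, 1.0])            # hypothetical surface sample points
y_upper_func = sp_interp.interp1d(x_pts, [0.0, 0.08, 0.0], fill_value='extrapolate')
y_lower_func = sp_interp.interp1d(x_pts, [0.0, -0.04, 0.0], fill_value='extrapolate')

xc = np.linspace(0, 1, 5)
thickness = np.maximum(y_upper_func(xc) - y_lower_func(xc), 0)  # clamp crossings to 0
print(thickness)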
Example 4: is_a_scores_vector_batch
# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import maximum [as alias]
def is_a_scores_vector_batch(self, K, parent_vectors, other_vectors, rel_reversed):
norm_parents = np.linalg.norm(parent_vectors, axis=1)
norms_other = np.linalg.norm(other_vectors, axis=1)
    euclidean_dists = np.maximum(np.linalg.norm(parent_vectors - other_vectors, axis=1), 1e-6)  # floor the distance, since a parent can equal its child in the reconstruction experiment
if not rel_reversed:
cos_angles_child = (norms_other**2 - norm_parents**2 - euclidean_dists**2) / (2 * euclidean_dists * norm_parents) # 1 + neg_size
angles_psi_parent = np.arcsin(K / norm_parents) # scalar
else:
cos_angles_child = (norm_parents**2 - norms_other**2 - euclidean_dists**2) / (2 * euclidean_dists * norms_other) # 1 + neg_size
angles_psi_parent = np.arcsin(K / norms_other) # 1 + neg_size
assert not np.isnan(cos_angles_child).any()
clipped_cos_angle_child = np.maximum(cos_angles_child, -1 + EPS)
clipped_cos_angle_child = np.minimum(clipped_cos_angle_child, 1 - EPS)
angles_child = np.arccos(clipped_cos_angle_child) # (1 + neg_size, batch_size)
return np.maximum(0, angles_child - angles_psi_parent)
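Clipping the cosine into [-1 + EPS, 1 - EPS] before np.arccos is the standard guard against NaNs from floating-point drift. A minimal sketch (EPS is assumed here to be a small constant such as 1e-7):

import numpy as np

EPS = 1e-7
cos_vals = np.array([1.0000001, -1.0000002, 0.5])  # drifted past the valid range
clipped = np.minimum(np.maximum(cos_vals, -1 + EPS), 1 - EPS)
angles = np.arccos(clipped)                        # no NaNs
print(angles)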
Example 5: fit_gaussian_draw
# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import maximum [as alias]
def fit_gaussian_draw(X, J, seed=28, reg=1e-7, eig_pow=1.0):
"""
Fit a multivariate normal to the data X (n x d) and draw J points
from the fit.
- reg: regularizer to use with the covariance matrix
- eig_pow: raise eigenvalues of the covariance matrix to this power to construct
a new covariance matrix before drawing samples. Useful to shrink the spread
of the variance.
"""
with NumpySeedContext(seed=seed):
d = X.shape[1]
mean_x = np.mean(X, 0)
cov_x = np.cov(X.T)
if d==1:
cov_x = np.array([[cov_x]])
[evals, evecs] = np.linalg.eig(cov_x)
evals = np.maximum(0, np.real(evals))
assert np.all(np.isfinite(evals))
evecs = np.real(evecs)
shrunk_cov = evecs.dot(np.diag(evals**eig_pow)).dot(evecs.T) + reg*np.eye(d)
V = np.random.multivariate_normal(mean_x, shrunk_cov, J)
return V
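The np.maximum(0, np.real(evals)) line floors the eigenvalues so that the reconstructed covariance is positive semi-definite before sampling. A self-contained sketch of just that repair step on a hypothetical rank-deficient matrix:

import numpy as np

cov = np.array([[1.0, 1.0], [1.0, 1.0]])      # rank-deficient covariance
evals, evecs = np.linalg.eig(cov)
evals = np.maximum(0, np.real(evals))         # floor any tiny negative eigenvalues
evecs = np.real(evecs)
cov_psd = evecs.dot(np.diag(evals)).dot(evecs.T) + 1e-7 * np.eye(2)  # regularize
draws = np.random.multivariate_normal(np.zeros(2), cov_psd, 3)
print(draws.shape)                            # (3, 2)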
Example 6: bound_by_data
# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import maximum [as alias]
def bound_by_data(Z, Data):
"""
Determine lower and upper bound for each dimension from the Data, and project
Z so that all points in Z live in the bounds.
Z: m x d
Data: n x d
Return a projected Z of size m x d.
"""
    m, d = Z.shape
    Low = np.min(Data, 0)
    Up = np.max(Data, 0)
    LowMat = np.repeat(Low[np.newaxis, :], m, axis=0)
    UpMat = np.repeat(Up[np.newaxis, :], m, axis=0)
Z = np.maximum(LowMat, Z)
Z = np.minimum(UpMat, Z)
return Z
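Since numpy broadcasts the per-dimension bounds, the explicit np.repeat above is not strictly required; the same projection can be written as a broadcasted maximum/minimum pair (equivalent to np.clip). A short sketch:

import numpy as np

Data = np.array([[0.0, 1.0], [2.0, 3.0]])    # n x d
Z = np.array([[-1.0, 5.0], [1.0, 2.0]])      # m x d, some points out of bounds
Z_proj = np.minimum(np.maximum(Z, Data.min(0)), Data.max(0))  # broadcasted clamp
print(Z_proj)                                # [[0. 3.] [1. 2.]]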
Example 7: avg_pairwise_hets
# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import maximum [as alias]
def avg_pairwise_hets(self):
# avg number of hets per ind per pop (assuming Hardy-Weinberg)
n_nonmissing = np.sum(self.configs.value, axis=2)
    # for the denominator, assume 1 allele is drawn from the whole sample, and 1
    # allele is drawn only from the non-missing alleles
denoms = np.maximum(n_nonmissing * (self.sampled_n - 1), 1.0)
p_het = 2 * self.configs.value[:, :, 0] * \
self.configs.value[:, :, 1] / denoms
return self.freqs_matrix.T.dot(p_het)
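np.maximum(denoms, 1.0) here is a guard that keeps the division well-defined when a population has no non-missing alleles; the numerator is zero in that case, so the result is unchanged. A minimal sketch of the safe-division pattern:

import numpy as np

counts = np.array([4.0, 0.0, 2.0])           # 0 means everything was missing
numer = np.array([2.0, 0.0, 1.0])            # zero wherever counts is zero
safe = numer / np.maximum(counts, 1.0)       # avoids 0/0 without changing values
print(safe)                                  # [0.5 0.  0.5]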
Example 8: sgd
# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import maximum [as alias]
# Also needed: import scipy.optimize (for the OptimizeResult return value)
def sgd(fun, x0, fun_and_jac, pieces, stepsize, num_iters, bounds=None, callback=None, iter_per_output=10, rgen=np.random):
x0 = np.array(x0)
if callback is None:
callback = lambda *a, **kw: None
if bounds is None:
bounds = [(None, None) for _ in x0]
lower, upper = zip(*bounds)
lower = [-float('inf') if l is None else l
for l in lower]
upper = [float('inf') if u is None else u
for u in upper]
def truncate(x):
return np.maximum(np.minimum(x, upper), lower)
x = x0
for nit in range(num_iters):
i = rgen.randint(pieces)
f_x, g_x = fun_and_jac(x, i)
x = truncate(x - stepsize * g_x)
if nit % iter_per_output == 0:
callback(x, f_x, nit)
return scipy.optimize.OptimizeResult({'x': x, 'fun': f_x, 'jac': g_x})
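A hedged usage sketch for the sgd helper above, minimizing a quadratic objective split into two pieces. fun_and_jac here is hypothetical and returns the value and gradient of one piece; the unused fun argument is passed as None:

import numpy as np
import scipy.optimize  # needed by sgd's return value

targets = np.array([[0.0, 1.0], [2.0, 3.0]])      # one target per piece

def fun_and_jac(x, i):
    diff = x - targets[i]
    return np.sum(diff ** 2), 2 * diff            # value and gradient of piece i

# Assumes the sgd() definition above is in scope.
res = sgd(None, [10.0, -10.0], fun_and_jac, pieces=2, stepsize=0.1,
          num_iters=200, bounds=[(0, None), (None, None)])
print(res.x)                                      # hovers near the targets' mean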
Example 9: _compute_loss
# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import maximum [as alias]
def _compute_loss(self):
"""Compute and store loss value for the given batch of examples."""
if self._loss_computed:
return
self._loss_computed = True
self.euclidean_dists = np.linalg.norm(self.vectors_u - self.vectors_v, axis=1) # (1 + neg_size, batch_size)
euclidean_dists_sq = self.euclidean_dists ** 2
if not self.rels_reversed:
# (1 + neg_size, batch_size)
child_numerator = self.norms_v_sq - self.norms_u_sq - euclidean_dists_sq
self.child_numitor = 2 * self.euclidean_dists * self.norms_u
self.angles_psi_parent = np.arcsin(self.K / self.norms_u) # (1, batch_size)
else:
# (1 + neg_size, batch_size)
child_numerator = self.norms_u_sq - self.norms_v_sq - euclidean_dists_sq
self.child_numitor = 2 * self.euclidean_dists * self.norms_v
self.angles_psi_parent = np.arcsin(self.K / self.norms_v) # (1 + neg_size, batch_size)
self.cos_angles_child = child_numerator / self.child_numitor
# To avoid numerical errors
self.clipped_cos_angle_child = np.maximum(self.cos_angles_child, -1 + EPS)
self.clipped_cos_angle_child = np.minimum(self.clipped_cos_angle_child, 1 - EPS)
self.angles_child = np.arccos(self.clipped_cos_angle_child) # (1 + neg_size, batch_size)
self.angle_diff = self.angles_child - self.angles_psi_parent
self.energy_vec = np.maximum(0, self.angle_diff) # (1 + neg_size, batch_size)
self.pos_loss = self.energy_vec[0].sum()
self.neg_loss = np.maximum(0, self.margin - self.energy_vec[1:]).sum()
self.loss = self.pos_loss + self.neg_loss
Example 10: _compute_loss
# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import maximum [as alias]
def _compute_loss(self):
"""Compute and store loss value for the given batch of examples."""
if self._loss_computed:
return
self._loss_computed = True
self.euclidean_dists = np.linalg.norm(self.vectors_u - self.vectors_v, axis=1) # (1 + neg_size, batch_size)
self.dot_prods = (self.vectors_u * self.vectors_v).sum(axis=1) # (1 + neg, batch_size)
self.g = 1 + self.norms_v_sq * self.norms_u_sq - 2 * self.dot_prods
self.g_sqrt = np.sqrt(self.g)
self.euclidean_times_sqrt_g = self.euclidean_dists * self.g_sqrt
if not self.rels_reversed:
# u is x , v is y
# (1 + neg_size, batch_size)
child_numerator = self.dot_prods * (1 + self.norms_u_sq) - self.norms_u_sq * (1 + self.norms_v_sq)
self.child_numitor = self.euclidean_times_sqrt_g * self.norms_u
self.angles_psi_parent = np.arcsin(self.K * self.one_minus_norms_sq_u / self.norms_u) # (1, batch_size)
else:
# v is x , u is y
# (1 + neg_size, batch_size)
child_numerator = self.dot_prods * (1 + self.norms_v_sq) - self.norms_v_sq * (1 + self.norms_u_sq)
self.child_numitor = self.euclidean_times_sqrt_g * self.norms_v
self.angles_psi_parent = np.arcsin(self.K * self.one_minus_norms_sq_v / self.norms_v) # (1, batch_size)
self.cos_angles_child = child_numerator / self.child_numitor
# To avoid numerical errors
self.clipped_cos_angle_child = np.maximum(self.cos_angles_child, -1 + EPS)
self.clipped_cos_angle_child = np.minimum(self.clipped_cos_angle_child, 1 - EPS)
self.angles_child = np.arccos(self.clipped_cos_angle_child) # (1 + neg_size, batch_size)
self.angle_diff = self.angles_child - self.angles_psi_parent
self.energy_vec = np.maximum(0, self.angle_diff) # (1 + neg_size, batch_size)
self.pos_loss = self.energy_vec[0].sum()
self.neg_loss = np.maximum(0, self.margin - self.energy_vec[1:]).sum()
self.loss = self.pos_loss + self.neg_loss
Example 11: _loss_fn
# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import maximum [as alias]
def _loss_fn(self, matrix, rels_reversed):
"""Given a numpy array with vectors for u, v and negative samples, computes loss value.
Parameters
----------
matrix : numpy.array
Array containing vectors for u, v and negative samples, of shape (2 + negative_size, dim).
    rels_reversed : bool
        If True, the penalty direction is flipped: maximum(0, -u + v) instead of maximum(0, u - v).
Returns
-------
float
Computed loss value.
Warnings
--------
Only used for autograd gradients, since autograd requires a specific function signature.
"""
vector_u = matrix[0]
vectors_v = matrix[1:]
if not rels_reversed:
entailment_penalty = grad_np.maximum(0, vector_u - vectors_v) # (1 + negative_size, dim).
else:
entailment_penalty = grad_np.maximum(0, - vector_u + vectors_v) # (1 + negative_size, dim).
energy_vec = grad_np.linalg.norm(entailment_penalty, axis=1) ** 2
positive_term = energy_vec[0]
negative_terms = energy_vec[1:]
return positive_term + grad_np.maximum(0, self.margin - negative_terms).sum()
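The maximum(0, u - v) term is the standard order-embedding penalty: it is zero exactly when v dominates u coordinate-wise. Because it is written with autograd.numpy, the hinge is differentiable almost everywhere and grad handles it directly. A minimal sketch with hypothetical vectors:

import autograd.numpy as grad_np
from autograd import grad

def entailment_energy(matrix):
    vector_u, vectors_v = matrix[0], matrix[1:]
    penalty = grad_np.maximum(0, vector_u - vectors_v)   # zero iff v >= u everywhere
    return grad_np.sum(penalty ** 2)                     # summed squared norms

matrix = grad_np.array([[1.0, 2.0], [0.5, 3.0], [0.9, 3.0]])  # u, then two v's
print(entailment_energy(matrix))          # 0.26 = 0.25 + 0.01
print(grad(entailment_energy)(matrix))    # zero where the hinge is inactive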
Example 12: _compute_loss
# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import maximum [as alias]
def _compute_loss(self):
"""Compute and store loss value for the given batch of examples."""
if self._loss_computed:
return
self._loss_computed = True
if not self.rels_reversed:
self.entailment_penalty = np.maximum(0, self.vectors_u - self.vectors_v) # (1 + negative_size, dim, batch_size).
else:
self.entailment_penalty = np.maximum(0, - self.vectors_u + self.vectors_v) # (1 + negative_size, dim, batch_size).
self.energy_vec = np.linalg.norm(self.entailment_penalty, axis=1)**2 # (1 + negative_size, batch_size).
self.pos_loss = self.energy_vec[0].sum()
self.neg_loss = np.maximum(0, self.margin - self.energy_vec[1:]).sum()
self.loss = self.pos_loss + self.neg_loss
Example 13: is_a_scores_vector_batch
# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import maximum [as alias]
def is_a_scores_vector_batch(self, alpha, parent_vectors, other_vectors, rel_reversed):
if not rel_reversed:
return np.linalg.norm(np.maximum(0, parent_vectors - other_vectors), axis=1)
else:
return np.linalg.norm(np.maximum(0, - parent_vectors + other_vectors), axis=1)
Example 14: _maxmargin_loss_fn
# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import maximum [as alias]
def _maxmargin_loss_fn(poincare_dists, maxmargin_margin):
"""
Parameters
----------
poincare_dists : numpy.array
        All distances d(u,v) and d(u,v'), where v' is a negative sample. Shape (1 + negative_size).
    maxmargin_margin : float
        The margin \gamma of the hinge.

    Returns
    -------
    float
        The max-margin loss \sum_{v' \in N(u)} max(0, \gamma + d(u,v) - d(u,v')).
"""
positive_term = poincare_dists[0]
negative_terms = poincare_dists[1:]
return grad_np.maximum(0, maxmargin_margin + positive_term - negative_terms).sum()
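Because the loss is written with autograd.numpy (imported as grad_np), its (sub)gradient with respect to the distances comes for free. A hedged sketch with hypothetical distances and margin:

import autograd.numpy as grad_np
from autograd import grad

def maxmargin_loss(poincare_dists, maxmargin_margin=0.1):
    positive_term = poincare_dists[0]
    negative_terms = poincare_dists[1:]
    return grad_np.maximum(0, maxmargin_margin + positive_term - negative_terms).sum()

dists = grad_np.array([0.5, 0.4, 2.0])   # d(u,v), then two negatives
print(maxmargin_loss(dists))             # 0.2: only the first negative is violated
print(grad(maxmargin_loss)(dists))       # [ 1. -1.  0.]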
Example 15: _compute_loss
# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import maximum [as alias]
def _compute_loss(self):
"""Compute and store loss value for the given batch of examples."""
if self._loss_computed:
return
self._compute_distances()
if self.loss_type == 'nll':
# NLL loss from the NIPS paper.
exp_negative_distances = np.exp(-self.poincare_dists) # (1 + neg_size, batch_size)
# Remove the value for the true edge (u,v) from the partition function
Z = exp_negative_distances[1:].sum(axis=0) # (batch_size)
self.exp_negative_distances = exp_negative_distances # (1 + neg_size, batch_size)
self.Z = Z # (batch_size)
self.pos_loss = self.poincare_dists[0].sum()
self.neg_loss = np.log(self.Z).sum()
self.loss = self.pos_loss + self.neg_loss # scalar
elif self.loss_type == 'neg':
# NEG loss function:
# - log sigma((r - d(u,v)) / t) - \sum_{v' \in N(u)} log sigma((d(u,v') - r) / t)
positive_term = np.log(1.0 + np.exp((- self.neg_r + self.poincare_dists[0]) / self.neg_t)) # (batch_size)
negative_terms = self.neg_mu * \
np.log(1.0 + np.exp((self.neg_r - self.poincare_dists[1:]) / self.neg_t)) # (1 + neg_size, batch_size)
self.pos_loss = positive_term.sum()
self.neg_loss = negative_terms.sum()
self.loss = self.pos_loss + self.neg_loss # scalar
elif self.loss_type == 'maxmargin':
        # max-margin loss function: \sum_{v' \in N(u)} max(0, \gamma + d(u,v) - d(u,v'))
self.loss = np.maximum(0, self.maxmargin_margin + self.poincare_dists[0] - self.poincare_dists[1:]).sum() # scalar
self.pos_loss = self.loss
self.neg_loss = self.loss
else:
raise ValueError('Unknown loss type : ' + self.loss_type)
self._loss_computed = True
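A side note on the 'neg' branch above: np.log(1.0 + np.exp(x)) overflows for large x, while np.logaddexp(0, x) computes the same softplus value stably. A minimal sketch:

import numpy as np

x = np.array([-50.0, 0.0, 50.0, 1000.0])
naive = np.log(1.0 + np.exp(x))     # overflows to inf (with a warning) at x = 1000
stable = np.logaddexp(0.0, x)       # log(1 + e^x), finite everywhere
print(naive)
print(stable)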