This article collects typical usage examples of the numpy.nanargmin method in Python. If you are wondering how to use numpy.nanargmin, how it is called in practice, or what real-world examples look like, the curated code examples below may help. You can also explore further usage examples for the numpy module itself.
The following shows 15 code examples of the numpy.nanargmin method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
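As a quick refresher before the examples (a minimal sketch, not taken from any of the projects below): np.nanargmin returns the index of the smallest value while ignoring NaN entries, and raises ValueError when a slice contains only NaNs.

import numpy as np

a = np.array([4.0, np.nan, 1.0, 7.0])
print(np.nanargmin(a))   # 2 -- the NaN at index 1 is ignored
print(np.argmin(a))      # 1 -- plain argmin lets the NaN "win" instead

try:
    np.nanargmin(np.array([np.nan, np.nan]))
except ValueError as err:
    print("all-NaN slice:", err)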
Example 1: mouse_drag
# Required module: import numpy [as alias]
# Or: from numpy import nanargmin [as alias]
def mouse_drag(self, event):
    '''
    Toggle the aperture pixel nearest to the cursor while dragging.
    '''
    if event.inaxes == self.ax and event.button == 1:

        # Index of nearest point
        i = np.nanargmin(((event.xdata - self.x) / self.nx) ** 2)
        j = np.nanargmin(((event.ydata - self.y) / self.ny) ** 2)

        if (i == self.last_i) and (j == self.last_j):
            return
        else:
            self.last_i = i
            self.last_j = j

            # Toggle pixel
            if self.aperture[j, i]:
                self.aperture[j, i] = 0
            else:
                self.aperture[j, i] = 1

            # Update the contour
            self.update()
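The pattern above is a nearest-grid-index lookup; a minimal standalone sketch (variable names here are illustrative, not from the source):

import numpy as np

x_grid = np.arange(10, dtype=float)   # pixel column centres
x_grid[3] = np.nan                    # e.g. a masked column
x_click = 3.2
i = np.nanargmin((x_click - x_grid) ** 2)
print(i)  # 4 -- the NaN entry at index 3 is skipped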
Example 2: mouse_click
# Required module: import numpy [as alias]
# Or: from numpy import nanargmin [as alias]
def mouse_click(self, event):
    '''
    Toggle the aperture pixel nearest to the mouse click.
    '''
    if event.mouseevent.inaxes == self.ax:

        # Index of nearest point
        i = np.nanargmin(
            ((event.mouseevent.xdata - self.x) / self.nx) ** 2)
        j = np.nanargmin(
            ((event.mouseevent.ydata - self.y) / self.ny) ** 2)
        self.last_i = i
        self.last_j = j

        # Toggle pixel
        if self.aperture[j, i]:
            self.aperture[j, i] = 0
        else:
            self.aperture[j, i] = 1

        # Update the contour
        self.update()
Example 3: _compute_eps
# Required module: import numpy [as alias]
# Or: from numpy import nanargmin [as alias]
def _compute_eps(orders, rdp, delta):
    """Compute epsilon given a list of RDP values and target delta.

    Args:
        orders: An array (or a scalar) of orders.
        rdp: A list (or a scalar) of RDP guarantees.
        delta: The target delta.

    Returns:
        Pair of (eps, optimal_order).

    Raises:
        ValueError: If input is malformed.
    """
    orders_vec = np.atleast_1d(orders)
    rdp_vec = np.atleast_1d(rdp)

    if len(orders_vec) != len(rdp_vec):
        raise ValueError("Input lists must have the same length.")

    eps = rdp_vec - math.log(delta) / (orders_vec - 1)

    idx_opt = np.nanargmin(eps)  # Ignore NaNs
    return eps[idx_opt], orders_vec[idx_opt]
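A hedged usage sketch of the function above (the orders and RDP values are made-up toy numbers, not from the source):

import math
import numpy as np

orders = np.array([1.5, 2.0, 4.0, 8.0, 16.0])
rdp = np.array([0.5, 0.6, 0.9, 1.5, 2.8])   # toy RDP guarantees, one per order
delta = 1e-5

eps = rdp - math.log(delta) / (orders - 1)  # same formula as in _compute_eps
best = np.nanargmin(eps)
print(eps[best], orders[best])              # epsilon and the order that achieves it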
Example 4: OnKeyPressed
# Required module: import numpy [as alias]
# Or: from numpy import nanargmin [as alias]
def OnKeyPressed(self, event=None):
    if event.GetKeyCode() == wx.WXK_RIGHT:
        self.nextImage(event=None)
    elif event.GetKeyCode() == wx.WXK_LEFT:
        self.prevImage(event=None)
    elif event.GetKeyCode() == wx.WXK_BACK:
        pos_abs = event.GetPosition()
        inv = self.axes.transData.inverted()
        pos_rel = list(inv.transform(pos_abs))
        pos_rel[1] = (
            self.axes.get_ylim()[0] - pos_rel[1]
        )  # Recall y-axis is inverted
        i = np.nanargmin(
            [self.calc_distance(*dp.point.center, *pos_rel) for dp in self.drs]
        )
        closest_dp = self.drs[i]
        msg = wx.MessageBox(
            "Do you want to remove the label %s ?" % closest_dp.bodyParts,
            "Remove!",
            wx.YES_NO | wx.ICON_WARNING,
        )
        if msg == 2:  # wx.YES
            closest_dp.delete_data()
Example 5: OnKeyPressed
# Required module: import numpy [as alias]
# Or: from numpy import nanargmin [as alias]
def OnKeyPressed(self, event=None):
    if event.GetKeyCode() == wx.WXK_RIGHT:
        self.nextImage(event=None)
    elif event.GetKeyCode() == wx.WXK_LEFT:
        self.prevImage(event=None)
    elif event.GetKeyCode() == wx.WXK_BACK:
        pos_abs = event.GetPosition()
        inv = self.axes.transData.inverted()
        pos_rel = list(inv.transform(pos_abs))
        pos_rel[1] = (
            self.axes.get_ylim()[0] - pos_rel[1]
        )  # Recall y-axis is inverted
        i = np.nanargmin(
            [self.calc_distance(*dp.point.center, *pos_rel) for dp in self.drs]
        )
        closest_dp = self.drs[i]
        msg = wx.MessageBox(
            f"Do you want to remove the label {closest_dp.individual_name}:{closest_dp.bodyParts}?",
            "Remove!",
            wx.YES_NO | wx.ICON_WARNING,
        )
        if msg == 2:  # wx.YES
            closest_dp.delete_data()
Example 6: save_everything
# Required module: import numpy [as alias]
# Or: from numpy import nanargmin [as alias]
def save_everything(args, metrics_hist_all, model, model_dir, params, criterion, evaluate=False):
    """
    Save metrics, model, params all in model_dir
    """
    save_metrics(metrics_hist_all, model_dir)
    params['model_dir'] = model_dir
    save_params_dict(params)

    if not evaluate:
        # save the model with the best criterion metric
        if not np.all(np.isnan(metrics_hist_all[0][criterion])):
            if criterion == 'loss_dev':
                eval_val = np.nanargmin(metrics_hist_all[0][criterion])
            else:
                eval_val = np.nanargmax(metrics_hist_all[0][criterion])

            if eval_val == len(metrics_hist_all[0][criterion]) - 1:
                # save state dict
                sd = model.cpu().state_dict()
                torch.save(sd, model_dir + "/model_best_%s.pth" % criterion)
                if args.gpu:
                    model.cuda()
    print("saved metrics, params, model to directory %s\n" % (model_dir))
Example 7: weighted_minhash
# Required module: import numpy [as alias]
# Or: from numpy import nanargmin [as alias]
def weighted_minhash(v, sample_size, rs, ln_cs, betas):
    if sample_size != rs.shape[0]:
        raise ValueError("Input sample size mismatch, expecting %d" % rs.shape[0])
    if len(v) != rs.shape[1]:
        raise ValueError("Input dimension mismatch, expecting %d" % rs.shape[1])

    hashvalues = numpy.zeros((sample_size, 2), dtype=numpy.uint32)
    vzeros = (v == 0)
    if vzeros.all():
        raise ValueError("Input is all zeros")
    v[vzeros] = numpy.nan
    vlog = numpy.log(v)
    v[vzeros] = 0

    for i in range(sample_size):
        t = numpy.floor((vlog / rs[i]) + betas[i])
        ln_y = (t - betas[i]) * rs[i]
        ln_a = ln_cs[i] - ln_y - rs[i]
        k = numpy.nanargmin(ln_a)
        hashvalues[i][0], hashvalues[i][1] = k, int(t[k])
    return hashvalues
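For context, a hedged sketch of how the rs/ln_cs/betas parameters might be generated, following Ioffe's consistent weighted sampling scheme (the Gamma/Uniform choices here are an assumption about the caller and are not shown in the source):

import numpy

rng = numpy.random.RandomState(1)
sample_size, dim = 16, 8
rs = rng.gamma(2, 1, size=(sample_size, dim))
ln_cs = numpy.log(rng.gamma(2, 1, size=(sample_size, dim)))
betas = rng.uniform(0, 1, size=(sample_size, dim))

v = rng.rand(dim)                     # a non-negative weighted vector
hashes = weighted_minhash(v, sample_size, rs, ln_cs, betas)
print(hashes.shape)                   # (16, 2): one (k, t_k) pair per sample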
Example 8: early_stop_decision
# Required module: import numpy [as alias]
# Or: from numpy import nanargmin [as alias]
def early_stop_decision(self, epoch, val_metric, val_loss):
    '''
    Stop training if validation loss has stopped decreasing and
    validation BLEU score has not increased for --patience epochs.

    WARNING: quits with sys.exit(0).

    TODO: this doesn't yet support early stopping based on TER
    '''
    if val_loss < self.best_val_loss:
        self.wait = 0
    elif val_metric > self.best_val_metric or self.args.no_early_stopping:
        self.wait = 0
    else:
        self.wait += 1
        if self.wait >= self.patience:
            # we have exceeded patience
            if val_loss > self.best_val_loss:
                # and loss is no longer decreasing
                logger.info("Epoch %d: early stopping", epoch)
                handle = open("checkpoints/%s/summary"
                              % self.args.run_string, "a")
                handle.write("Early stopping because patience exceeded\n")
                best_bleu = np.nanargmax(self.val_metric)
                best_loss = np.nanargmin(self.val_loss)
                logger.info("Best Metric: %d | val loss %.5f score %.2f",
                            best_bleu+1, self.val_loss[best_bleu],
                            self.val_metric[best_bleu])
                logger.info("Best loss: %d | val loss %.5f score %.2f",
                            best_loss+1, self.val_loss[best_loss],
                            self.val_metric[best_loss])
                handle.close()
                sys.exit(0)
Example 9: log_performance
# Required module: import numpy [as alias]
# Or: from numpy import nanargmin [as alias]
def log_performance(self):
    '''
    Record model performance so far, based on validation loss.
    '''
    handle = open("checkpoints/%s/summary" % self.args.run_string, "w")

    for epoch in range(len(self.val_loss)):
        handle.write("Checkpoint %d | val loss: %.5f bleu %.2f\n"
                     % (epoch+1, self.val_loss[epoch],
                        self.val_metric[epoch]))

    logger.info("---")  # break up the presentation for clarity

    # BLEU is the quickest indicator of performance for our task
    # but loss is our objective function
    best_bleu = np.nanargmax(self.val_metric)
    best_loss = np.nanargmin(self.val_loss)
    logger.info("Best Metric: %d | val loss %.5f score %.2f",
                best_bleu+1, self.val_loss[best_bleu],
                self.val_metric[best_bleu])
    handle.write("Best Metric: %d | val loss %.5f score %.2f\n"
                 % (best_bleu+1, self.val_loss[best_bleu],
                    self.val_metric[best_bleu]))
    logger.info("Best loss: %d | val loss %.5f score %.2f",
                best_loss+1, self.val_loss[best_loss],
                self.val_metric[best_loss])
    handle.write("Best loss: %d | val loss %.5f score %.2f\n"
                 % (best_loss+1, self.val_loss[best_loss],
                    self.val_metric[best_loss]))
    logger.info("Early stopping marker: wait/patience: %d/%d\n",
                self.wait, self.patience)
    handle.write("Early stopping marker: wait/patience: %d/%d\n" %
                 (self.wait, self.patience))
    handle.close()
Example 10: test_nanfunctions_matrices_general
# Required module: import numpy [as alias]
# Or: from numpy import nanargmin [as alias]
def test_nanfunctions_matrices_general():
    # Check that it works and that type and
    # shape are preserved
    # 2018-04-29: moved here from core.tests.test_nanfunctions
    mat = np.matrix(np.eye(3))
    for f in (np.nanargmin, np.nanargmax, np.nansum, np.nanprod,
              np.nanmean, np.nanvar, np.nanstd):
        res = f(mat, axis=0)
        assert_(isinstance(res, np.matrix))
        assert_(res.shape == (1, 3))
        res = f(mat, axis=1)
        assert_(isinstance(res, np.matrix))
        assert_(res.shape == (3, 1))
        res = f(mat)
        assert_(np.isscalar(res))

    for f in np.nancumsum, np.nancumprod:
        res = f(mat, axis=0)
        assert_(isinstance(res, np.matrix))
        assert_(res.shape == (3, 3))
        res = f(mat, axis=1)
        assert_(isinstance(res, np.matrix))
        assert_(res.shape == (3, 3))
        res = f(mat)
        assert_(isinstance(res, np.matrix))
        assert_(res.shape == (1, 3*3))
Example 11: test_nanargmin
# Required module: import numpy [as alias]
# Or: from numpy import nanargmin [as alias]
def test_nanargmin(self):
    tgt = np.argmin(self.mat)
    for mat in self.integer_arrays():
        assert_equal(np.nanargmin(mat), tgt)
Example 12: _
# Required module: import numpy [as alias]
# Or: from numpy import nanargmin [as alias]
def _(artist, event):
    # No need to call `line.contains` as we're going to redo the work anyways
    # (also see matplotlib/matplotlib#6645, though that's fixed in mpl2.1).

    # Always work in screen coordinates, as this is how we need to compute
    # distances.  Note that the artist transform may be different from the axes
    # transform (e.g., for axvline).
    xy = event.x, event.y
    data_xy = artist.get_xydata()
    data_screen_xy = artist.get_transform().transform(data_xy)
    sels = []
    # If markers are visible, find the closest vertex.
    if artist.get_marker() not in ["None", "none", " ", "", None]:
        ds = np.hypot(*(xy - data_screen_xy).T)
        try:
            argmin = np.nanargmin(ds)
        except ValueError:  # Raised by nanargmin([nan]).
            pass
        else:
            target = _with_attrs(
                _untransform(  # More precise than transforming back.
                    data_xy[argmin], data_screen_xy[argmin], artist.axes),
                index=argmin)
            sels.append(Selection(artist, target, ds[argmin], None, None))
    # If lines are visible, find the closest projection.
    if (artist.get_linestyle() not in ["None", "none", " ", "", None]
            and len(artist.get_xydata()) > 1):
        sel = _compute_projection_pick(artist, artist.get_path(), xy)
        if sel is not None:
            sel.target.index = {
                "_draw_lines": lambda _, index: index,
                "_draw_steps_pre": Index.pre_index,
                "_draw_steps_mid": Index.mid_index,
                "_draw_steps_post": Index.post_index}[
                    Line2D.drawStyles[artist.get_drawstyle()]](
                        len(data_xy), sel.target.index)
            sels.append(sel)
    sel = min(sels, key=lambda sel: sel.dist, default=None)
    return sel if sel and sel.dist < artist.get_pickradius() else None
Example 13: train
# Required module: import numpy [as alias]
# Or: from numpy import nanargmin [as alias]
def train(self, alpha):
    """
    Finds the agglomerative clustering on the data alpha

    :param alpha: angles in radians
    :returns: data, cluster ids
    """
    assert len(alpha.shape) == 1, 'Clustering works only for 1d data'
    n = len(alpha)
    cid = np.arange(n, dtype=int)

    nu = n
    while nu > self.numclust:
        mu = np.asarray([descr.mean(alpha[cid == j]) if j in cid else np.Inf
                         for j in range(n)])
        D = np.abs(descr.pairwise_cdiff(mu))
        idx = np.triu_indices(n, 1)
        min_idx = np.nanargmin(D[idx])  # closest pair of cluster means
        cid[cid == cid[idx[0][min_idx]]] = cid[idx[1][min_idx]]
        nu -= 1

    cid2 = np.empty_like(cid)
    for i, j in enumerate(np.unique(cid)):
        cid2[cid == j] = i

    ucid = np.unique(cid2)
    self.centroids = np.asarray([descr.mean(alpha[cid2 == i]) for i in ucid])
    self.cluster_ids = ucid
    self.r = np.asarray([descr.resultant_vector_length(alpha[cid2 == i]) for i in ucid])

    return alpha, cid2
Example 14: cal_eer
# Required module: import numpy [as alias]
# Or: from numpy import nanargmin [as alias]
def cal_eer(fpr, tpr):
    # The EER is the operating point where fpr == 1 - tpr,
    # i.e. where fpr + tpr is closest to 1
    eer = fpr[np.nanargmin(np.absolute((fpr + tpr - 1)))]
    return eer
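A hedged usage sketch, assuming fpr and tpr come from sklearn.metrics.roc_curve (sklearn is not part of the original snippet):

import numpy as np
from sklearn.metrics import roc_curve

y_true = np.array([0, 0, 1, 1, 1, 0, 1, 0])
y_score = np.array([0.1, 0.4, 0.35, 0.8, 0.7, 0.2, 0.6, 0.55])
fpr, tpr, _ = roc_curve(y_true, y_score)
print(cal_eer(fpr, tpr))   # equal error rate at the threshold where fpr is closest to 1 - tpr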
Example 15: _get_best_estimate_single_method
# Required module: import numpy [as alias]
# Or: from numpy import nanargmin [as alias]
def _get_best_estimate_single_method(derivative, errors):
    """Select best derivative estimates element wise.

    Given a single method, e.g. central differences with 2 num_terms (see above), we
    get multiple Richardson approximations including estimated errors. Here we select
    the approximations which result in the lowest error element wise.

    Args:
        derivative (np.ndarray): Derivative estimates from Richardson approximation.
            First axis (axis 0) denotes the potentially multiple estimates. Following
            dimensions represent the dimension of the derivative, i.e. for a classical
            gradient ``derivative`` has 2 dimensions, while for a classical jacobian
            ``derivative`` has 3 dimensions.
        errors (np.ndarray): Error estimates of ``derivative`` estimates. Has the same
            shape as ``derivative``.

    Returns:
        derivative_minimal (np.ndarray): Best derivative estimates chosen with respect
            to minimizing ``errors``. Note that the best values are selected
            element-wise. Has shape ``(derivative.shape[1], derivative.shape[2])``.
        error_minimal (np.ndarray): Minimal errors selected element-wise along axis
            0 of ``errors``.
    """
    if derivative.shape[0] == 1:
        derivative_minimal = np.squeeze(derivative, axis=0)
        error_minimal = np.squeeze(errors, axis=0)
    else:
        minimizer = np.nanargmin(errors, axis=0)
        derivative_minimal = np.take_along_axis(
            derivative, minimizer[np.newaxis, :], axis=0
        )
        derivative_minimal = np.squeeze(derivative_minimal, axis=0)
        error_minimal = np.nanmin(errors, axis=0)

    return derivative_minimal, error_minimal
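A toy illustration of the element-wise selection in the else-branch (the numbers below are made up):

import numpy as np

derivative = np.array([[[1.0, 2.0], [3.0, 4.0]],
                       [[1.1, 1.9], [2.9, 4.2]]])   # two candidate estimates
errors = np.array([[[0.5, np.nan], [0.1, 0.3]],
                   [[0.2, 0.4],    [0.4, 0.1]]])

best, err = _get_best_estimate_single_method(derivative, errors)
print(best)   # each element comes from whichever candidate had the smaller error
print(err)    # [[0.2, 0.4], [0.1, 0.1]]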