This article collects typical usage examples of the scipy.maximum function in Python. If you have been wondering what scipy.maximum does, how to call it, or where it shows up in practice, the curated examples below should help.
Fifteen code examples of the maximum function are shown below, drawn from real projects and ordered by popularity by default. The snippets follow their source projects' conventions and assume import numpy as np and import scipy as sp (or import scipy); project-specific helpers and constants such as binarize_predictions or BINARY_CLASSIFICATION are referenced but not defined here.
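A note on the function itself before the examples: scipy.maximum was an alias for numpy.maximum, an element-wise maximum with NumPy broadcasting, and recent SciPy releases have deprecated and removed these NumPy aliases, so new code should call the NumPy name directly. A minimal sketch of the behavior the examples rely on:

import numpy as np

# scipy.maximum behaved like numpy.maximum: element-wise maximum with
# broadcasting. In the examples below it is mostly used to clip values
# away from zero (or away from 0/1) before taking logs or dividing.
a = np.array([0.2, -0.1, 0.7])
print(np.maximum(1e-15, a))               # -> [0.2, 1e-15, 0.7]
print(np.maximum([1, 5, 3], [4, 2, 6]))   # -> [4, 5, 6]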
Example 1: prior_log_loss

def prior_log_loss(frac_pos, task=BINARY_CLASSIFICATION):
    """Baseline log loss.

    For multiple classes or labels, return the values for each column.
    """
    eps = 1e-15
    frac_pos_ = sp.maximum(eps, frac_pos)
    if task != MULTICLASS_CLASSIFICATION:  # binary case
        frac_neg = 1 - frac_pos
        frac_neg_ = sp.maximum(eps, frac_neg)
        pos_class_log_loss_ = -frac_pos * np.log(frac_pos_)
        neg_class_log_loss_ = -frac_neg * np.log(frac_neg_)
        base_log_loss = pos_class_log_loss_ + neg_class_log_loss_
        # base_log_loss = mvmean(base_log_loss)
        # print('binary {}'.format(base_log_loss))
        # In the multilabel case, the right thing is to AVERAGE, not sum.
        # We return all the scores so we can normalize correctly later on.
    else:  # multiclass case
        # Need to renormalize the lines in the multiclass case
        fp = frac_pos_ / sum(frac_pos_)
        # Only ONE label is active (equal to 1) per line in the multiclass case
        pos_class_log_loss_ = -frac_pos * np.log(fp)
        base_log_loss = np.sum(pos_class_log_loss_)
    return base_log_loss
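A hypothetical usage sketch for the function above; BINARY_CLASSIFICATION and MULTICLASS_CLASSIFICATION are constants from the original project and are stubbed here with made-up values, and sp is assumed to be an older scipy where sp.maximum still exists (numpy works identically):

import numpy as np

# Stub constants; the real values live in the source project.
BINARY_CLASSIFICATION = 'binary.classification'
MULTICLASS_CLASSIFICATION = 'multiclass.classification'

frac_pos = np.array([0.1, 0.5, 0.9])  # per-column fraction of positives
print(prior_log_loss(frac_pos, task=BINARY_CLASSIFICATION))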
Example 2: nms

def nms(dets, proba, T):
    dets = dets.astype("float")
    if len(dets) == 0:
        return []
    x1 = dets[:, 0]
    y1 = dets[:, 1]
    x2 = dets[:, 2]
    y2 = dets[:, 3]
    scores = proba
    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    order = scores.argsort()[::-1]  # highest score first
    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(i)
        # Intersection of the current box with all remaining boxes
        xx1 = sp.maximum(x1[i], x1[order[1:]])
        yy1 = sp.maximum(y1[i], y1[order[1:]])
        xx2 = sp.minimum(x2[i], x2[order[1:]])
        yy2 = sp.minimum(y2[i], y2[order[1:]])
        w = sp.maximum(0.0, xx2 - xx1 + 1)
        h = sp.maximum(0.0, yy2 - yy1 + 1)
        inter = w * h
        ovr = inter / (areas[i] + areas[order[1:]] - inter)  # IoU
        inds = sp.where(ovr <= T)[0]
        order = order[inds + 1]
    return keep
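A toy run of this score-sorted NMS (assuming the usual import scipy as sp for the function above):

import numpy as np

dets = np.array([[10, 10, 50, 50],
                 [12, 12, 52, 52],
                 [100, 100, 140, 140]])
proba = np.array([0.9, 0.8, 0.7])
# Boxes 0 and 1 overlap heavily (IoU ~0.83), so only the higher-scoring
# box 0 survives; box 2 is disjoint and is kept.
print(nms(dets, proba, T=0.3))  # -> [0, 2]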
Example 3: nms

def nms(boxes, T=0.5):
    if len(boxes) == 0:
        return []
    boxes = boxes.astype("float")
    pick = []
    x1 = boxes[:, 0]
    y1 = boxes[:, 1]
    x2 = boxes[:, 2]
    y2 = boxes[:, 3]
    area = (x2 - x1 + 1) * (y2 - y1 + 1)
    idxs = sp.argsort(y2)
    while len(idxs) > 0:
        last = len(idxs) - 1
        i = idxs[last]
        pick.append(i)
        xx1 = sp.maximum(x1[i], x1[idxs[:last]])
        yy1 = sp.maximum(y1[i], y1[idxs[:last]])
        xx2 = sp.minimum(x2[i], x2[idxs[:last]])
        yy2 = sp.minimum(y2[i], y2[idxs[:last]])
        w = sp.maximum(0, xx2 - xx1 + 1)
        h = sp.maximum(0, yy2 - yy1 + 1)
        I = w * h
        # overlap_ratio = I / area[idxs[:last]]
        overlap_ratio = I / (area[i] + area[idxs[:last]] - I)
        idxs = sp.delete(idxs, sp.concatenate(([last], sp.where(overlap_ratio > T)[0])))
    return boxes[pick].astype("int")
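Unlike Example 2, this variant sorts by the bottom y-coordinate rather than by confidence and returns the surviving boxes themselves rather than indices, so which of two overlapping boxes wins depends on geometry, not score. A toy run:

import numpy as np

boxes = np.array([[10, 10, 50, 50],
                  [12, 12, 52, 52],
                  [100, 100, 140, 140]])
# Boxes 0 and 1 overlap heavily; the one with the larger y2 (box 1) is
# picked first and suppresses box 0.
print(nms(boxes, T=0.5))  # -> [[100 100 140 140], [12 12 52 52]]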
Example 4: bac_metric

def bac_metric(solution, prediction, task='binary.classification'):
    """Compute the normalized balanced accuracy.

    The binarization and the normalization differ for the multi-label
    and multi-class cases.
    """
    label_num = solution.shape[1]
    score = np.zeros(label_num)
    bin_prediction = binarize_predictions(prediction, task)
    [tn, fp, tp, fn] = acc_stat(solution, bin_prediction)
    # Bounding to avoid division by 0
    eps = 1e-15
    tp = sp.maximum(eps, tp)
    pos_num = sp.maximum(eps, tp + fn)
    tpr = tp / pos_num  # true positive rate (sensitivity)
    if (task != 'multiclass.classification') or (label_num == 1):
        tn = sp.maximum(eps, tn)
        neg_num = sp.maximum(eps, tn + fp)
        tnr = tn / neg_num  # true negative rate (specificity)
        bac = 0.5 * (tpr + tnr)
        base_bac = 0.5  # random predictions for the binary case
    else:
        bac = tpr
        base_bac = 1. / label_num  # random predictions for the multiclass case
    bac = mvmean(bac)  # average over all classes
    # Normalize: 0 for random, 1 for perfect
    score = (bac - base_bac) / sp.maximum(eps, (1 - base_bac))
    return score
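binarize_predictions, acc_stat, and mvmean are helpers from the original AutoML scoring code and are not shown here. The sketch below (bac_from_counts is a made-up name) isolates the eps-clipping trick with plain NumPy:

import numpy as np

def bac_from_counts(tn, fp, tp, fn, eps=1e-15):
    # Clipping numerator and denominator away from zero avoids 0/0 when
    # a class has no positive (or no negative) examples at all.
    tpr = np.maximum(eps, tp) / np.maximum(eps, tp + fn)  # sensitivity
    tnr = np.maximum(eps, tn) / np.maximum(eps, tn + fp)  # specificity
    return 0.5 * (tpr + tnr)

print(bac_from_counts(tn=40, fp=10, tp=35, fn=15))  # 0.5*(0.7+0.8) = 0.75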
Example 5: reload

def reload(self):
    if self.set:
        return False
    else:
        m, n = self.A.shape
        # Nonnegative random initialization: clip Gaussian samples at 0
        self.W0 = sp.maximum(sp.matrix(sp.random.normal(size=(m, self.rank))), 0)
        self.H0 = sp.maximum(sp.matrix(sp.random.normal(size=(self.rank, n))), 0)
        return True
Example 6: __init__

def __init__(self, A, r, eps=10 ** -4, T=500, **kwargs):
    self.rank = r
    self.tol = eps
    self.maxiter = T
    self.set = False
    try:
        self.A = sp.matrix(A)
    except ValueError:
        exit("Matrix incorrectly defined.")
    except Exception:
        exit("Unknown error occurred.")
    m, n = self.A.shape
    if "seed" in kwargs.keys():
        self.seed = kwargs["seed"]
    else:
        self.seed = False
    if "num" in kwargs.keys():
        # how many times to repeat the procedure with randomly generated matrices
        self.num = kwargs["num"]
    else:
        self.num = 1
    if "W0" in kwargs.keys():
        try:
            self.W0 = sp.matrix(kwargs["W0"])
            if (m, r) != self.W0.shape:
                raise ValueError
            else:
                self.set = True
        except Exception:
            self.W0 = sp.maximum(sp.matrix(sp.random.normal(size=(m, r))), 0)
    else:
        self.W0 = sp.maximum(sp.matrix(sp.random.normal(size=(m, r))), 0)
    if "H0" in kwargs.keys():
        try:
            self.H0 = sp.matrix(kwargs["H0"])
            if (r, n) != self.H0.shape:
                raise ValueError
            else:
                self.set = True
        except Exception:
            self.H0 = sp.maximum(sp.matrix(sp.random.normal(size=(r, n))), 0)
    else:
        self.H0 = sp.maximum(sp.matrix(sp.random.normal(size=(r, n))), 0)
    if "rw" in kwargs.keys():
        self.rw = kwargs["rw"]
    else:
        self.rw = 1
Example 7: balanced_accuracy

def balanced_accuracy(solution, prediction):
    y_type, solution, prediction = _check_targets(solution, prediction)
    if y_type not in ["binary", "multiclass", 'multilabel-indicator']:
        raise ValueError("{0} is not supported".format(y_type))
    if y_type == 'binary':
        # Do not transform into any multiclass representation
        pass
    elif y_type == 'multiclass':
        # Need to create a multiclass solution and multiclass predictions
        max_class = int(np.max((np.max(solution), np.max(prediction))))
        solution_binary = np.zeros((len(solution), max_class + 1))
        prediction_binary = np.zeros((len(prediction), max_class + 1))
        for i in range(len(solution)):
            solution_binary[i, int(solution[i])] = 1
            prediction_binary[i, int(prediction[i])] = 1
        solution = solution_binary
        prediction = prediction_binary
    elif y_type == 'multilabel-indicator':
        solution = solution.toarray()
        prediction = prediction.toarray()
    else:
        raise NotImplementedError('bac_metric does not support task type %s'
                                  % y_type)
    fn = np.sum(np.multiply(solution, (1 - prediction)), axis=0,
                dtype=float)
    tp = np.sum(np.multiply(solution, prediction), axis=0, dtype=float)
    # Bounding to avoid division by 0
    eps = 1e-15
    tp = sp.maximum(eps, tp)
    pos_num = sp.maximum(eps, tp + fn)
    tpr = tp / pos_num  # true positive rate (sensitivity)
    if y_type in ('binary', 'multilabel-indicator'):
        tn = np.sum(np.multiply((1 - solution), (1 - prediction)),
                    axis=0, dtype=float)
        fp = np.sum(np.multiply((1 - solution), prediction), axis=0,
                    dtype=float)
        tn = sp.maximum(eps, tn)
        neg_num = sp.maximum(eps, tn + fp)
        tnr = tn / neg_num  # true negative rate (specificity)
        bac = 0.5 * (tpr + tnr)
    elif y_type == 'multiclass':
        label_num = solution.shape[1]
        bac = tpr
    else:
        raise ValueError(y_type)
    return np.mean(bac)  # average over all classes
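A minimal illustration of the one-hot expansion performed in the multiclass branch above (labels 0..max_class become indicator columns):

import numpy as np

labels = np.array([0, 2, 1, 2])
one_hot = np.zeros((len(labels), labels.max() + 1))
one_hot[np.arange(len(labels)), labels] = 1
print(one_hot)
# [[1. 0. 0.]
#  [0. 0. 1.]
#  [0. 1. 0.]
#  [0. 0. 1.]]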
Example 8: f1_metric

def f1_metric(solution, prediction, task=BINARY_CLASSIFICATION):
    """
    Compute the normalized F1 measure.

    The binarization differs for the multi-label and multi-class cases.
    A non-weighted average over classes is taken. The score is normalized.

    :param solution:
    :param prediction:
    :param task:
    :return:
    """
    label_num = solution.shape[1]
    score = np.zeros(label_num)
    bin_prediction = binarize_predictions(prediction, task)
    [tn, fp, tp, fn] = acc_stat(solution, bin_prediction)
    # Bounding to avoid division by 0
    eps = 1e-15
    true_pos_num = sp.maximum(eps, tp + fn)
    found_pos_num = sp.maximum(eps, tp + fp)
    tp = sp.maximum(eps, tp)
    tpr = tp / true_pos_num  # true positive rate (recall)
    ppv = tp / found_pos_num  # positive predictive value (precision)
    arithmetic_mean = 0.5 * sp.maximum(eps, tpr + ppv)
    # Harmonic mean:
    f1 = tpr * ppv / arithmetic_mean
    # Average over all classes
    f1 = np.mean(f1)
    # Normalize: 0 for random, 1 for perfect
    if (task != MULTICLASS_CLASSIFICATION) or (label_num == 1):
        # How to choose the "base_f1"?
        # For the binary/multilabel classification case, one may want to predict all 1.
        # In that case tpr = 1 and ppv = frac_pos, so f1 = 2 * frac_pos / (1 + frac_pos)
        #   frac_pos = mvmean(solution.ravel())
        #   base_f1 = 2 * frac_pos / (1 + frac_pos)
        # Or predict random values with probability 0.5, in which case
        #   base_f1 = 0.5
        # The first choice is better only if frac_pos > 1/3.
        # Predicting according to the class prior frac_pos gives
        # f1 = tpr = ppv = frac_pos, which is worse than 0.5 if frac_pos < 0.5.
        # Because the F1 score is typically used when frac_pos is small (often < 0.1),
        # the best assumption is base_f1 = 0.5.
        base_f1 = 0.5
        # For the multiclass case this is not possible (though it does not make much
        # sense to use F1 for multiclass problems), so the best is to assign values
        # at random, giving tpr = ppv = frac_pos with frac_pos = 1 / label_num.
    else:
        base_f1 = 1. / label_num
    score = (f1 - base_f1) / sp.maximum(eps, (1 - base_f1))
    return score
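The "harmonic mean via arithmetic mean" step above is the usual F1 identity: tpr * ppv / (0.5 * (tpr + ppv)) equals 2*P*R / (P+R). A quick numeric check:

tpr, ppv = 0.8, 0.5
print(tpr * ppv / (0.5 * (tpr + ppv)))  # 0.6153846...
print(2 * tpr * ppv / (tpr + ppv))      # same value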
Example 9: periodic_jacobian

def periodic_jacobian(self, params, eps,
                      relativeScale=False, stepSizeCutoff=None):
    """
    Return a KeyedList of the derivatives of the model residuals w.r.t.
    the parameters.

    The method uses finite differences.

    Inputs:
     params -- Parameters about which to calculate the Jacobian
     eps -- Step size to take; may be a vector or a scalar
     relativeScale -- If True, eps is taken to be the fractional change
                      in parameter to use in finite differences
     stepSizeCutoff -- Minimum step size to take
    """
    res = self.resDict(params)
    orig_vals = scipy.array(params)
    if stepSizeCutoff is None:
        stepSizeCutoff = scipy.sqrt(_double_epsilon_)
    if relativeScale:
        eps_l = scipy.maximum(eps * abs(params), stepSizeCutoff)
    else:
        eps_l = scipy.maximum(eps * scipy.ones(len(params), scipy.float_),
                              stepSizeCutoff)
    J = KeyedList()  # will hold the result
    for resId in res.keys():
        J.set(resId, [])
    # Two-sided finite difference
    for ii in range(len(params)):
        params[ii] = orig_vals[ii] + eps_l[ii]
        resPlus = self.resDict(params)
        params[ii] = orig_vals[ii] - eps_l[ii]
        resMinus = self.resDict(params)
        params[ii] = orig_vals[ii]
        for resId in res.keys():
            res_deriv = (resPlus[resId] - resMinus[resId]) / (2. * eps_l[ii])
            J.get(resId).append(res_deriv)
    # NOTE: after the call to ComputeResidualsWithScaleFactors the Model's
    # parameters get updated, so we must reset them:
    self.params.update(params)
    return J
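A self-contained sketch of the same two-sided scheme, with np.maximum enforcing the step-size floor (central_diff is a made-up name; the original operates on model residuals rather than a plain function):

import numpy as np

def central_diff(f, x, eps=1e-6, cutoff=1e-12, relative=True):
    x = np.asarray(x, dtype=float)
    if relative:
        h = np.maximum(eps * np.abs(x), cutoff)  # fractional step, floored
    else:
        h = np.maximum(eps * np.ones_like(x), cutoff)
    grad = np.empty_like(x)
    for i in range(len(x)):
        xp, xm = x.copy(), x.copy()
        xp[i] += h[i]
        xm[i] -= h[i]
        grad[i] = (f(xp) - f(xm)) / (2.0 * h[i])
    return grad

print(central_diff(lambda p: p[0]**2 + 3 * p[1], [2.0, 1.0]))  # ~[4. 3.]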
Example 10: logloss

def logloss(act, pred):
    epsilon = 1e-4
    # Clip predictions away from 0 and 1 so the logs stay finite
    pred = sp.maximum(epsilon, pred)
    pred = sp.minimum(1 - epsilon, pred)
    ll = -1.0 / len(act) * sum(act * sp.log(pred) +
                               sp.subtract(1, act) * sp.log(sp.subtract(1, pred)))
    return ll
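Assuming import scipy as sp (or NumPy in its place), a quick call:

import numpy as np

act = np.array([1, 0, 1, 1])
pred = np.array([0.9, 0.2, 0.8, 0.6])
print(logloss(act, pred))  # ~0.2656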
Example 11: _set_reach_dist

def _set_reach_dist(setofobjects, point_index, epsilon):
    # Assumes that the query returns ordered (smallest distance first)
    # entries. This is the case for the balltree query...
    dists, indices = setofobjects.query(setofobjects.data[point_index],
                                        setofobjects._nneighbors[point_index])
    # Checks to see if there is more than one member in the neighborhood
    if sp.iterable(dists):
        # Masking processed values
        # n_pr is 'not processed'
        n_pr = indices[(setofobjects._processed[indices] < 1)[0].T]
        rdists = sp.maximum(dists[(setofobjects._processed[indices] < 1)[0].T],
                            setofobjects.core_dists_[point_index])
        new_reach = sp.minimum(setofobjects.reachability_[n_pr], rdists)
        setofobjects.reachability_[n_pr] = new_reach
        # Checks to see if everything is already processed;
        # if so, return control to the main loop
        if n_pr.size > 0:
            # Define the return order based on reachability distance
            return n_pr[sp.argmin(setofobjects.reachability_[n_pr])]
        else:
            return point_index
Example 12: hessian_elem

def hessian_elem(self, func, f0, params, i, j, epsi, epsj,
                 relativeScale, stepSizeCutoff, verbose):
    """
    Return the second partial derivative of func w.r.t. parameters i and j.

    f0: The value of the function at params
    epsi, epsj: Set the step sizes to try
    relativeScale: If True, step i is of size p[i] * eps, otherwise it is eps
    stepSizeCutoff: The minimum step size to take
    """
    origPi, origPj = params[i], params[j]
    if relativeScale:
        # Step sizes are given by eps * the value of the parameter,
        # but the minimum step size is stepSizeCutoff
        hi, hj = scipy.maximum((epsi * abs(origPi), epsj * abs(origPj)),
                               (stepSizeCutoff, stepSizeCutoff))
    else:
        hi, hj = epsi, epsj
    if i == j:
        params[i] = origPi + hi
        fp = func(params)
        params[i] = origPi - hi
        fm = func(params)
        element = (fp - 2 * f0 + fm) / hi**2
    else:
        # f(xi + hi, xj + hj)
        params[i] = origPi + hi
        params[j] = origPj + hj
        fpp = func(params)
        # f(xi + hi, xj - hj)
        params[i] = origPi + hi
        params[j] = origPj - hj
        fpm = func(params)
        # f(xi - hi, xj + hj)
        params[i] = origPi - hi
        params[j] = origPj + hj
        fmp = func(params)
        # f(xi - hi, xj - hj)
        params[i] = origPi - hi
        params[j] = origPj - hj
        fmm = func(params)
        element = (fpp - fpm - fmp + fmm) / (4 * hi * hj)
    params[i], params[j] = origPi, origPj
    self._notify(event='hessian element', i=i, j=j,
                 element=element)
    if verbose:
        print('hessian[%i, %i] = %g' % (i, j, element))
    return element
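A quick sanity check of the diagonal formula above on f(x) = x**2, whose second derivative is exactly 2 (second_deriv is a made-up helper):

def second_deriv(f, x, h=1e-4):
    # (f(x+h) - 2 f(x) + f(x-h)) / h^2, the i == j case above
    return (f(x + h) - 2.0 * f(x) + f(x - h)) / h**2

print(second_deriv(lambda x: x**2, 3.0))  # ~2.0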
Example 13: set_reach_dist

def set_reach_dist(SetOfObjects, point_index, epsilon):
    # Assumes that the query returns ordered (smallest distance first) entries.
    # This is the case for the balltree query...
    # ...switching to a query structure that does not do this will break things!
    # And break in a non-obvious way: for cases where multiple entries are tied
    # in reachability distance, it will cause the next point to be processed in
    # random order instead of the closest point. This may manifest in edge cases
    # where different runs of OPTICS give different ordered lists and hence
    # different clustering structure... removing reproducibility.
    distances, indices = SetOfObjects.query(SetOfObjects.data[point_index],
                                            SetOfObjects._nneighbors[point_index])
    # Checks to see if there is more than one member in the neighborhood
    if scipy.iterable(distances):
        # Masking processed values
        unprocessed = indices[(SetOfObjects._processed[indices] < 1)[0].T]
        rdistances = scipy.maximum(
            distances[(SetOfObjects._processed[indices] < 1)[0].T],
            SetOfObjects._core_dist[point_index])
        SetOfObjects._reachability[unprocessed] = scipy.minimum(
            SetOfObjects._reachability[unprocessed], rdistances)
        # Checks to see if everything is already processed;
        # if so, return control to the main loop
        if unprocessed.size > 0:
            # Define the return order based on reachability distance
            return sorted(zip(SetOfObjects._reachability[unprocessed],
                              unprocessed),
                          key=lambda reachability: reachability[0])[0][1]
        else:
            return point_index
    else:  # Not sure if this else statement is actually needed...
        return point_index
Example 14: psiTF_1d

def psiTF_1d(self, x=None, w=None):
    if x is None:
        x = self.x_1d
    if w is None:
        w = self.wx
    interaction = 4 * pi * hbar**2 * self.a1d / self.m
    # Density is clipped at zero wherever the potential exceeds mu
    return (scipy.maximum(0, (self.mu - self.harm_pot_1d(x, w)) / interaction))**.5
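The scipy.maximum(0, ...) above implements the Thomas-Fermi cutoff: the density (mu - V(x)) / g is kept where it is positive and set to zero elsewhere. A toy version with a harmonic potential (all numbers here are made up):

import numpy as np

x = np.linspace(-2, 2, 5)
mu, g = 1.0, 1.0
density = np.maximum(0, (mu - 0.5 * x**2) / g)
print(np.sqrt(density))  # [0. 0.707 1. 0.707 0.]: zero outside the cloud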
Example 15: entropyloss

def entropyloss(act, pred):
    epsilon = 1e-15
    # Clip predictions away from 0 and 1 so the logs stay finite
    pred = sp.maximum(epsilon, pred)
    pred = sp.minimum(1 - epsilon, pred)
    el = sum(act * sp.log10(pred) + sp.subtract(1, act) * sp.log10(sp.subtract(1, pred)))
    el = el * -1.0 / len(act)
    return el
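Note that unlike Example 10 this version uses base-10 logs and a much tighter epsilon. A quick call (assuming import scipy as sp):

import numpy as np

act = np.array([1, 0, 1])
pred = np.array([0.9, 0.1, 0.8])
print(entropyloss(act, pred))  # ~0.0628 (base-10 cross-entropy)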