本文整理汇总了Python中scipy.subtract方法的典型用法代码示例。如果您正苦于以下问题:Python scipy.subtract方法的具体用法?Python scipy.subtract怎么用?Python scipy.subtract使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类scipy
的用法示例。
在下文中一共展示了scipy.subtract方法的8个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: log_loss
# 需要导入模块: import scipy [as 别名]
# 或者: from scipy import subtract [as 别名]
def log_loss(actual, predicted, epsilon=1e-15):
    """
    Calculate and return the log loss (error) of a set of predicted probabilities
    (hint: see sklearn classifier's predict_proba methods).
    Source: https://www.kaggle.com/wiki/LogarithmicLoss

    In plain English, this error metric is typically used where you have to predict
    that something is true or false with a probability (likelihood) ranging from
    definitely true (1) to equally true (0.5) to definitely false (0).

    Note: also see (and use) scikit-learn:
    http://scikit-learn.org/stable/modules/generated/sklearn.metrics.log_loss.html#sklearn.metrics.log_loss

    :param actual: array-like of true binary labels (0 or 1)
    :param predicted: array-like of predicted probabilities for the positive class
    :param epsilon: clipping bound keeping log() away from log(0)
    :return: mean negative log-likelihood (a non-negative float)
    """
    # Local import: scipy removed its numpy aliases (sp.maximum/sp.log/sp.subtract)
    # in SciPy 1.12, so use numpy directly.
    import numpy as np

    actual = np.asarray(actual)
    # Clip probabilities into (epsilon, 1 - epsilon) so log() stays finite.
    predicted = np.clip(np.asarray(predicted), epsilon, 1 - epsilon)
    ll = np.sum(actual * np.log(predicted) + (1 - actual) * np.log(1 - predicted))
    return ll * -1.0 / len(actual)
示例2: binary_logloss
# 需要导入模块: import scipy [as 别名]
# 或者: from scipy import subtract [as 别名]
def binary_logloss(p, y):
    """
    Mean binary log loss of predicted probabilities ``p`` against labels ``y``.

    :param p: array-like of predicted probabilities for the positive class
    :param y: array-like of true binary labels (0 or 1)
    :return: mean negative log-likelihood (a non-negative float)
    """
    # Local import: scipy removed its numpy aliases (sp.maximum/sp.log/sp.subtract)
    # in SciPy 1.12, so use numpy directly.
    import numpy as np

    epsilon = 1e-15
    y = np.asarray(y)
    # Clip probabilities into (epsilon, 1 - epsilon) so log() stays finite.
    p = np.clip(np.asarray(p), epsilon, 1 - epsilon)
    res = np.sum(y * np.log(p) + (1 - y) * np.log(1 - p))
    return res * -1.0 / len(y)
示例3: logloss
# 需要导入模块: import scipy [as 别名]
# 或者: from scipy import subtract [as 别名]
def logloss(p, y):
    """
    Mean log loss of predicted probabilities ``p`` against binary labels ``y``.

    :param p: array-like of predicted probabilities for the positive class
    :param y: array-like of true binary labels (0 or 1)
    :return: mean negative log-likelihood (a non-negative float)
    """
    # Local import: scipy removed its numpy aliases (sp.maximum/sp.log/sp.subtract)
    # in SciPy 1.12, so use numpy directly.
    import numpy as np

    epsilon = 1e-15
    y = np.asarray(y)
    # Clip probabilities into (epsilon, 1 - epsilon) so log() stays finite.
    p = np.clip(np.asarray(p), epsilon, 1 - epsilon)
    ll = np.sum(y * np.log(p) + (1 - y) * np.log(1 - p))
    return ll * -1.0 / len(y)
# B. Apply hash trick of the original csv row
# for simplicity, we treat both integer and categorical features as categorical
# INPUT:
# csv_row: a csv dictionary, ex: {'Label': '1', 'I1': '357', 'I2': '', ...}
# D: the max index that we can hash to
# OUTPUT:
# x: a list of indices that its value is 1
示例4: logloss
# 需要导入模块: import scipy [as 别名]
# 或者: from scipy import subtract [as 别名]
def logloss(p, y):
    """
    Log loss of a single predicted probability ``p`` against a single label ``y``.

    :param p: predicted probability for the positive class (scalar)
    :param y: true binary label (0 or 1)
    :return: negative log-likelihood of the single prediction
    """
    # Local import: the original used sp.log/sp.subtract, numpy aliases that
    # SciPy removed in 1.12; for scalars math.log is the right tool anyway.
    import math

    epsilon = 1e-15
    # Clamp p into (epsilon, 1 - epsilon) so log() stays finite.
    p = max(min(p, 1.0 - epsilon), epsilon)
    # Single-sample loss: the original's trailing "* -1.0 / 1" was a no-op divide.
    return -(y * math.log(p) + (1 - y) * math.log(1 - p))
# B. Apply hash trick of the original csv row
# for simplicity, we treat both integer and categorical features as categorical
# INPUT:
# csv_row: a csv dictionary, ex: {'Label': '1', 'I1': '357', 'I2': '', ...}
# D: the max index that we can hash to
# OUTPUT:
# x: a list of indices that its value is 1
示例5: logloss
# 需要导入模块: import scipy [as 别名]
# 或者: from scipy import subtract [as 别名]
def logloss(act, pred):
    """
    The competition's official loss function (mean logarithmic loss).

    :param act: array-like of true binary labels (0 or 1)
    :param pred: array-like of predicted probabilities for the positive class
    :return: mean negative log-likelihood (a non-negative float)
    """
    # Local import: scipy removed its numpy aliases (sp.maximum/sp.log/sp.subtract)
    # in SciPy 1.12, so use numpy directly.
    import numpy as np

    epsilon = 1e-15
    act = np.asarray(act)
    # Clip probabilities into (epsilon, 1 - epsilon) so log() stays finite.
    pred = np.clip(np.asarray(pred), epsilon, 1 - epsilon)
    ll = np.sum(act * np.log(pred) + (1 - act) * np.log(1 - pred))
    return ll * -1.0 / len(act)
示例6: log_loss
# 需要导入模块: import scipy [as 别名]
# 或者: from scipy import subtract [as 别名]
def log_loss( act, pred ):
    """
    Mean logarithmic loss of predictions ``pred`` against binary labels ``act``.

    :param act: array-like of true binary labels (0 or 1)
    :param pred: array-like of predicted probabilities for the positive class
    :return: mean negative log-likelihood (a non-negative float)
    """
    # Local import: scipy removed its numpy aliases (sp.maximum/sp.log/sp.subtract)
    # in SciPy 1.12, so use numpy directly.
    import numpy as np

    epsilon = 1e-15
    act = np.asarray(act)
    # Clip probabilities into (epsilon, 1 - epsilon) so log() stays finite.
    pred = np.clip(np.asarray(pred), epsilon, 1 - epsilon)
    ll = np.sum(act * np.log(pred) + (1 - act) * np.log(1 - pred))
    return ll * -1.0 / len(act)
示例7: llfun
# 需要导入模块: import scipy [as 别名]
# 或者: from scipy import subtract [as 别名]
def llfun(act, pred):
    """
    Mean log loss where ``pred`` is a 2-D array of class probabilities and
    column 1 holds the positive-class probability (e.g. the output of a
    scikit-learn classifier's ``predict_proba``).

    :param act: array-like of true binary labels (0 or 1)
    :param pred: 2-D array-like, shape (n_samples, 2); column 1 is P(y=1)
    :return: mean negative log-likelihood (a non-negative float)
    """
    # Local import: scipy removed its numpy aliases (sp.maximum/sp.log/sp.subtract)
    # in SciPy 1.12, so use numpy directly.
    import numpy as np

    epsilon = 1e-15
    act = np.asarray(act)
    # np.asarray also accepts plain nested lists (the original required an
    # ndarray for the pred[:, 1] indexing) — backward-compatible generalization.
    p_true = np.asarray(pred)[:, 1]
    # Clip probabilities into (epsilon, 1 - epsilon) so log() stays finite.
    p_true = np.clip(p_true, epsilon, 1 - epsilon)
    ll = np.sum(act * np.log(p_true) + (1 - act) * np.log(1 - p_true))
    return ll * -1.0 / len(act)
示例8: logloss
# 需要导入模块: import scipy [as 别名]
# 或者: from scipy import subtract [as 别名]
def logloss(act, pred):
    """
    Mean logarithmic loss of predictions ``pred`` against binary labels ``act``.

    :param act: array-like of true binary labels (0 or 1)
    :param pred: array-like of predicted probabilities for the positive class
    :return: mean negative log-likelihood (a non-negative float)
    """
    # Local import: scipy removed its numpy aliases (sp.maximum/sp.log/sp.subtract)
    # in SciPy 1.12, so use numpy directly.
    import numpy as np

    epsilon = 1e-15
    act = np.asarray(act)
    # Clip probabilities into (epsilon, 1 - epsilon) so log() stays finite.
    pred = np.clip(np.asarray(pred), epsilon, 1 - epsilon)
    ll = np.sum(act * np.log(pred) + (1 - act) * np.log(1 - pred))
    return ll * -1.0 / len(act)