This article collects typical usage examples of the Python method sandbox.util.SparseUtils.SparseUtils.getOmegaListPtr. If you are wondering what SparseUtils.getOmegaListPtr does, how to call it, or where to find examples of it in use, the curated code samples below may help. You can also explore the containing class, sandbox.util.SparseUtils.SparseUtils, further.
The following presents 15 code examples of SparseUtils.getOmegaListPtr, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
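Before the numbered examples, here is a minimal sketch of the calling convention they all rely on. This is an editor's illustration distilled from Example 2 below (the matrix X and its size are placeholders, not part of the sandbox project): getOmegaListPtr(X) returns a CSR-style pair (indPtr, colInds), and the observed column indices of row i are the slice colInds[indPtr[i]:indPtr[i+1]].

import numpy
import scipy.sparse
from sandbox.util.SparseUtils import SparseUtils

# Small random sparse matrix purely for illustration
X = scipy.sparse.rand(10, 5, 0.2).tocsr()

# CSR-style row pointers and column indices of the non-zero entries
indPtr, colInds = SparseUtils.getOmegaListPtr(X)

for i in range(X.shape[0]):
    # omegai: the observed (non-zero) items of row i
    omegai = colInds[indPtr[i]:indPtr[i+1]]
    assert numpy.array_equal(omegai, X.toarray()[i, :].nonzero()[0])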
Example 1: testLocalAucApprox
# Required imports: from sandbox.util.SparseUtils import SparseUtils [as alias]
# Or: from sandbox.util.SparseUtils.SparseUtils import getOmegaListPtr [as alias]
def testLocalAucApprox(self):
    m = 100
    n = 200
    k = 2
    X, U, s, V, wv = SparseUtils.generateSparseBinaryMatrix((m, n), k, csarray=True, verbose=True)

    w = 1.0
    localAuc = MCEvaluator.localAUC(X, U, V, w)

    samples = numpy.arange(150, 200, 10)

    for i, sampleSize in enumerate(samples):
        numAucSamples = sampleSize
        localAuc2 = MCEvaluator.localAUCApprox(SparseUtils.getOmegaListPtr(X), U, V, w, numAucSamples)
        self.assertAlmostEqual(localAuc2, localAuc, 1)

    # Try smaller w
    w = 0.5
    localAuc = MCEvaluator.localAUC(X, U, V, w)

    samples = numpy.arange(50, 200, 10)

    for i, sampleSize in enumerate(samples):
        numAucSamples = sampleSize
        localAuc2 = MCEvaluator.localAUCApprox(SparseUtils.getOmegaListPtr(X), U, V, w, numAucSamples)
        self.assertAlmostEqual(localAuc2, localAuc, 1)
Example 2: testGetOmegaListPtr
# Required imports: from sandbox.util.SparseUtils import SparseUtils [as alias]
# Or: from sandbox.util.SparseUtils.SparseUtils import getOmegaListPtr [as alias]
def testGetOmegaListPtr(self):
    import sppy
    m = 10
    n = 5

    X = scipy.sparse.rand(m, n, 0.1)
    X = X.tocsr()

    indPtr, colInds = SparseUtils.getOmegaListPtr(X)

    for i in range(m):
        omegai = colInds[indPtr[i]:indPtr[i+1]]
        nptst.assert_array_almost_equal(omegai, X.toarray()[i, :].nonzero()[0])

    Xsppy = sppy.csarray(X)
    indPtr, colInds = SparseUtils.getOmegaListPtr(Xsppy)

    for i in range(m):
        omegai = colInds[indPtr[i]:indPtr[i+1]]
        nptst.assert_array_almost_equal(omegai, X.toarray()[i, :].nonzero()[0])

    # Test a zero array (scipy doesn't work in this case)
    X = sppy.csarray((m, n))
    indPtr, colInds = SparseUtils.getOmegaListPtr(X)

    for i in range(m):
        omegai = colInds[indPtr[i]:indPtr[i+1]]
Example 3: profileDerivativeUiApprox
# Required imports: from sandbox.util.SparseUtils import SparseUtils [as alias]
# Or: from sandbox.util.SparseUtils.SparseUtils import getOmegaListPtr [as alias]
def profileDerivativeUiApprox(self):
    k = 10
    U = numpy.random.rand(self.m, k)
    V = numpy.random.rand(self.n, k)

    indPtr, colInds = SparseUtils.getOmegaListPtr(self.X)

    gp = numpy.random.rand(self.n)
    gp /= gp.sum()
    gq = numpy.random.rand(self.n)
    gq /= gq.sum()

    j = 3
    numRowSamples = 100
    numAucSamples = 10

    permutedRowInds = numpy.array(numpy.random.permutation(self.m), numpy.uint32)
    permutedColInds = numpy.array(numpy.random.permutation(self.n), numpy.uint32)

    maxLocalAuc = MaxLocalAUC(k, w=0.9)
    normGp, normGq = maxLocalAuc.computeNormGpq(indPtr, colInds, gp, gq, self.m)

    lmbda = 0.001
    normalise = True

    learner = MaxLocalAUCCython()

    def run():
        numRuns = 10
        for j in range(numRuns):
            for i in range(self.m):
                learner.derivativeUiApprox(indPtr, colInds, U, V, gp, gq, permutedColInds, i)

    ProfileUtils.profile("run()", globals(), locals())
Example 4: f1AtK
# Required imports: from sandbox.util.SparseUtils import SparseUtils [as alias]
# Or: from sandbox.util.SparseUtils.SparseUtils import getOmegaListPtr [as alias]
def f1AtK(positiveArray, orderedItems, k, verbose=False):
    """
    Return the F1@k measure for each row of the predicted matrix UV.T
    using real values in positiveArray. positiveArray is a tuple (indPtr, colInds).

    :param orderedItems: The ordered items for each user (users are rows, items are cols)

    :param verbose: If true return the F1s and the first k recommendations for each row, otherwise just the mean F1
    """
    if type(positiveArray) != tuple:
        positiveArray = SparseUtils.getOmegaListPtr(positiveArray)

    orderedItems = orderedItems[:, 0:k]
    indPtr, colInds = positiveArray

    precisions = MCEvaluatorCython.precisionAtk(indPtr, colInds, orderedItems)
    recalls = MCEvaluatorCython.recallAtk(indPtr, colInds, orderedItems)

    denominator = precisions + recalls
    # Guard against division by zero when precision and recall are both zero
    denominator += denominator == 0

    f1s = 2 * precisions * recalls / denominator

    if verbose:
        return f1s, orderedItems
    else:
        return f1s.mean()
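As a usage sketch (hedged: it mirrors Examples 6 and 9 below and assumes f1AtK is exposed as MCEvaluator.f1AtK; the learner, trainX and testX objects are placeholders, not part of this example), orderedItems typically comes from MCEvaluatorCython.recommendAtk computed on the training matrix, and f1AtK is then evaluated against the held-out test matrix:

# Sketch only: assumes a trained learner with factors learner.U, learner.V,
# a training matrix trainX and a held-out test matrix testX (cf. Examples 6 and 9).
k = 5
orderedItems = MCEvaluatorCython.recommendAtk(learner.U, learner.V, k, trainX)
f1 = MCEvaluator.f1AtK(SparseUtils.getOmegaListPtr(testX), orderedItems, k)
logging.debug("F1@" + str(k) + ": " + str('%.4f' % f1))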
Example 5: profileObjective
# Required imports: from sandbox.util.SparseUtils import SparseUtils [as alias]
# Or: from sandbox.util.SparseUtils.SparseUtils import getOmegaListPtr [as alias]
def profileObjective(self):
    k = 10
    U = numpy.random.rand(self.m, k)
    V = numpy.random.rand(self.n, k)

    indPtr, colInds = SparseUtils.getOmegaListPtr(self.X)
    colIndsProbabilities = numpy.ones(colInds.shape[0])

    for i in range(self.m):
        colIndsProbabilities[indPtr[i]:indPtr[i + 1]] /= colIndsProbabilities[indPtr[i]:indPtr[i + 1]].sum()
        colIndsProbabilities[indPtr[i]:indPtr[i + 1]] = numpy.cumsum(colIndsProbabilities[indPtr[i]:indPtr[i + 1]])

    r = numpy.zeros(self.m)
    lmbda = 0.001
    rho = 1.0
    numAucSamples = 100

    def run():
        numRuns = 10
        for i in range(numRuns):
            objectiveApprox(indPtr, colInds, indPtr, colInds, U, V, r, numAucSamples, lmbda, rho, False)

    ProfileUtils.profile("run()", globals(), locals())
Example 6: learnPredictRanking
# Required imports: from sandbox.util.SparseUtils import SparseUtils [as alias]
# Or: from sandbox.util.SparseUtils.SparseUtils import getOmegaListPtr [as alias]
def learnPredictRanking(args):
    """
    A function to train on a training set and test on a test set, for a number
    of values of rho.
    """
    learner, trainX, testX, rhos = args
    logging.debug("k=" + str(learner.getK()))
    logging.debug(learner)

    testInds = testX.nonzero()
    trainXIter = []
    testIndList = []

    for rho in rhos:
        trainXIter.append(trainX)
        testIndList.append(testInds)

    trainXIter = iter(trainXIter)

    ZIter = learner.learnModel(trainXIter, iter(rhos))

    metrics = numpy.zeros(rhos.shape[0])

    for j, Z in enumerate(ZIter):
        U, s, V = Z
        U = U * s

        U = numpy.ascontiguousarray(U)
        V = numpy.ascontiguousarray(V)

        testOrderedItems = MCEvaluatorCython.recommendAtk(U, V, learner.recommendSize, trainX)

        if learner.metric == "mrr":
            metrics[j] = MCEvaluator.mrrAtK(SparseUtils.getOmegaListPtr(testX), testOrderedItems, learner.recommendSize)
            logging.debug("MRR@" + str(learner.recommendSize) + ": " + str('%.4f' % metrics[j]) + " " + str(learner))
        elif learner.metric == "f1":
            metrics[j] = MCEvaluator.f1AtK(SparseUtils.getOmegaListPtr(testX), testOrderedItems, learner.recommendSize)
            logging.debug("F1@" + str(learner.recommendSize) + ": " + str('%.4f' % metrics[j]) + " " + str(learner))
        else:
            raise ValueError("Unknown metric " + learner.metric)

        gc.collect()

    return metrics
Example 7: testScale
# Required imports: from sandbox.util.SparseUtils import SparseUtils [as alias]
# Or: from sandbox.util.SparseUtils.SparseUtils import getOmegaListPtr [as alias]
def testScale(self):
    """
    Look at the scales of the unnormalised gradients.
    """
    m = 100
    n = 400
    k = 3
    X = SparseUtils.generateSparseBinaryMatrix((m, n), k, csarray=True)

    w = 0.1
    eps = 0.001

    learner = MaxAUCTanh(k, w)
    learner.normalise = False
    learner.lmbdaU = 1.0
    learner.lmbdaV = 1.0
    learner.rho = 1.0
    learner.numAucSamples = 100

    indPtr, colInds = SparseUtils.getOmegaListPtr(X)
    r = numpy.random.rand(m)

    U = numpy.random.rand(X.shape[0], k)
    V = numpy.random.rand(X.shape[1], k)

    gi = numpy.random.rand(m)
    gi /= gi.sum()
    gp = numpy.random.rand(n)
    gp /= gp.sum()
    gq = numpy.random.rand(n)
    gq /= gq.sum()

    permutedRowInds = numpy.array(numpy.random.permutation(m), numpy.uint32)
    permutedColInds = numpy.array(numpy.random.permutation(n), numpy.uint32)

    maxLocalAuc = MaxLocalAUC(k, w)
    normGp, normGq = maxLocalAuc.computeNormGpq(indPtr, colInds, gp, gq, m)

    normDui = 0
    for i in range(m):
        du = learner.derivativeUi(indPtr, colInds, U, V, r, gi, gp, gq, i)
        normDui += numpy.linalg.norm(du)

    normDui /= float(m)
    print(normDui)

    normDvi = 0
    for i in range(n):
        dv = learner.derivativeVi(indPtr, colInds, U, V, r, gi, gp, gq, i)
        normDvi += numpy.linalg.norm(dv)

    normDvi /= float(n)
    print(normDvi)
Example 8: testLocalAucApprox2
# Required imports: from sandbox.util.SparseUtils import SparseUtils [as alias]
# Or: from sandbox.util.SparseUtils.SparseUtils import getOmegaListPtr [as alias]
def testLocalAucApprox2(self):
    m = 100
    n = 200
    k = 5
    numInds = 100
    X, U, s, V, wv = SparseUtils.generateSparseBinaryMatrix((m, n), k, csarray=True, verbose=True)

    r = numpy.ones(m) * -10
    w = 0.5
    localAuc = MCEvaluator.localAUC(X, U, V, w)

    samples = numpy.arange(50, 200, 10)

    for i, sampleSize in enumerate(samples):
        localAuc2 = MCEvaluator.localAUCApprox(SparseUtils.getOmegaListPtr(X), U, V, w, sampleSize)
        self.assertAlmostEqual(localAuc2, localAuc, 1)

    # Test more accurately
    sampleSize = 1000
    localAuc2 = MCEvaluator.localAUCApprox(SparseUtils.getOmegaListPtr(X), U, V, w, sampleSize)
    self.assertAlmostEqual(localAuc2, localAuc, 2)

    # Now set a high r
    Z = U.dot(V.T)
    localAuc = MCEvaluator.localAUCApprox(SparseUtils.getOmegaListPtr(X), U, V, w, sampleSize)

    for i, sampleSize in enumerate(samples):
        localAuc2 = MCEvaluator.localAUCApprox(SparseUtils.getOmegaListPtr(X), U, V, w, sampleSize)
        self.assertAlmostEqual(localAuc2, localAuc, 1)

    # Test more accurately
    sampleSize = 1000
    localAuc2 = MCEvaluator.localAUCApprox(SparseUtils.getOmegaListPtr(X), U, V, w, sampleSize)
    self.assertAlmostEqual(localAuc2, localAuc, 2)
Example 9: computeTestMRR
# Required imports: from sandbox.util.SparseUtils import SparseUtils [as alias]
# Or: from sandbox.util.SparseUtils.SparseUtils import getOmegaListPtr [as alias]
def computeTestMRR(args):
    """
    A simple function for outputting the MRR of a learner, e.g. in conjunction with
    parallel model selection.
    """
    trainX, testX, learner = args
    learner.learnModel(trainX)

    testOrderedItems = MCEvaluatorCython.recommendAtk(learner.U, learner.V, learner.recommendSize, trainX)
    mrr = MCEvaluator.mrrAtK(SparseUtils.getOmegaListPtr(testX), testOrderedItems, learner.recommendSize)

    try:
        learnerStr = learner.modelParamsStr()
    except:
        learnerStr = str(learner)

    logging.debug("MRR@" + str(learner.recommendSize) + ": " + str("%.4f" % mrr) + " " + learnerStr)

    return mrr
Example 10: stratifiedRecallAtK
# Required imports: from sandbox.util.SparseUtils import SparseUtils [as alias]
# Or: from sandbox.util.SparseUtils.SparseUtils import getOmegaListPtr [as alias]
def stratifiedRecallAtK(positiveArray, orderedItems, k, itemCounts, beta=0.5, verbose=False):
    """
    Compute the average stratified recall@k score for each row of the predicted matrix UV.T
    using real values in positiveArray. positiveArray is a tuple (indPtr, colInds).

    :param orderedItems: The ordered items for each user (users are rows, items are cols)

    :param verbose: If true return the recalls and the first k recommendations for each row, otherwise just the weighted average recall
    """
    if type(positiveArray) != tuple:
        positiveArray = SparseUtils.getOmegaListPtr(positiveArray)

    orderedItems = orderedItems[:, 0:k]
    indPtr, colInds = positiveArray

    recalls, denominators = MCEvaluatorCython.stratifiedRecallAtk(indPtr, colInds, orderedItems, itemCounts, beta)

    if verbose:
        return recalls, orderedItems
    else:
        return numpy.average(recalls, weights=denominators)
Example 11: precisionAtK
# Required imports: from sandbox.util.SparseUtils import SparseUtils [as alias]
# Or: from sandbox.util.SparseUtils.SparseUtils import getOmegaListPtr [as alias]
def precisionAtK(positiveArray, orderedItems, k, verbose=False):
    """
    Compute the average precision@k score for each row of the predicted matrix UV.T
    using real values in positiveArray. positiveArray is a tuple (indPtr, colInds).

    :param orderedItems: The ordered items for each user (users are rows, items are cols)

    :param verbose: If true return the precisions and the first k recommendations for each row, otherwise just the mean precision
    """
    if type(positiveArray) != tuple:
        positiveArray = SparseUtils.getOmegaListPtr(positiveArray)

    orderedItems = orderedItems[:, 0:k]
    indPtr, colInds = positiveArray

    precisions = MCEvaluatorCython.precisionAtk(indPtr, colInds, orderedItems)

    if verbose:
        return precisions, orderedItems
    else:
        return precisions.mean()
Example 12: localAUCApprox
# Required imports: from sandbox.util.SparseUtils import SparseUtils [as alias]
# Or: from sandbox.util.SparseUtils.SparseUtils import getOmegaListPtr [as alias]
def localAUCApprox(positiveArray, U, V, w, numAucSamples=50, r=None, allArray=None):
    """
    Compute the estimated local AUC for the score functions UV^T relative to X with
    quantile w. The AUC is computed using positiveArray which is a tuple (indPtr, colInds)
    assuming allArray is None. If allArray is not None then positive items are chosen
    from positiveArray and negative ones are chosen to complement allArray.
    """
    if type(positiveArray) != tuple:
        positiveArray = SparseUtils.getOmegaListPtr(positiveArray)

    indPtr, colInds = positiveArray

    U = numpy.ascontiguousarray(U)
    V = numpy.ascontiguousarray(V)

    if r is None:
        r = SparseUtilsCython.computeR(U, V, w, numAucSamples)

    if allArray is None:
        return MCEvaluatorCython.localAUCApprox(indPtr, colInds, indPtr, colInds, U, V, numAucSamples, r)
    else:
        allIndPtr, allColInd = allArray
        return MCEvaluatorCython.localAUCApprox(indPtr, colInds, allIndPtr, allColInd, U, V, numAucSamples, r)
Example 13: testRestrictOmega
# Required imports: from sandbox.util.SparseUtils import SparseUtils [as alias]
# Or: from sandbox.util.SparseUtils.SparseUtils import getOmegaListPtr [as alias]
def testRestrictOmega(self):
    m = 50
    n = 100
    k = 5
    u = 0.5
    w = 1 - u
    X = SparseUtils.generateSparseBinaryMatrix((m, n), k, w, csarray=True)

    indPtr, colInds = SparseUtils.getOmegaListPtr(X)

    runs = 100

    for i in range(runs):
        colSubset = numpy.random.choice(n, 20, replace=False)

        newIndPtr, newColInds = restrictOmega(indPtr, colInds, colSubset)

        for i in range(m):
            omegai = colInds[indPtr[i]:indPtr[i+1]]
            omegai2 = newColInds[newIndPtr[i]:newIndPtr[i+1]]

            a = numpy.setdiff1d(omegai, omegai2)
            self.assertEqual(numpy.intersect1d(a, colSubset).shape[0], 0)
Example 14: localAUC
# Required imports: from sandbox.util.SparseUtils import SparseUtils [as alias]
# Or: from sandbox.util.SparseUtils.SparseUtils import getOmegaListPtr [as alias]
def localAUC(positiveArray, U, V, w, numRowInds=None):
    """
    Compute the local AUC for the score functions UV^T relative to X with
    quantile w.
    """
    if numRowInds is None:
        numRowInds = V.shape[0]

    if type(positiveArray) != tuple:
        positiveArray = SparseUtils.getOmegaListPtr(positiveArray)

    # For now let's compute the full matrix
    Z = U.dot(V.T)

    r = SparseUtilsCython.computeR(U, V, w, numRowInds)

    localAuc = numpy.zeros(U.shape[0])
    allInds = numpy.arange(V.shape[0])
    indPtr, colInds = positiveArray

    for i in range(U.shape[0]):
        omegai = colInds[indPtr[i]:indPtr[i+1]]
        omegaBari = numpy.setdiff1d(allInds, omegai, assume_unique=True)

        if omegai.shape[0] * omegaBari.shape[0] != 0:
            partialAuc = 0

            for p in omegai:
                for q in omegaBari:
                    if Z[i, p] > Z[i, q] and Z[i, p] > r[i]:
                        partialAuc += 1

            localAuc[i] = partialAuc / float(omegai.shape[0] * omegaBari.shape[0])

    localAuc = localAuc.mean()

    return localAuc
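Restated as a formula (an editor's summary of what the double loop above computes, not text from the original source), with omega_i the observed items of row i, its complement omega-bar_i, Z = UV^T, and r_i the quantile threshold derived from w:

\mathrm{LAUC}_i = \frac{1}{|\omega_i|\,|\bar{\omega}_i|} \sum_{p \in \omega_i} \sum_{q \in \bar{\omega}_i} \mathbf{1}\left[ Z_{ip} > Z_{iq} \ \text{and}\ Z_{ip} > r_i \right],
\qquad
\mathrm{LAUC} = \frac{1}{m} \sum_{i=1}^{m} \mathrm{LAUC}_i

localAUCApprox in Example 12 estimates the same quantity by sampling positive/negative pairs instead of enumerating them all.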
Example 15: testDerivativeU
# Required imports: from sandbox.util.SparseUtils import SparseUtils [as alias]
# Or: from sandbox.util.SparseUtils.SparseUtils import getOmegaListPtr [as alias]
def testDerivativeU(self):
    m = 10
    n = 20
    nnzPerRow = 5
    X = SparseUtils.generateSparseBinaryMatrix((m, n), nnzPerRow, csarray=True)

    k = 5
    eps = 0.05
    learner = MaxAUCSigmoid(k)
    learner.normalise = False
    learner.lmbdaU = 0
    learner.lmbdaV = 0
    learner.rho = 1.0
    learner.numAucSamples = n

    numRuns = 20
    gi = numpy.random.rand(m)
    gi /= gi.sum()
    gp = numpy.random.rand(n)
    gp /= gp.sum()
    gq = numpy.random.rand(n)
    gq /= gq.sum()

    indPtr, colInds = SparseUtils.getOmegaListPtr(X)

    for s in range(numRuns):
        U = numpy.random.randn(m, k)
        V = numpy.random.randn(n, k)

        deltaU = numpy.zeros(U.shape)
        for i in range(X.shape[0]):
            deltaU[i, :] = learner.derivativeUi(indPtr, colInds, U, V, gp, gq, i)

        deltaU2 = numpy.zeros(U.shape)
        eps = 10**-8

        for i in range(m):
            for j in range(k):
                tempU = U.copy()
                tempU[i, j] += eps
                obj1 = learner.objective(indPtr, colInds, indPtr, colInds, tempU, V, gp, gq)

                tempU = U.copy()
                tempU[i, j] -= eps
                obj2 = learner.objective(indPtr, colInds, indPtr, colInds, tempU, V, gp, gq)

                deltaU2[i, j] = (obj1 - obj2) / (2 * eps)
            #deltaU2[i,:] = deltaU2[i,:]/numpy.linalg.norm(deltaU2[i,:])

        #print(deltaU*100)
        #print(deltaU2*100)
        nptst.assert_almost_equal(deltaU, deltaU2, 3)

    # Try r != 0 and rho > 0
    for s in range(numRuns):
        U = numpy.random.randn(m, k)
        V = numpy.random.randn(n, k)
        learner.rho = 0.1

        deltaU = numpy.zeros(U.shape)
        for i in range(X.shape[0]):
            deltaU[i, :] = learner.derivativeUi(indPtr, colInds, U, V, gp, gq, i)

        deltaU2 = numpy.zeros(U.shape)
        eps = 10**-9

        for i in range(m):
            for j in range(k):
                tempU = U.copy()
                tempU[i, j] += eps
                obj1 = learner.objective(indPtr, colInds, indPtr, colInds, tempU, V, gp, gq)

                tempU = U.copy()
                tempU[i, j] -= eps
                obj2 = learner.objective(indPtr, colInds, indPtr, colInds, tempU, V, gp, gq)

                deltaU2[i, j] = (obj1 - obj2) / (2 * eps)

        nptst.assert_almost_equal(deltaU, deltaU2, 3)

    # Try lmbda > 0
    for s in range(numRuns):
        U = numpy.random.randn(m, k)
        V = numpy.random.randn(n, k)
        learner.lmbdaU = 0.5

        deltaU = numpy.zeros(U.shape)
        for i in range(X.shape[0]):
            deltaU[i, :] = learner.derivativeUi(indPtr, colInds, U, V, gp, gq, i)

        deltaU2 = numpy.zeros(U.shape)
        eps = 10**-9

        for i in range(m):
            for j in range(k):
                tempU = U.copy()
                tempU[i, j] += eps
                obj1 = learner.objective(indPtr, colInds, indPtr, colInds, tempU, V, gp, gq)
# ......... (the remainder of this example's code is omitted here) .........