This article collects typical usage examples of the Python method sandbox.util.SparseUtils.SparseUtils.generateLowRank. If you are unsure what SparseUtils.generateLowRank does or how to call it, the curated code examples below should help. You can also explore further usage examples of the containing class, sandbox.util.SparseUtils.SparseUtils.
Five code examples of SparseUtils.generateLowRank are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
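Before the examples, here is a minimal sketch of the call signature and the reconstruction convention the tests below rely on. The assertions in Example 4 imply that generateLowRank(shape, r) returns factors U and V with orthonormal columns together with r singular values s, so the dense matrix is recovered as (U*s).dot(V.T):

import numpy
from sandbox.util.SparseUtils import SparseUtils

# Random low-rank factors: U is (m, r), V is (n, r), s holds the r singular values.
U, s, V = SparseUtils.generateLowRank((50, 40), 5)

# Dense reconstruction used throughout the examples below: X = U diag(s) V^T.
X = (U * s).dot(V.T)
print(X.shape)  # (50, 40)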
Example 1: testParallelSparseLowRankOp
# Required imports: from sandbox.util.SparseUtils import SparseUtils [as alias]
# Or: from sandbox.util.SparseUtils.SparseUtils import generateLowRank [as alias]
def testParallelSparseLowRankOp(self):
    numRuns = 10

    for i in range(numRuns):
        m = numpy.random.randint(10, 100)
        n = numpy.random.randint(10, 100)
        density = numpy.random.rand()
        A = scipy.sparse.rand(m, n, density)
        A = A.tocsc()

        r = numpy.random.randint(10, 100)
        U, s, V = SparseUtils.generateLowRank((m, n), r)

        L = LinOperatorUtils.parallelSparseLowRankOp(A, U, s, V)

        u = numpy.random.rand(m)
        v = numpy.random.rand(n)

        #Note: r is reused here as the number of test columns, not the rank above
        r = 10
        W = numpy.random.rand(m, r)
        X = numpy.random.rand(n, r)

        #B is the dense equivalent of the operator: A + U diag(s) V^T
        B = numpy.array(A + (U*s).dot(V.T))

        nptst.assert_array_almost_equal(L.matvec(v), B.dot(v))
        nptst.assert_array_almost_equal(L.rmatvec(u), B.T.dot(u))
        nptst.assert_array_almost_equal(L.matmat(X), B.dot(X))
        nptst.assert_array_almost_equal(L.rmatmat(W), B.T.dot(W))
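The operator is checked against the dense matrix B = A + U diag(s) V^T. If LinOperatorUtils is not available, a plain scipy.sparse.linalg.LinearOperator built from the same factors gives the same matvec/rmatvec behaviour; the following is a serial sketch for illustration, not the parallel implementation exercised by the test:

import numpy
import scipy.sparse
from scipy.sparse.linalg import LinearOperator

def sparsePlusLowRankOp(A, U, s, V):
    """Operator for B = A + U diag(s) V^T, applied without forming B densely."""
    P = U * s  # fold the singular values into U

    def matvec(v):
        return A.dot(v) + P.dot(V.T.dot(v))

    def rmatvec(u):
        return A.T.dot(u) + V.dot(P.T.dot(u))

    return LinearOperator(A.shape, matvec=matvec, rmatvec=rmatvec, dtype=A.dtype)

# Usage mirroring the test assertions:
m, n, r = 20, 30, 3
A = scipy.sparse.rand(m, n, 0.2, format="csc")
U = numpy.linalg.qr(numpy.random.randn(m, r))[0]
V = numpy.linalg.qr(numpy.random.randn(n, r))[0]
s = numpy.random.rand(r)

L = sparsePlusLowRankOp(A, U, s, V)
B = numpy.array(A + (U * s).dot(V.T))
v = numpy.random.rand(n)
numpy.testing.assert_array_almost_equal(L.matvec(v), B.dot(v))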
Example 2: profilePartialReconstructValsPQ
# Required imports: from sandbox.util.SparseUtils import SparseUtils [as alias]
# Or: from sandbox.util.SparseUtils.SparseUtils import generateLowRank [as alias]
def profilePartialReconstructValsPQ(self):
    shape = 5000, 10000
    r = 100
    U, s, V = SparseUtils.generateLowRank(shape, r)

    #Draw one million random (row, column) index pairs
    k = 1000000
    inds = numpy.unravel_index(numpy.random.randint(0, shape[0]*shape[1], k), shape)

    ProfileUtils.profile('SparseUtilsCython.partialReconstructValsPQ(inds[0], inds[1], U, V)', globals(), locals())
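The Cython routine being profiled evaluates selected entries of a product P Q^T given parallel arrays of row and column indices (Example 5 calls it with P = U*s and Q = V). Assuming those semantics, a pure-numpy reference is a single einsum; this is a sketch for comparison, not the compiled code being profiled:

import numpy

def partialReconstructValsPQ_numpy(rowInds, colInds, P, Q):
    #vals[t] = P[rowInds[t], :].dot(Q[colInds[t], :]), i.e. entry (rowInds[t], colInds[t]) of P Q^T
    return numpy.einsum("ij,ij->i", P[rowInds, :], Q[colInds, :])

#Usage with the arrays from the profiling snippet above:
#vals = partialReconstructValsPQ_numpy(inds[0], inds[1], U, V)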
Example 3: testReconstructLowRank
# Required imports: from sandbox.util.SparseUtils import SparseUtils [as alias]
# Or: from sandbox.util.SparseUtils.SparseUtils import generateLowRank [as alias]
def testReconstructLowRank(self):
    shape = (5000, 1000)
    r = 5
    U, s, V = SparseUtils.generateLowRank(shape, r)

    inds = numpy.array([0])
    X = SparseUtils.reconstructLowRank(U, s, V, inds)

    #The reconstructed entry should match the dense product (U*s).dot(V.T) at (0, 0)
    self.assertAlmostEqual(X[0, 0], (U[0, :]*s).dot(V[0, :]))
Example 4: testGenerateLowRank
# Required imports: from sandbox.util.SparseUtils import SparseUtils [as alias]
# Or: from sandbox.util.SparseUtils.SparseUtils import generateLowRank [as alias]
def testGenerateLowRank(self):
    shape = (5000, 1000)
    r = 5
    U, s, V = SparseUtils.generateLowRank(shape, r)

    #U and V have orthonormal columns and s contains r singular values
    nptst.assert_array_almost_equal(U.T.dot(U), numpy.eye(r))
    nptst.assert_array_almost_equal(V.T.dot(V), numpy.eye(r))

    self.assertEqual(U.shape[0], shape[0])
    self.assertEqual(V.shape[0], shape[1])
    self.assertEqual(s.shape[0], r)

    #Check the range of the reconstructed entries is roughly [-1, 1]
    shape = (500, 500)
    r = 100
    U, s, V = SparseUtils.generateLowRank(shape, r)
    X = (U*s).dot(V.T)

    self.assertTrue(abs(numpy.max(X) - 1) < 0.5)
    self.assertTrue(abs(numpy.min(X) + 1) < 0.5)
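The assertions above pin down the contract of generateLowRank: orthonormal factor columns, r singular values, and (with the default normalisation) reconstructed entries spanning roughly [-1, 1]. Purely to illustrate that contract, and not as the library's actual implementation, a toy construction could look like this:

import numpy

def generateLowRankSketch(shape, r, normalise=True):
    """Toy stand-in: orthonormal U, V from QR of Gaussian matrices, random singular values."""
    m, n = shape
    U = numpy.linalg.qr(numpy.random.randn(m, r))[0]
    V = numpy.linalg.qr(numpy.random.randn(n, r))[0]
    s = numpy.random.rand(r)

    if normalise:
        #Rescale the singular values so the reconstructed entries lie roughly in [-1, 1]
        X = (U * s).dot(V.T)
        s = s / numpy.abs(X).max()

    return U, s, V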
Example 5: generateMatrices
# Required imports: from sandbox.util.SparseUtils import SparseUtils [as alias]
# Or: from sandbox.util.SparseUtils.SparseUtils import generateLowRank [as alias]
def generateMatrices(self):
    """
    This function returns a list of 20 train/test matrices for incremental
    collaborative filtering. Each item in the list is (trainX, testX).
    """
    numpy.random.seed(21)
    r = 50
    U, s, V = SparseUtils.generateLowRank((self.endM, self.endN), r, normalise=False)

    self.startNumInds = self.pnz*self.startM*self.startN
    self.endNumInds = self.pnz*self.endM*self.endN

    if not self.nonUniform:
        inds = numpy.random.randint(0, self.endM*self.endN-1, int(self.endNumInds))
    else:
        logging.debug("Using non-uniform dataset")
        #Sample flat indices from a normal distribution centred on the middle of the matrix
        inds = numpy.array(numpy.random.randn(int(self.endNumInds))*(self.endM*self.endN-1)/4 + (self.endM*self.endN-1)/2, numpy.int64)
        inds = numpy.clip(inds, 0, (self.endM*self.endN-1))

    inds = numpy.unique(inds)
    numpy.random.shuffle(inds)
    self.endNumInds = inds.shape[0]

    rowInds, colInds = numpy.unravel_index(inds, (self.endM, self.endN))
    rowInds = numpy.array(rowInds, numpy.int32)
    colInds = numpy.array(colInds, numpy.int32)

    #Low-rank values at the sampled indices, standardised and perturbed with Gaussian noise
    vals = SparseUtilsCython.partialReconstructValsPQ(rowInds, colInds, U*s, V)
    vals /= vals.std()
    vals += numpy.random.randn(vals.shape[0])*self.noise

    #Split the observed entries into train and test masks
    isTrainInd = numpy.array(numpy.random.rand(inds.shape[0]) <= self.trainSplit, numpy.bool_)
    assert abs(self.trainSplit - isTrainInd.sum()/float(isTrainInd.shape[0])) < 0.05

    XMaskTrain = scipy.sparse.csc_matrix((isTrainInd, (rowInds, colInds)), dtype=numpy.bool_, shape=(self.endM, self.endN))
    XMaskTest = scipy.sparse.csc_matrix((numpy.logical_not(isTrainInd), (rowInds, colInds)), dtype=numpy.bool_, shape=(self.endM, self.endN))
    #In the first phase the matrices stay the same size but gain more nonzero entries
    numMatrices = 10
    stepList = numpy.linspace(self.startNumInds, self.endNumInds, numMatrices)
    trainXList = []
    testXList = []

    for i in range(numMatrices):
        #Take a growing prefix of the sampled entries (linspace gives floats, so cast to int)
        currentVals = vals[0:int(stepList[i])]
        currentRowInds = rowInds[0:int(stepList[i])]
        currentColInds = colInds[0:int(stepList[i])]

        X = scipy.sparse.csc_matrix((currentVals, (currentRowInds, currentColInds)), dtype=numpy.float64, shape=(self.endM, self.endN))
        #print("pnz=" + str(X.nnz/float(X.shape[0]*X.shape[1])))

        trainX = X.multiply(XMaskTrain)[0:self.startM, 0:self.startN]
        trainX.eliminate_zeros()
        trainX.prune()

        testX = X.multiply(XMaskTest)[0:self.startM, 0:self.startN]
        testX.eliminate_zeros()
        testX.prune()

        trainXList.append(trainX)
        testXList.append(testX)
    #Now we increase the size of the matrices
    numMatrices = 10
    mStepList = numpy.linspace(self.startM, self.endM, numMatrices)
    nStepList = numpy.linspace(self.startN, self.endN, numMatrices)

    X = scipy.sparse.csc_matrix((vals, (rowInds, colInds)), dtype=numpy.float64, shape=(self.endM, self.endN))

    for i in range(numMatrices):
        trainX = X.multiply(XMaskTrain)[0:int(mStepList[i]), :][:, 0:int(nStepList[i])]
        trainX.eliminate_zeros()
        trainX.prune()

        testX = X.multiply(XMaskTest)[0:int(mStepList[i]), :][:, 0:int(nStepList[i])]
        testX.eliminate_zeros()
        testX.prune()

        trainXList.append(trainX)
        testXList.append(testX)

    return trainXList, testXList
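A hypothetical driver for the method above: assuming exp is an instance of the experiment class with its startM/endM, startN/endN, pnz, noise, trainSplit and nonUniform attributes already set, the 20 (trainX, testX) pairs can be walked and summarised as follows (the helper is illustrative, not part of the library):

def summariseMatrices(trainXList, testXList):
    """Print the shape and density of each incremental train/test pair."""
    for i, (trainX, testX) in enumerate(zip(trainXList, testXList)):
        density = trainX.nnz / float(trainX.shape[0] * trainX.shape[1])
        print("%2d: train %s (pnz=%.4f), test %s" % (i, str(trainX.shape), density, str(testX.shape)))

#Hypothetical usage:
#summariseMatrices(*exp.generateMatrices())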