This page collects typical usage examples of the Python function nimfa.mf_run. If you are unsure what mf_run does, how to call it, or what its real-world usage looks like, the hand-picked examples below should help.
A total of 15 code examples of mf_run are shown, sorted by popularity by default.
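All of the snippets rely on the legacy nimfa.mf / nimfa.mf_run facade (later nimfa releases replaced it with a class-based interface). As a reference point, here is a minimal, self-contained sketch of that pattern; the matrix size and parameters are arbitrary choices, not taken from any particular example below.

import numpy as np
import nimfa

# Hypothetical non-negative target matrix (30 samples x 20 features).
V = np.random.rand(30, 20)

# Build a factorization model with the legacy facade, then run it.
model = nimfa.mf(V,
                 seed="random_vcol",   # initialization strategy
                 rank=10,              # number of basis vectors
                 method="nmf",
                 max_iter=20,
                 initialize_only=True)
fit = nimfa.mf_run(model)

W = fit.basis()   # 30 x 10 basis matrix
H = fit.coef()    # 10 x 20 mixture (coefficient) matrix
print("Euclidean distance: %5.3f" % fit.distance(metric='euclidean'))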
Example 1: run_nmf
def run_nmf(V):
    """
    Run standard nonnegative matrix factorization.

    :param V: Target matrix to estimate.
    :type V: :class:`numpy.matrix`
    """
    # Euclidean update rules with a Frobenius-norm objective
    rank = 10
    model = nimfa.mf(V,
                     seed = "random_vcol",
                     rank = rank,
                     method = "nmf",
                     max_iter = 12,
                     initialize_only = True,
                     update = 'euclidean',
                     objective = 'fro')
    fit = nimfa.mf_run(model)
    print_info(fit)
    # Divergence update rules with a KL-divergence objective
    model = nimfa.mf(V,
                     seed = "random_vcol",
                     rank = rank,
                     method = "nmf",
                     max_iter = 12,
                     initialize_only = True,
                     update = 'divergence',
                     objective = 'div')
    fit = nimfa.mf_run(model)
    print_info(fit)
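The print_info helper called above (and again in Examples 5 and 9) is not part of the snippet. A plausible stand-in, built only from the quality measures used elsewhere on this page, might look like this; the body in the original script may differ.

def print_info(fit, idx=None):
    # Hypothetical helper: report a few quality measures from the fitted model.
    # `idx` selects the target/mixture pair when a multiple-NMF model is fitted.
    sm = fit.summary(idx)
    print("Rss: %8.3f" % sm['rss'])
    print("Evar: %8.3f" % sm['evar'])
    print("Iterations: %d" % sm['n_iter'])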
Example 2: learnModel
def learnModel(self, X):
    """
    Learn X using a matrix factorisation method. If self.rank is an integer
    then we factorise with that rank. If it is an array then we compute the
    complete regularisation path and return a list of matrices.
    """
    if isinstance(self.rank, int):
        model = nimfa.mf(X, method=self.method, max_iter=self.maxIter, rank=self.rank)
        fit = nimfa.mf_run(model)
        W = fit.basis()
        H = fit.coef()
        predX = W.dot(H)
        return predX
    else:
        predXList = []
        model = nimfa.mf(X, method=self.method, max_iter=self.maxIter, rank=self.rank[0])
        fit = nimfa.mf_run(model)
        W = fit.basis()
        H = fit.coef()
        predXList.append(W.dot(H))
        # Warm-start each subsequent rank with the previous W and H
        for i in range(1, self.rank.shape[0]):
            model = nimfa.mf(X, method=self.method, max_iter=self.maxIter, rank=self.rank[i], W=W, H=H)
            fit = nimfa.mf_run(model)
            W = fit.basis()
            H = fit.coef()
            predXList.append(W.dot(H))
        return predXList
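A hypothetical driver for the method above, assuming a small object that carries the attributes the snippet reads (method, maxIter, rank); none of these values come from the original code, so treat this purely as a usage sketch.

import numpy as np

class _Cfg(object):
    # Hypothetical stand-in for `self`: just holds the attributes learnModel reads.
    method, maxIter = "nmf", 50

cfg = _Cfg()
X = np.abs(np.random.rand(40, 30))

cfg.rank = 5                        # single rank -> one reconstruction
predX = learnModel(cfg, X)

cfg.rank = np.array([2, 4, 8])      # rank path -> list of reconstructions,
predXList = learnModel(cfg, X)      # each warm-started from the previous W, H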
Example 3: nmf
def nmf(Xtrn, Xtst):
    # Init matrices
    Xtrn_n = np.shape(Xtrn)[0]
    Xtst_n = np.shape(Xtst)[0]
    Xtrn_nmf = np.zeros((Xtrn_n, my_rank))
    Xtst_nmf = np.zeros((Xtst_n, my_rank))
    print(file_name + ': Running non-negative matrix factorization w/ rank = ' + str(my_rank))
    #Xtrn_fctr = nimfa.mf(Xtrn, method = 'nmf', seed = "fixed", max_iter = iters,
    #                     rank = my_rank, update = 'euclidean', objective = 'fro')
    print(file_name + ': \t on training...')
    for i in xrange(Xtrn_n):
        Xtrn_fctr = nimfa.mf(Xtrn[i,:], method = 'lsnmf', max_iter = iters, rank = my_rank)
        Xtrn_res = nimfa.mf_run(Xtrn_fctr)
        Xtrn_nmf[i,:] = Xtrn_res.basis()
        if (i%10000 == 0): print(file_name + ': \t iter ' + str(i))
    print(file_name + ': \t on testing...')
    for i in xrange(Xtst_n):
        Xtst_fctr = nimfa.mf(Xtst[i,:], method = 'lsnmf', max_iter = iters, rank = my_rank)
        Xtst_res = nimfa.mf_run(Xtst_fctr)  # run the test-row model (the original mistakenly re-ran Xtrn_fctr here)
        Xtst_nmf[i,:] = Xtst_res.basis()
        if (i%10000 == 0): print(file_name + ': \t iter ' + str(i))
    """
    Xtrn_sm = Xtrn_res.summary()
    Xtst_sm = Xtst_res.summary()
    print(file_name + ': \t\t RSS \t Explained Var \t Iters')
    print(file_name + ': Xtrn: \t' + str(Xtrn_sm['rss']) + '\t' +
          str(Xtrn_sm['evar']) + '\t' + str(Xtrn_sm['n_iter']))
    print(file_name + ': Xtst: ' + str(Xtst_sm['rss']) + '\t' +
          str(Xtst_sm['evar']) + '\t' + str(Xtst_sm['n_iter']))
    """
    return (Xtrn_nmf, Xtst_nmf)
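The snippet reads three module-level names that are not defined in it (my_rank, iters, file_name), plus the usual numpy and nimfa imports. Hypothetical values, purely so the function can be exercised standalone:

import numpy as np
import nimfa

# Hypothetical module-level settings assumed by the snippet above.
my_rank = 10                  # factorization rank
iters = 50                    # iteration budget per row
file_name = "nmf_features"    # tag used in the log lines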
Example 4: run
def run(self, **params):
    if not self.dataConsolided:
        print "NIMFA_SNMNMF: preparing data"
        self.consolideTheData()
        self.dataConsolided = True
    print "NIMFA_SNMNMF: starting"
    #
    V = self.miRNA.as_matrix()
    V1 = self.mRNA.as_matrix()
    A = csr_matrix(self.gene2gene)
    B = csr_matrix(self.miRNA2gene)
    fctr = nimfa.mf(target = (V, V1),
                    seed = params['seed'],          # e.g., "random_c"
                    rank = params['rank'],          # e.g., 50
                    method = "snmnmf",
                    max_iter = params['max_iter'],  # e.g., 500
                    initialize_only = True,
                    A = A,
                    B = B,
                    n_run = 3,
                    gamma = self.g1,
                    gamma_1 = self.g2,
                    lamb = self.l1,
                    lamb_1 = self.l2)
    fctr_res = nimfa.mf_run(fctr)
    print "NIMFA_SNMNMF: done"
    # extract the results
    self.W = DataFrame(fctr_res.basis(), index = self.miRNA.index)
    self.H1_miRNA = DataFrame(fctr_res.coef(0), columns = self.miRNA.columns)
    self.H2_genes = DataFrame(fctr_res.coef(1), columns = self.mRNA.columns)
    self.performance = NIMFA_SNMNMFPerformance(fctr_res)
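A hypothetical call, using the parameter values the inline comments suggest; `analysis` stands for an instance of the surrounding class, which is not shown here.

# Hypothetical invocation; the keys match what run() reads from **params.
params = {"seed": "random_c", "rank": 50, "max_iter": 500}
analysis.run(**params)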
Example 5: run_bd
def run_bd(V):
    """
    Run Bayesian decomposition.

    :param V: Target matrix to estimate.
    :type V: :class:`numpy.matrix`
    """
    rank = 10
    model = nimfa.mf(V,
                     seed = "random_c",
                     rank = rank,
                     method = "bd",
                     max_iter = 12,
                     initialize_only = True,
                     alpha = np.mat(np.zeros((V.shape[0], rank))),
                     beta = np.mat(np.zeros((rank, V.shape[1]))),
                     theta = .0,
                     k = .0,
                     sigma = 1.,
                     skip = 100,
                     stride = 1,
                     n_w = np.mat(np.zeros((rank, 1))),
                     n_h = np.mat(np.zeros((rank, 1))),
                     n_sigma = False)
    fit = nimfa.mf_run(model)
    print_info(fit)
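A hypothetical call with a small random non-negative target (a numpy.matrix, as the docstring requests); print_info is the helper sketched after Example 1.

import numpy as np

# Hypothetical 30 x 20 target for the Bayesian decomposition example.
run_bd(np.mat(np.random.rand(30, 20)))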
Example 6: factorization
def factorization(V, rank=4):
    """
    Use NMF to factorize V.

    :rtype: (1) the projection matrix (2) the feature vector of V
    """
    fctr = nimfa.mf(
        V,
        method="nmf",
        max_iter=30,
        rank=rank,
        update="divergence",
        objective="div",
        callback_init=init_info,
        callback=init_info,
    )
    fctr_res = nimfa.mf_run(fctr)
    print "calculate generalized inverse"
    projection = pinv(fctr_res.basis().todense())
    print "inverse finished"
    return {
        "projection": projection,
        "feature": (projection * V),
        "basis": fctr_res.basis(),
        "coef": fctr_res.coef().todense(),
    }
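The init_info callback passed above is not shown in the snippet. nimfa invokes callback_init after initialization and callback after each run with the fitted model, so a hypothetical stand-in can simply accept whatever it is given:

def init_info(*args, **kwargs):
    # Hypothetical no-op callback (the original may inspect the fitted model
    # it receives); kept permissive so the signature cannot mismatch.
    print("nimfa callback invoked")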
Example 7: nmfMatrix
def nmfMatrix(self, V):
    print "---"
    print "NMF"
    print "---"
    V = np.array(V)
    print "Target matrix"
    print V
    fctr = nimfa.mf(V, seed = 'random_vcol', method = 'lsnmf', rank = 40, max_iter = 10)
    fctr_res = nimfa.mf_run(fctr)
    W = fctr_res.basis()
    print "Basis matrix"
    print W
    H = fctr_res.coef()
    print "Coef"
    print H
    print "Estimate"
    print np.dot(W, H)
    print 'Rss: %5.4f' % fctr_res.fit.rss()
    print 'Evar: %5.4f' % fctr_res.fit.evar()
    print 'K-L divergence: %5.4f' % fctr_res.distance(metric = 'kl')
    print 'Sparseness, W: %5.4f, H: %5.4f' % fctr_res.fit.sparseness()
    return W, H
Example 8: factorize
def factorize(V):
    """
    Perform SNMF/R factorization on the sparse MovieLens data matrix.

    Return basis and mixture matrices of the fitted factorization model.

    :param V: The MovieLens data matrix.
    :type V: `scipy.sparse.csr_matrix`
    """
    model = nimfa.mf(V,
                     seed="random_vcol",
                     rank=12,
                     method="snmf",
                     max_iter=15,
                     initialize_only=True,
                     version='r',
                     eta=1.,
                     beta=1e-4,
                     i_conv=10,
                     w_min_change=0)
    print "Performing %s %s %d factorization ..." % (model, model.seed, model.rank)
    fit = nimfa.mf_run(model)
    print "... Finished"
    sparse_w, sparse_h = fit.fit.sparseness()
    print """Stats:
    - iterations: %d
    - Euclidean distance: %5.3f
    - Sparseness basis: %5.3f, mixture: %5.3f""" % (fit.fit.n_iter, fit.distance(metric='euclidean'), sparse_w, sparse_h)
    return fit.basis(), fit.coef()
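A hypothetical call with a random sparse stand-in for the MovieLens matrix (a user-by-item csr_matrix of non-negative ratings); the size and density are arbitrary.

import scipy.sparse as sp

# Hypothetical stand-in for the MovieLens ratings: 200 users x 100 items,
# roughly 10% of the entries observed, values scaled into [0, 5).
V = sp.rand(200, 100, density=0.1, format='csr') * 5
W, H = factorize(V)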
Example 9: run_snmnmf
def run_snmnmf(V, V1):
    """
    Run sparse network-regularized multiple NMF.

    :param V: First target matrix to estimate.
    :type V: :class:`numpy.matrix`
    :param V1: Second target matrix to estimate.
    :type V1: :class:`numpy.matrix`
    """
    rank = 10
    model = nimfa.mf(target = (V, V1),
                     seed = "random_c",
                     rank = rank,
                     method = "snmnmf",
                     max_iter = 12,
                     initialize_only = True,
                     A = abs(sp.rand(V1.shape[1], V1.shape[1], density = 0.7, format = 'csr')),
                     B = abs(sp.rand(V.shape[1], V1.shape[1], density = 0.7, format = 'csr')),
                     gamma = 0.01,
                     gamma_1 = 0.01,
                     lamb = 0.01,
                     lamb_1 = 0.01)
    fit = nimfa.mf_run(model)
    # print all quality measures concerning first target and mixture matrix in multiple NMF
    print_info(fit, idx = 0)
    # print all quality measures concerning second target and mixture matrix in multiple NMF
    print_info(fit, idx = 1)
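A hypothetical call with two random non-negative targets that share the same rows, which is what the snmnmf method expects; the coupling matrices A and B are generated inside the function.

import numpy as np

# Hypothetical targets: 40 shared samples, with 30 and 25 features respectively.
V = np.mat(np.random.rand(40, 30))
V1 = np.mat(np.random.rand(40, 25))
run_snmnmf(V, V1)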
Example 10: factorize
def factorize(V):
    """
    Perform NMF - Divergence factorization on the sparse Medlars data matrix.

    Return basis and mixture matrices of the fitted factorization model.

    :param V: The Medlars data matrix.
    :type V: `scipy.sparse.csr_matrix`
    """
    model = nimfa.mf(V,
                     seed = "random_vcol",
                     rank = 12,
                     method = "nmf",
                     max_iter = 15,
                     initialize_only = True,
                     update = 'divergence',
                     objective = 'div')
    print "Performing %s %s %d factorization ..." % (model, model.seed, model.rank)
    fit = nimfa.mf_run(model)
    print "... Finished"
    sparse_w, sparse_h = fit.fit.sparseness()
    print """Stats:
    - iterations: %d
    - KL Divergence: %5.3f
    - Euclidean distance: %5.3f
    - Sparseness basis: %5.3f, mixture: %5.3f""" % (fit.fit.n_iter, fit.distance(), fit.distance(metric = 'euclidean'), sparse_w, sparse_h)
    return fit.basis(), fit.coef()
Example 11: factorize
def factorize(V):
    """
    Perform LSNMF factorization on the ORL faces data matrix.

    Return basis and mixture matrices of the fitted factorization model.

    :param V: The ORL faces data matrix.
    :type V: `numpy.matrix`
    """
    model = nimfa.mf(V,
                     seed = "random_vcol",
                     rank = 25,
                     method = "lsnmf",
                     max_iter = 50,
                     initialize_only = True,
                     sub_iter = 10,
                     inner_sub_iter = 10,
                     beta = 0.1,
                     min_residuals = 1e-8)
    print "Performing %s %s %d factorization ..." % (model, model.seed, model.rank)
    fit = nimfa.mf_run(model)
    print "... Finished"
    print """Stats:
    - iterations: %d
    - final projected gradients norm: %5.3f
    - Euclidean distance: %5.3f""" % (fit.fit.n_iter, fit.distance(), fit.distance(metric = 'euclidean'))
    return fit.basis(), fit.coef()
Example 12: nmfMatrix
def nmfMatrix(self, V, method, rank, maxIter):
    print "---"
    print "NMF"
    print "---"
    V = np.array(V)
    print "Target matrix"
    print V.shape[0]
    print V.shape[1]
    print V
    # X = sp.rand(V.shape[0], V.shape[1], density=1).tocsr()
    # Settings for the NMF run: number of basis vectors (rank) and iteration count
    # rank = 8
    # maxIter = 2000
    # method = "snmf"
    # init2arizer = nimfa.methods.seeding.random_vcol.Random_vcol()
    initiarizer = nimfa.methods.seeding.random.Random()
    initW, initH = initiarizer.initialize(V, rank, {})
    fctr = nimfa.mf(V, seed = 'random_vcol', method = method, rank = rank, max_iter = maxIter)
    # fctr = nimfa.mf(V, method = "lsnmf", rank = rank, max_iter = maxIter, W = initW, H = initH)
    fctr_res = nimfa.mf_run(fctr)
    W = fctr_res.basis()
    print "Basis matrix"
    print W.shape[0]
    print W.shape[1]
    print W
    H = fctr_res.coef()
    print "Coef"
    print H.shape[0]
    print H.shape[1]
    print H
    print "Estimate"
    print np.dot(W, H)
    print 'Rss: %5.4f' % fctr_res.fit.rss()
    print 'Evar: %5.4f' % fctr_res.fit.evar()
    print 'K-L divergence: %5.4f' % fctr_res.distance(metric = 'kl')
    print 'Sparseness, W: %5.4f, H: %5.4f' % fctr_res.fit.sparseness()
    sm = fctr_res.summary()
    print type(sm)
    # print "Rss: %8.3f" % sm['rss']
    # # Print explained variance.
    # print "Evar: %8.3f" % sm['evar']
    # # Print actual number of iterations performed
    # print "Iterations: %d" % sm['n_iter']
    # Convert to numpy.ndarray, since the matrix type causes problems when plotting
    NW = np.asarray(W)
    NH = np.asarray(H)
    return NW, NH, sm
Example 13: _NIMFA_NMF
def _NIMFA_NMF(self, X, nBases):
    model = nimfa.mf(X, seed="nndsvd", rank=nBases, method="nmf", initialize_only=True)
    fit = nimfa.mf_run(model)
    W = fit.basis()
    H = fit.coef()
    self.W = W.todense()
    self.H = H.todense()
    return (self.W, self.H)
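The .todense() calls above imply that X is a scipy.sparse matrix (dense numpy matrices have no such method). A hypothetical standalone invocation, with a throwaway object standing in for self:

import scipy.sparse as sp

class _Store(object):
    # Hypothetical stand-in for `self`; the method only assigns W and H on it.
    pass

X = sp.rand(100, 50, density=0.2, format='csr')   # sparse non-negative input
W, H = _NIMFA_NMF(_Store(), X, nBases=10)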
Example 14: _factorize
def _factorize(matrix):
    "Factorize the matrix to get pc"
    # Build the model
    model = mf(matrix,
               seed="random_vcol",
               rank=15,
               method="nmf",
               max_iter=15,
               initialize_only=True,
               update='divergence',
               objective='div')
    # Then fit it
    fit = mf_run(model)
    return fit.basis(), fit.coef()
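This snippet calls mf and mf_run unqualified, so it presumably relies on a from-import of the legacy facade. A hypothetical setup and call:

import numpy as np
from nimfa import mf, mf_run   # assumption: the original module imports these names directly

# Hypothetical dense non-negative input; the rank is fixed at 15 inside the
# function, so keep both dimensions comfortably above that.
W, H = _factorize(np.abs(np.random.rand(60, 40)))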
Example 15: max_guess_select
def max_guess_select(ratings, users, rank=9, user=None):
    matrix = sp.dok_matrix((len(users), len(users)))
    for k, v in ratings.items():
        matrix[users[k[0]], users[k[1]]] = v
        matrix[users[k[1]], users[k[0]]] = v
    # Run sparse matrix factorisation
    factor = nimfa.mf(matrix, seed="random_c", rank=rank, method="snmf", max_iter=12, initialize_only=True, version='r', eta=1., beta=1e-4, i_conv=10, w_min_change=0)
    result = nimfa.mf_run(factor)
    if user is None:
        # Pick a user to expand
        user = min(users, key=lambda u: len([i for i in ratings if u in i]))
    recommendations = result.fitted()
    rval = max([i for i in users if (i, user) not in ratings and (user, i) not in ratings],
               key=lambda x: recommendations[users[user], users[x]])
    return user, rval
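A hypothetical end-to-end call: ratings keyed by unordered user pairs, plus an index mapping each user to a row/column of the symmetric matrix (sp here is scipy.sparse, as in the snippet, and nimfa is imported at module level).

# Hypothetical toy data; with only a handful of users, a small rank is needed.
ratings = {("ann", "bob"): 4.0, ("bob", "cat"): 2.0, ("ann", "dan"): 5.0}
users = {"ann": 0, "bob": 1, "cat": 2, "dan": 3}
user, suggestion = max_guess_select(ratings, users, rank=2)
print(user, suggestion)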