本文整理匯總了Python中sage.numerical.mip.MixedIntegerLinearProgram.solver_parameter方法的典型用法代碼示例。如果您正苦於以下問題:Python MixedIntegerLinearProgram.solver_parameter方法的具體用法?Python MixedIntegerLinearProgram.solver_parameter怎麽用?Python MixedIntegerLinearProgram.solver_parameter使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類sage.numerical.mip.MixedIntegerLinearProgram
的用法示例。
在下文中一共展示了MixedIntegerLinearProgram.solver_parameter方法的1個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: __init__
# 需要導入模塊: from sage.numerical.mip import MixedIntegerLinearProgram [as 別名]
# 或者: from sage.numerical.mip.MixedIntegerLinearProgram import solver_parameter [as 別名]
class hard_EM:
def __init__(self, author_graph, TAU=0.5001, nparts=5, init_partition=None):
    """Set up hard-EM state: relabel the graph, seed a partition, run one M-step.

    author_graph   -- networkx graph of authors (relabelled to integer nodes)
    TAU            -- edge-agreement probability used in the pair potentials
    nparts         -- number of parts (clusters)
    init_partition -- optional starting assignment {node: part}; a random one
                      is drawn when this is missing/empty
    """
    self.parts = range(nparts)
    self.TAU = TAU
    # Integer node labels are required downstream; old labels are preserved.
    self.author_graph = nx.convert_node_labels_to_integers(author_graph, discard_old_labels=False)
    self._lp_init = False
    # Initialize the hidden assignment (falsy init_partition -> random draw).
    if not init_partition:
        self._rand_init_partition()
    else:
        self.partition = init_partition
    self.m_step()
def _rand_init_partition(self):
    """Assign every author to a uniformly random part, seeding the RNG with the PID."""
    seed = os.getpid()
    slog('Random partitioning with seed: %s' % seed)
    random.seed(seed)
    upper = len(self.parts) - 1
    # One randint draw per node, in graph iteration order (same as the
    # original loop), so the seeded sequence is reproducible.
    self.partition = dict((node, randint(0, upper)) for node in self.author_graph)
def _init_LP(self):
    """Construct the LP relaxation used by the E-step, once.

    Builds indicator variables alpha[a][p] (author a assigned to part p),
    auxiliary pair variables beta2/beta3 that linearize the per-part
    disagreement |alpha[a][p] - alpha[b][p]| on each strong edge, and caches
    backend variable ids and objective coefficients in plain dicts for speed.
    Sets self._lp_init so repeated calls are no-ops.
    """
    # Already built -- nothing to do.
    if self._lp_init:
        return
    slog('Init LP')
    # Minimization problem solved with GLPK.
    self.lp = MixedIntegerLinearProgram(solver='GLPK', maximization=False)
    #self.lp.solver_parameter(backend.glp_simplex_or_intopt, backend.glp_simplex_only) # LP relaxation
    self.lp.solver_parameter("iteration_limit", LP_ITERATION_LIMIT)
    # self.lp.solver_parameter("timelimit", LP_TIME_LIMIT)
    # add constraints once here
    # constraints
    self.alpha = self.lp.new_variable(dim=2)
    beta2 = self.lp.new_variable(dim=2)
    beta3 = self.lp.new_variable(dim=3)
    # alphas are indicator vars: each author belongs to exactly one part.
    for a in self.author_graph:
        self.lp.add_constraint(sum(self.alpha[a][p] for p in self.parts) == 1)
    # beta2 is the sum of beta3s (scaled by 1/2), enforced as an equality.
    slog('Init LP - pair constraints')
    for a, b in self.author_graph.edges():
        # Skip weak edges (denom <= 2) -- they get no pair variables/constraints.
        if self.author_graph[a][b]['denom'] <= 2:
            continue
        self.lp.add_constraint(0.5 * sum(beta3[a][b][p] for p in self.parts) - beta2[a][b], min=0, max=0)
        for p in self.parts:
            # Two one-sided constraints make beta3[a][b][p] an upper bound on
            # |alpha[a][p] - alpha[b][p]|.
            self.lp.add_constraint(self.alpha[a][p] - self.alpha[b][p] - beta3[a][b][p], max=0)
            self.lp.add_constraint(self.alpha[b][p] - self.alpha[a][p] - beta3[a][b][p], max=0)
    # store indiv potential linear function as a dict to improve performance
    self.objF_indiv_dict = {}
    self.alpha_dict = {}
    for a in self.author_graph:
        self.alpha_dict[a] = {}
        for p in self.parts:
            # NOTE: .dict().keys()[0] is Python-2 only (keys() is a view in
            # Python 3); it extracts the backend variable id of alpha[a][p].
            var_id = self.alpha_dict[a][p] = self.alpha[a][p].dict().keys()[0]
            self.objF_indiv_dict[var_id] = 0 # init variables coeffs to zero
    # pairwise potentials
    slog('Obj func - pair potentials')
    objF_pair_dict = {}
    # s = log((1-TAU)/TAU); negative for TAU > 0.5 (e.g. the default 0.5001),
    # so disagreement is penalized in this minimization.
    s = log(1 - self.TAU) - log(self.TAU)
    for a, b in self.author_graph.edges():
        if self.author_graph[a][b]['denom'] <= 2:
            continue
        var_id = beta2[a][b].dict().keys()[0]
        objF_pair_dict[var_id] = -self.author_graph[a][b]['weight'] * s
    self.objF_pair = self.lp(objF_pair_dict)
    self._lp_init = True
    slog('Init LP Done')
def log_phi(self, a, p):
    """Log node potential of author *a* under the parameters of part *p*.

    Combines the part prior, two Bernoulli terms (helpfulness flag,
    real-name flag) and a Gaussian log-density over review length.
    """
    attrs = self.author_graph.node[a]
    params = self.theta[p]
    # Part prior.
    total = params['logPr']
    # Bernoulli log-probabilities for the two binary author features.
    total += params['logPrH'] if attrs['hlpful_fav_unfav'] else params['log1-PrH']
    total += params['logPrR'] if attrs['isRealName'] else params['log1-PrR']
    # Gaussian log-density of review length; EPS guards a zero variance.
    deviation = attrs['revLen'] - params['muL']
    total += -(deviation ** 2) / (2 * params['sigma2L'] + EPS) - (log_2pi + log(params['sigma2L'])) / 2.0
    return total
def log_likelihood(self):
ll = sum(self.log_phi(a, self.partition[a]) for a in self.author_graph.nodes())
log_TAU, log_1_TAU = log(self.TAU), log(1 - self.TAU)
for a, b in self.author_graph.edges():
if self.partition[a] == self.partition[b]:
ll += log_TAU * self.author_graph[a][b]['weight']
else:
ll += log_1_TAU * self.author_graph[a][b]['weight']
return ll
def e_step(self):
slog('E-Step')
#.........這裏部分代碼省略.........