This article collects typical usage examples of the pulp.LpInteger method in Python. If you are wondering what exactly pulp.LpInteger does or how to use it, the curated code examples below may help. You can also explore further usage examples of the pulp module, where this method lives.
The following shows 6 code examples that use pulp.LpInteger, sorted by popularity by default.
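Before the examples, here is a minimal, self-contained sketch (not taken from any of the examples below) of what pulp.LpInteger does: it is the category flag that marks a PuLP variable as integer-valued. The toy objective and constraints are made up for illustration.

import pulp

# A toy integer program: maximize 3x + 2y subject to two made-up capacity constraints.
prob = pulp.LpProblem("tiny_integer_program", pulp.LpMaximize)
x = pulp.LpVariable("x", lowBound=0, cat=pulp.LpInteger)   # integer-valued variable
y = pulp.LpVariable("y", lowBound=0, cat=pulp.LpInteger)
prob += 3 * x + 2 * y                                      # objective
prob += 2 * x + y <= 10, "capacity_1"
prob += x + 3 * y <= 15, "capacity_2"
prob.solve()                                               # PuLP's default bundled solver
print(pulp.LpStatus[prob.status], x.value(), y.value())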
Example 1: create_model
# Required import: import pulp [as alias]
# Or: from pulp import LpInteger [as alias]
def create_model(self):
    def distances(assignment):
        return l2_distance(self.data[assignment[0]], self.centroids[assignment[1]])

    clusters = list(range(self.k))
    assignments = [(i, j) for i in range(self.n) for j in range(self.k)]

    # outflow variables for data nodes
    self.y = pulp.LpVariable.dicts('data-to-cluster assignments',
                                   assignments,
                                   lowBound=0,
                                   upBound=1,
                                   cat=pulp.LpInteger)
    # outflow variables for cluster nodes
    self.b = pulp.LpVariable.dicts('cluster outflows',
                                   clusters,
                                   lowBound=0,
                                   upBound=self.n - self.min_size,
                                   cat=pulp.LpContinuous)

    # create the model
    self.model = pulp.LpProblem("Model for assignment subproblem", pulp.LpMinimize)

    # objective function
    self.model += pulp.lpSum(distances(assignment) * self.y[assignment] for assignment in assignments)

    # flow balance constraints for data nodes
    for i in range(self.n):
        self.model += pulp.lpSum(self.y[(i, j)] for j in range(self.k)) == 1

    # flow balance constraints for cluster nodes
    for j in range(self.k):
        self.model += pulp.lpSum(self.y[(i, j)] for i in range(self.n)) - self.min_size == self.b[j]

    # flow balance constraint for the sink node
    self.model += pulp.lpSum(self.b[j] for j in range(self.k)) == self.n - (self.k * self.min_size)
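If you want to experiment with this formulation outside of its class, the following is a minimal standalone sketch of the same flow-based pattern. The 2-D points, the centroids, the minimum cluster size, and the l2_distance helper are all assumptions for illustration, since the surrounding class is not shown here.

import pulp

# Toy instance (made up): 5 points, 2 clusters, minimum cluster size 2.
data = [(0.0, 0.0), (0.1, 0.2), (5.0, 5.0), (5.2, 4.9), (2.5, 2.5)]
centroids = [(0.0, 0.1), (5.1, 5.0)]
n, k, min_size = len(data), len(centroids), 2

def l2_distance(p, q):
    # stand-in for the helper used in the examples, which is not shown there
    return sum((a - b) ** 2 for a, b in zip(p, q)) ** 0.5

assignments = [(i, j) for i in range(n) for j in range(k)]
# binary assignment variables (pulp.LpInteger with bounds 0..1)
y = pulp.LpVariable.dicts("assign", assignments, lowBound=0, upBound=1, cat=pulp.LpInteger)
# continuous "outflow" slack per cluster, as in the example above
b = pulp.LpVariable.dicts("outflow", list(range(k)), lowBound=0, upBound=n - min_size, cat=pulp.LpContinuous)

model = pulp.LpProblem("min_size_assignment", pulp.LpMinimize)
model += pulp.lpSum(l2_distance(data[i], centroids[j]) * y[(i, j)] for (i, j) in assignments)
for i in range(n):   # each point is assigned to exactly one cluster
    model += pulp.lpSum(y[(i, j)] for j in range(k)) == 1
for j in range(k):   # cluster j must receive at least min_size points
    model += pulp.lpSum(y[(i, j)] for i in range(n)) - min_size == b[j]
model += pulp.lpSum(b[j] for j in range(k)) == n - k * min_size

model.solve()
print([(i, j) for (i, j) in assignments if y[(i, j)].value() == 1])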
Example 2: create_model
# Required import: import pulp [as alias]
# Or: from pulp import LpInteger [as alias]
def create_model(self):
    def distances(assignment):
        return l2_distance(self.data[assignment[0]], self.centroids[assignment[1]])

    clusters = list(range(self.k))
    assignments = [(i, j) for i in range(self.n) for j in range(self.k)]

    # outflow variables for data nodes
    self.y = pulp.LpVariable.dicts('data-to-cluster assignments',
                                   assignments,
                                   lowBound=0,
                                   upBound=1,
                                   cat=pulp.LpInteger)
    # outflow variables for cluster nodes
    self.b = pulp.LpVariable.dicts('cluster outflows',
                                   clusters,
                                   lowBound=0,
                                   upBound=self.n - self.min_size,
                                   cat=pulp.LpContinuous)

    # create the model
    self.model = pulp.LpProblem("Model for assignment subproblem", pulp.LpMinimize)

    # objective function
    self.model += pulp.lpSum([distances(assignment) * self.y[assignment] for assignment in assignments])

    # flow balance constraints for data nodes
    for i in range(self.n):
        self.model += pulp.lpSum(self.y[(i, j)] for j in range(self.k)) == 1

    # flow balance constraints for cluster nodes
    for j in range(self.k):
        self.model += pulp.lpSum(self.y[(i, j)] for i in range(self.n)) - self.min_size == self.b[j]

    # capacity constraint on the outflow of cluster nodes
    for j in range(self.k):
        self.model += self.b[j] <= self.max_size - self.min_size

    # flow balance constraint for the sink node
    self.model += pulp.lpSum(self.b[j] for j in range(self.k)) == self.n - (self.k * self.min_size)
Example 3: create_model
# Required import: import pulp [as alias]
# Or: from pulp import LpInteger [as alias]
def create_model(self):
    def distances(assignment):
        return l2_distance(self.data[assignment[0]], self.centroids[assignment[1]])

    assignments = [(i, j) for i in range(self.n) for j in range(self.k)]

    # assignment variables
    self.y = pulp.LpVariable.dicts('data-to-cluster assignments',
                                   assignments,
                                   lowBound=0,
                                   upBound=1,
                                   cat=pulp.LpInteger)

    # create the model
    self.model = pulp.LpProblem("Model for assignment subproblem", pulp.LpMinimize)

    # objective function: the distances are weighted as well,
    # otherwise the weighted centroid computation doesn't make sense
    self.model += pulp.lpSum([distances(assignment) * self.weights[assignment[0]] * self.y[assignment]
                              for assignment in assignments]), 'Objective Function - sum weighted squared distances to assigned centroid'

    # constraints on the total weights of clusters
    for j in range(self.k):
        self.model += pulp.lpSum([self.weights[i] * self.y[(i, j)] for i in range(self.n)]) >= self.min_weight, "minimum weight for cluster {}".format(j)
        self.model += pulp.lpSum([self.weights[i] * self.y[(i, j)] for i in range(self.n)]) <= self.max_weight, "maximum weight for cluster {}".format(j)

    # make sure each point is assigned exactly once
    for i in range(self.n):
        self.model += pulp.lpSum([self.y[(i, j)] for j in range(self.k)]) == 1, "must assign point {}".format(i)
Example 4: solve_ilp
# Required import: import pulp [as alias]
# Or: from pulp import LpInteger [as alias]
def solve_ilp(self, N):
    # build the A matrix: a_ij is 1 if the j-th n-gram appears in the i-th sentence
    A = np.zeros((len(self.sentences_idx), len(self.ref_ngrams_idx)))
    for i in self.sentences_idx:
        sent = self.sentences[i].untokenized_form
        sngrams = list(extract_ngrams2([sent], self.stemmer, self.LANGUAGE, N))
        for j in self.ref_ngrams_idx:
            if self.ref_ngrams[j] in sngrams:
                A[i][j] = 1

    # define the ILP variables: x_i is 1 if sentence i is selected,
    # z_j is 1 if n-gram j appears in the created summary
    x = pulp.LpVariable.dicts('sentences', self.sentences_idx, lowBound=0, upBound=1, cat=pulp.LpInteger)
    z = pulp.LpVariable.dicts('grams', self.ref_ngrams_idx, lowBound=0, upBound=1, cat=pulp.LpInteger)

    # define the ILP problem: maximize coverage of n-grams from the reference summaries
    prob = pulp.LpProblem("ExtractiveUpperBound", pulp.LpMaximize)
    prob += pulp.lpSum(z[j] for j in self.ref_ngrams_idx)

    # constraints: a length constraint and a consistency constraint
    # (z_j can only be 1 if a selected sentence contains n-gram j)
    prob += pulp.lpSum(x[i] * self.sentences[i].length for i in self.sentences_idx) <= self.sum_length
    for j in self.ref_ngrams_idx:
        prob += pulp.lpSum(A[i][j] * x[i] for i in self.sentences_idx) >= z[j]

    # solve the ILP and post-process to get the summary
    try:
        print('Solving using CPLEX')
        prob.solve(pulp.CPLEX(msg=0))
    except Exception:
        print('Falling back to GLPK')
        prob.solve(pulp.GLPK(msg=0))

    summary_idx = []
    for idx in self.sentences_idx:
        if x[idx].value() == 1.0:
            summary_idx.append(idx)
    return summary_idx
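The same selection pattern can be reproduced on toy data without the class, CPLEX, or GLPK. The sketch below is only an illustration under assumptions: the incidence matrix A, the sentence lengths, and the length budget are invented, and PuLP's default solver is used.

import pulp

# Made-up toy data: 4 candidate sentences, 5 reference n-grams, length budget 10.
A = [[1, 1, 0, 0, 0],   # A[i][j] = 1 if n-gram j appears in sentence i
     [0, 1, 1, 0, 0],
     [0, 0, 0, 1, 1],
     [1, 0, 0, 0, 1]]
lengths = [4, 5, 6, 3]
budget = 10
n_sent, n_gram = len(A), len(A[0])

x = pulp.LpVariable.dicts("sent", range(n_sent), lowBound=0, upBound=1, cat=pulp.LpInteger)
z = pulp.LpVariable.dicts("gram", range(n_gram), lowBound=0, upBound=1, cat=pulp.LpInteger)

prob = pulp.LpProblem("toy_upper_bound", pulp.LpMaximize)
prob += pulp.lpSum(z[j] for j in range(n_gram))   # cover as many reference n-grams as possible
prob += pulp.lpSum(lengths[i] * x[i] for i in range(n_sent)) <= budget
for j in range(n_gram):
    # z_j may be 1 only if some selected sentence contains n-gram j
    prob += pulp.lpSum(A[i][j] * x[i] for i in range(n_sent)) >= z[j]

prob.solve()
print([i for i in range(n_sent) if x[i].value() == 1.0])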
Example 5: make_into_lp_problem
# Required import: import pulp [as alias]
# Or: from pulp import LpInteger [as alias]
# imports needed by this snippet (not shown in the original excerpt)
import random

from pulp import LpInteger, LpMinimize, LpProblem, LpVariable


def make_into_lp_problem(good_for, N, add_noise=False):
    """
    Helper function for solve_with_lp_and_reduce()

    N        --- number of isoform sequences
    good_for --- iterable of (isoform_index, [matched path indices]) pairs
    """
    prob = LpProblem("The Whiskas Problem", LpMinimize)

    # each entry of good_for is (isoform_index, [list of matched path indices])
    # ex: (0, [1, 2, 4])
    # ex: (3, [2, 5])
    used_paths = []
    for t_i, p_i_s in good_for:
        used_paths += p_i_s
    used_paths = list(set(used_paths))

    variables = [LpVariable(str(i), 0, 1, LpInteger) for i in used_paths]
    #variables = [LpVariable(str(i), 0, 1, LpInteger) for i in xrange(N)]

    # objective: minimize sum_{i} x_i
    prob += sum(v for v in variables)

    already_seen = set()
    # constraints: for each isoform, sum_i c_i * x_i >= 1,
    # where c_i = 1 if path i is matched for the isoform
    # ex: (0, [1, 2, 4]) becomes x_1 + x_2 + x_4 >= 1
    for t_i, p_i_s in good_for:
        #c_i_s = [1 if i in p_i_s else 0 for i in xrange(N)]
        #prob += sum(variables[i]*(1 if i in p_i_s else 0) for i in xrange(N)) >= 1
        p_i_s.sort()
        pattern = ",".join(map(str, p_i_s))
        #print >> sys.stderr, t_i, p_i_s, pattern
        if pattern not in already_seen:
            if add_noise:
                prob += sum(variables[i] * (1 + random.random() if p in p_i_s else 0) for i, p in enumerate(used_paths)) >= 1
            else:
                prob += sum(variables[i] * (1 if p in p_i_s else 0) for i, p in enumerate(used_paths)) >= 1
            already_seen.add(pattern)

    prob.writeLP('cogent.lp')
    return prob
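A hypothetical call, with the isoform/path data invented for illustration, might look like the lines below; the variable names returned by the solver are the path indices that get kept.

# Hypothetical usage: isoform 0 is matched by paths 1, 2 and 4,
# isoform 1 by paths 2 and 5, so selecting path 2 alone covers both.
good_for = [(0, [1, 2, 4]), (1, [2, 5])]
prob = make_into_lp_problem(good_for, N=6)
prob.solve()                                   # PuLP's default solver; also writes cogent.lp
print([v.name for v in prob.variables() if v.value() == 1])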
Example 6: create_lscp_model
# Required import: import pulp [as alias]
# Or: from pulp import LpInteger [as alias]
def create_lscp_model(coverage_dict, model_file=None, delineator="$"):
    """
    Creates an LSCP (location set covering problem) model from the provided coverage and
    parameters. Writes a .lp file which can be solved with Gurobi.

    Church, R., & Murray, A. (2009). Coverage. In Business Site Selection, Location
    Analysis, and GIS (pp. 209-233). Hoboken, New Jersey: Wiley.

    :param coverage_dict: (dictionary) The coverage to use to generate the model
    :param model_file: (string) The model file to output
    :param delineator: (string) The character(s) to use to delineate the layer from the ids
    :return: (Pulp problem) The generated problem to solve
    """
    validate_coverage(coverage_dict, ["coverage"], ["binary"])
    if not isinstance(coverage_dict, dict):
        raise TypeError("coverage_dict is not a dictionary")
    if model_file and not isinstance(model_file, str):
        raise TypeError("model_file is not a string")
    if not isinstance(delineator, str):
        raise TypeError("delineator is not a string")

    # create the variables
    demand_vars = {}
    for demand_id in coverage_dict["demand"]:
        demand_vars[demand_id] = pulp.LpVariable("Y{}{}".format(delineator, demand_id), 0, 1, pulp.LpInteger)
    facility_vars = {}
    for facility_type in coverage_dict["facilities"]:
        facility_vars[facility_type] = {}
        for facility_id in coverage_dict["facilities"][facility_type]:
            facility_vars[facility_type][facility_id] = pulp.LpVariable(
                "{}{}{}".format(facility_type, delineator, facility_id), 0, 1, pulp.LpInteger)

    # create the problem
    prob = pulp.LpProblem("LSCP", pulp.LpMinimize)

    # create the objective: minimize the number of facilities
    to_sum = []
    for facility_type in coverage_dict["facilities"]:
        for facility_id in coverage_dict["facilities"][facility_type]:
            to_sum.append(facility_vars[facility_type][facility_id])
    prob += pulp.lpSum(to_sum)

    # add the coverage constraints
    for demand_id in coverage_dict["demand"]:
        to_sum = []
        for facility_type in coverage_dict["demand"][demand_id]["coverage"]:
            for facility_id in coverage_dict["demand"][demand_id]["coverage"][facility_type]:
                to_sum.append(facility_vars[facility_type][facility_id])
        # Hack to get the model to "solve" when it is infeasible with GLPK:
        # PuLP automatically adds a dummy variable when the sum is empty, and because those
        # dummies all share the same name, GLPK fails to read the LP file properly.
        if not to_sum:
            to_sum = [pulp.LpVariable("__dummy{}{}".format(delineator, demand_id), 0, 0, pulp.LpInteger)]
        prob += pulp.lpSum(to_sum) >= 1, "D{}".format(demand_id)

    if model_file:
        prob.writeLP(model_file)
    return prob
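Because validate_coverage and the full coverage_dict layout are not shown here, a direct call is hard to reproduce. The sketch below instead rebuilds the same LSCP pattern by hand on an invented two-facility, three-demand instance, using binary pulp.LpInteger facility variables and one covering constraint per demand point.

import pulp

# Invented instance: which facilities cover each demand point.
coverage = {"d1": ["f1"], "d2": ["f1", "f2"], "d3": ["f2"]}
facilities = ["f1", "f2"]

fac_vars = {f: pulp.LpVariable("facility_{}".format(f), 0, 1, pulp.LpInteger) for f in facilities}

prob = pulp.LpProblem("toy_LSCP", pulp.LpMinimize)
prob += pulp.lpSum(fac_vars[f] for f in facilities)          # minimize the number of sited facilities
for d, covering in coverage.items():
    # every demand point must be covered by at least one selected facility
    prob += pulp.lpSum(fac_vars[f] for f in covering) >= 1, "D{}".format(d)

prob.solve()
print([f for f in facilities if fac_vars[f].value() == 1])   # expect both f1 and f2 here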