This article collects typical usage examples of pyomo.environ.ConcreteModel.G in Python. If you are wondering what ConcreteModel.G does, how to use it, or where to find working examples, the curated code samples below may help. You can also look further into usage examples of the containing class, pyomo.environ.ConcreteModel.
Two code examples of ConcreteModel.G are shown below, sorted by popularity by default.
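Before the two examples, here is a minimal, self-contained sketch (not taken from either example; the set and variable names are illustrative assumptions) of what attaching a G component to a ConcreteModel looks like. G is simply a Pyomo modeling component, such as a Var, assigned as an attribute of the model.

from pyomo.environ import ConcreteModel, Set, Var, NonNegativeReals

# minimal illustrative model; names are placeholders, not taken from the examples below
m = ConcreteModel(name="minimal_example")
m.Nodes = Set(initialize=["A", "B"])                  # an index set
m.TS = Set(initialize=[0, 1], ordered=True)           # time steps
m.G = Var(m.Nodes * m.TS, domain=NonNegativeReals)    # the G component: a variable indexed by Nodes x TS
# individual entries are then referenced as m.G[node, t] in constraints or objectives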
Example 1: __init__
# Required import: from pyomo.environ import ConcreteModel [as alias]
# Or: from pyomo.environ.ConcreteModel import G [as alias]
# This snippet also references: math, itertools as itr, SolverFactory (Pyomo), and EpitopePredictionResult (Fred2.Core.Result)
def __init__(self, results, threshold=None, dist_threshold=1.0, distance={}, expression={}, uncertainty={},
             overlap=0, k=10, k_taa=0, solver="glpk", verbosity=0, include=[]):
    """
    :param results: Epitope prediction result object from which the epitope selection should be performed
    :type results: :class:`~Fred2.Core.Result.EpitopePredictionResult`
    :param dict(str,float) threshold: A dictionary storing the binding threshold for each HLA
                                      :class:`~Fred2.Core.Allele.Allele`; key = allele name, value = the threshold
    :param float dist_threshold: Distance threshold: an epitope is excluded if its dist-2-self score is
                                 smaller than or equal to this threshold for any HLA allele
    :param dict((str,str),float) distance: A dictionary with key: (peptide sequence, HLA name)
                                           and value: the distance2self
    :param dict(str,float) expression: A dictionary with key: gene ID, and value: gene expression
                                       in FPKM/RPKM or TPM
    :param dict((str,str),float) uncertainty: A dictionary with key: (peptide sequence, HLA name), and value: the
                                              associated uncertainty of the immunogenicity prediction
    :param int k: The number of epitopes to select
    :param int k_taa: The number of TAA epitopes to select
    :param str solver: The solver to be used (default glpk)
    :param int verbosity: Integer defining whether additional debug prints are made (>0 => debug mode)
    """
    # check input data
    if not isinstance(results, EpitopePredictionResult):
        raise ValueError("first input parameter is not of type EpitopePredictionResult")

    _alleles = results.columns.values.tolist()

    # generate abundance dictionary of HLA alleles; the default is 2.0, as values will be log2-transformed
    probs = {a.name: 2.0 if a.get_metadata("abundance", only_first=True) is None else
             a.get_metadata("abundance", only_first=True) for a in _alleles}

    # start constructing model
    self.__solver = SolverFactory(solver)
    self.__verbosity = verbosity
    self.__changed = True
    self.__alleleProb = _alleles
    self.__k = k
    self.__k_taa = k_taa
    self.__result = None
    self.__thresh = {} if threshold is None else threshold
    self.__included = include
    self.overlap = overlap

    # variable, set and parameter preparation
    alleles_I = {}
    variations = []
    epi_var = {}
    imm = {}
    peps = {}
    taa = []
    var_epi = {}
    cons = {}

    for a in _alleles:
        alleles_I.setdefault(a.name, set())

    # unstack the multi-index df to get a normal df based on the first prediction method
    # and filter for binding epitopes
    method = results.index.values[0][1]
    res_df = results.xs(results.index.values[0][1], level="Method")
    # if predictions are not available for peptides/alleles, replace them by 0
    res_df.fillna(0, inplace=True)
    res_df = res_df[res_df.apply(lambda x: any(x[a] > self.__thresh.get(a.name, -float("inf"))
                                               for a in res_df.columns), axis=1)]
    res_df.fillna(0, inplace=True)

    # transform scores to 1-log50k(IC50) scores if necessary
    # and generate mapping dictionaries for Set definitions
    for tup in res_df.itertuples():
        p = tup[0]
        seq = str(p)
        if any(distance.get((seq, a.name), 1.0) <= dist_threshold for a in _alleles):
            continue
        peps[seq] = p
        if p.get_metadata("taa", only_first=True):
            taa.append(seq)
        for a, s in itr.izip(res_df.columns, tup[1:]):
            if method in ["smm", "smmpmbec", "arb", "comblibsidney"]:
                try:
                    thr = min(1., max(0.0, 1.0 - math.log(self.__thresh.get(a.name),
                                                          50000))) if a.name in self.__thresh else -float("inf")
                except:
                    thr = 0
                if s >= thr:
                    alleles_I.setdefault(a.name, set()).add(seq)
                imm[seq, a.name] = min(1., max(0.0, 1.0 - math.log(s, 50000)))
            else:
                if s > self.__thresh.get(a.name, -float("inf")):
                    alleles_I.setdefault(a.name, set()).add(seq)
                imm[seq, a.name] = s

        prots = set(pr for pr in p.get_all_proteins())
        cons[seq] = len(prots)
        for prot in prots:
            variations.append(prot.gene_id)
            epi_var.setdefault(prot.gene_id, set()).add(seq)
#......... part of the code omitted here .........
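The score transformation used above for IC50-scale methods (smm, smmpmbec, arb, comblibsidney) maps a raw score s to 1 - log_50000(s), clipped to the range [0, 1]. A standalone sketch of that transform, independent of the class above (the function name is hypothetical, chosen here for illustration):

import math

def log50k_transform(score):
    # map an IC50-scale prediction score to [0, 1] via 1 - log_50000(score),
    # clipped exactly as in the example above
    return min(1.0, max(0.0, 1.0 - math.log(score, 50000)))

# e.g. a score of 500 maps to roughly 0.43; stronger binders (lower IC50) map closer to 1.0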
Example 2: create_model
# Required import: from pyomo.environ import ConcreteModel [as alias]
# Or: from pyomo.environ.ConcreteModel import G [as alias]
# This snippet also references: Set, Var, NonNegativeReals from pyomo.environ
def create_model(model_name, nodes, links, type_nodes, type_links, timesteps, params):
    m = ConcreteModel(name=model_name)

    # SETS

    # basic sets
    m.Nodes = Set(initialize=nodes)  # nodes
    m.Links = Set(initialize=links)  # links
    m.TS = Set(initialize=timesteps, ordered=True)  # time steps

    # all nodes directly upstream from a node
    def NodesIn_init(m, node):
        retval = []
        for (i, j) in m.Links:
            if j == node:
                retval.append(i)
        return retval
    m.NodesIn = Set(m.Nodes, initialize=NodesIn_init)

    # all nodes directly downstream from a node
    def NodesOut_init(m, node):
        retval = []
        for (j, k) in m.Links:
            if j == node:
                retval.append(k)
        return retval
    m.NodesOut = Set(m.Nodes, initialize=NodesOut_init)

    # sets (nodes or links) for each template type
    for k, v in type_nodes.items():
        exec("m.{} = Set(within=m.Nodes, initialize={})".format(k.replace(" ", "_"), v))
    for k, v in type_links.items():
        exec("m.{} = Set(within=m.Links, initialize={})".format(k.replace(" ", "_"), v))

    # sets for non-storage nodes (assumes type_nodes defined the Reservoir and Junction sets above)
    m.NonReservoir = m.Nodes - m.Reservoir
    m.DemandNodes = m.NonReservoir - m.Junction

    # these are collected to initialize the node-block/link-block sets
    demand_node_blocks = []
    reservoir_blocks = []
    link_blocks = []

    # set - all blocks in each demand or reservoir node, and identify node-blocks
    def NodeBlockLookup_init(m, node):
        if "Priority" in params["node"] and node in params["node"]["Priority"]:
            blocks = params["node"]["Priority"][node].columns
        else:
            blocks = [0]  # every node should have a priority
        node_blocks = [(node, b) for b in blocks]
        if node in m.DemandNodes:
            demand_node_blocks.extend(node_blocks)
        elif node in m.Reservoir:
            reservoir_blocks.extend(node_blocks)
        return blocks
    m.DemandNodeBlockLookup = Set(m.DemandNodes, initialize=NodeBlockLookup_init)
    m.ReservoirBlockLookup = Set(m.Reservoir, initialize=NodeBlockLookup_init)

    # set - all blocks in each link
    def LinkBlockLookup_init(m, i, j):
if "Priority" in params["link"] and (i, j) in params["node"]["Priority"]:
blocks = params["link"]["Priority"][(i, j)].columns
else:
blocks = [0] # every link should have a priority
# return Set.End
link_blocks.extend([(i, j, b) for b in blocks])
return blocks
m.LinkBlockLookup = Set(m.Links, initialize=LinkBlockLookup_init)
# create node-block and link-block sets
m.DemandNodeBlocks = Set(initialize=demand_node_blocks)
m.ReservoirBlocks = Set(initialize=reservoir_blocks)
m.LinkBlocks = Set(initialize=link_blocks)
# VARIABLES
m.D = Var(m.DemandNodes * m.TS, domain=NonNegativeReals) # delivery to demand nodes
m.D_DB = Var(m.DemandNodeBlocks * m.TS, domain=NonNegativeReals) # delivery to demand nodes
m.D_surplus = Var(m.DemandNodes * m.TS, domain=NonNegativeReals) # delivery to demand nodes
m.S = Var(m.Reservoir * m.TS, domain=NonNegativeReals) # storage
m.S_RB = Var(m.ReservoirBlocks * m.TS, domain=NonNegativeReals) # storage
m.S_surplus = Var(m.Reservoir * m.TS, domain=NonNegativeReals) # storage
m.G = Var(m.Nodes * m.TS, domain=NonNegativeReals) # gain (local inflow)
m.L = Var(m.Nodes * m.TS, domain=NonNegativeReals) # loss (local outflow)
m.I = Var(m.Nodes * m.TS, domain=NonNegativeReals) # total inflow to a node
m.O = Var(m.Nodes * m.TS, domain=NonNegativeReals) # total outflow from a node
m.Q = Var(m.Links * m.TS, domain=NonNegativeReals) # flow in links
m.Q_LB = Var(m.LinkBlocks * m.TS, domain=NonNegativeReals) # flow in links
m.Q_surplus = Var(m.Links * m.TS, domain=NonNegativeReals) # flow in links
# PARAMETERS
# IMPORTANT: Defaults should not be set here
#.........这里部分代码省略.........
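The omitted remainder of create_model presumably defines the parameters, constraints, and objective. As a hedged illustration only (an assumption, not part of the original function), a node balance rule is the typical place where the G variable defined above would appear, for example as lines inside the function body such as:

    # hypothetical sketch, not from the original code: a rule tying total node inflow
    # to upstream link flows plus the local gain G (Constraint would also need to be
    # imported from pyomo.environ)
    def total_inflow_rule(m, node, t):
        return m.I[node, t] == sum(m.Q[i, node, t] for i in m.NodesIn[node]) + m.G[node, t]
    m.TotalInflow = Constraint(m.Nodes * m.TS, rule=total_inflow_rule)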