This article collects typical usage examples of the Python method pyomo.environ.ConcreteModel.E. If you are wondering what ConcreteModel.E does, how to use it, or what working examples look like, the curated code samples below may help. You can also look further into the usage of its containing class, pyomo.environ.ConcreteModel.
Two code examples of the ConcreteModel.E method are shown below.
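For orientation, here is a minimal, self-contained sketch of the pattern both examples build on: create a ConcreteModel and attach components to it by attribute assignment, with E used here as the name of an epitope set. Every component name and data value in this sketch is illustrative and does not come from the examples below.

from pyomo.environ import ConcreteModel, Set, Var, Param, Objective, Binary, maximize

model = ConcreteModel()
# E: the set of candidate epitope sequences (illustrative data)
model.E = Set(initialize=["SYFPEITHI", "KLLEGLTVV"])
# immunogenicity score per epitope (illustrative values)
model.i = Param(model.E, initialize={"SYFPEITHI": 0.8, "KLLEGLTVV": 0.3})
# binary decision variable: 1 if the epitope is selected
model.x = Var(model.E, domain=Binary)
# maximize the total score of the selected epitopes
model.obj = Objective(rule=lambda m: sum(m.i[e] * m.x[e] for e in m.E), sense=maximize)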
Example 1: __init__
# Required import: from pyomo.environ import ConcreteModel [as alias]
# Or: from pyomo.environ.ConcreteModel import E [as alias]
def __init__(self, results, threshold=None, k=10, solver="glpk", verbosity=0):
    """
    :param results: Epitope prediction result object from which the epitope selection should be performed
    :type results: :class:`~Fred2.Core.Result.EpitopePredictionResult`
    :param dict(str,float) threshold: A dictionary storing the binding threshold for each HLA
        :class:`~Fred2.Core.Allele.Allele`; key = allele name, value = the threshold
    :param int k: The number of epitopes to select
    :param str solver: The solver to be used (default: glpk)
    :param int verbosity: Integer defining whether additional debug prints are made (>0 => debug mode)
    """
    # check input data
    if not isinstance(results, EpitopePredictionResult):
        raise ValueError("first input parameter is not of type EpitopePredictionResult")

    _alleles = copy.deepcopy(results.columns.values.tolist())

    # test if allele probabilities are set; if not, set them uniformly,
    # and if they are only partly set, infer the missing values (assuming uniformity)
    prob = []
    no_prob = []
    for a in _alleles:
        if a.prob is None:
            no_prob.append(a)
        else:
            prob.append(a)

    if len(no_prob) > 0:
        # group by locus
        no_prob_grouped = {}
        prob_grouped = {}
        for a in no_prob:
            no_prob_grouped.setdefault(a.locus, []).append(a)
        for a in prob:
            prob_grouped.setdefault(a.locus, []).append(a)

        for g, v in no_prob_grouped.items():
            total_loc_a = len(v)
            if g in prob_grouped:
                remaining_mass = 1.0 - sum(a.prob for a in prob_grouped[g])
                for a in v:
                    a.prob = remaining_mass / total_loc_a
            else:
                for a in v:
                    a.prob = 1.0 / total_loc_a

    probs = {a.name: a.prob for a in _alleles}
    if verbosity:
        for a in _alleles:
            print(a.name, a.prob)

    # start constructing model
    self.__solver = SolverFactory(solver)
    self.__verbosity = verbosity
    self.__changed = True
    self.__alleleProb = _alleles
    self.__k = k
    self.__result = None
    self.__thresh = {} if threshold is None else threshold

    # variable, set and parameter preparation
    alleles_I = {}
    variations = []
    epi_var = {}
    imm = {}
    peps = {}
    cons = {}

    # unstack the multi-index df to get a normal df based on the first prediction method
    # and filter for binding epitopes
    method = results.index.values[0][1]
    res_df = results.xs(results.index.values[0][1], level="Method")
    res_df = res_df[res_df.apply(lambda x: any(x[a] > self.__thresh.get(a.name, -float("inf"))
                                               for a in res_df.columns), axis=1)]

    for tup in res_df.itertuples():
        p = tup[0]
        seq = str(p)
        peps[seq] = p
        for a, s in zip(res_df.columns, tup[1:]):
            if method in ["smm", "smmpmbec", "arb", "comblibsidney"]:
                try:
                    thr = min(1., max(0.0, 1.0 - math.log(self.__thresh.get(a.name),
                                                          50000))) if a.name in self.__thresh else -float("inf")
                except:
                    thr = 0
                if s >= thr:
                    alleles_I.setdefault(a.name, set()).add(seq)
                    imm[seq, a.name] = min(1., max(0.0, 1.0 - math.log(s, 50000)))
            else:
                if s > self.__thresh.get(a.name, -float("inf")):
                    alleles_I.setdefault(a.name, set()).add(seq)
                    imm[seq, a.name] = s

        prots = set(pr for pr in p.get_all_proteins())
        cons[seq] = len(prots)
        for prot in prots:
            variations.append(prot.gene_id)
            epi_var.setdefault(prot.gene_id, set()).add(seq)
    self.__peptideSet = peps
# ......... (remainder of this code omitted) .........
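The listing above stops before the Pyomo model is actually built. The sketch below shows one way the dictionaries prepared in this constructor (peps, alleles_I, variations, imm, probs, and the limit k) could be turned into model components; it is not the omitted Fred2 code, every component name is an assumption, and it additionally assumes the imports Set, Var, Param, Constraint, Objective, Binary, maximize from pyomo.environ.

# Sketch only -- not the omitted original code
model = ConcreteModel()
model.E = Set(initialize=set(peps.keys()))           # candidate epitopes
model.A = Set(initialize=list(alleles_I.keys()))     # HLA alleles
model.Q = Set(initialize=set(variations))            # genes the epitopes originate from
model.x = Var(model.E, domain=Binary)                # 1 if an epitope is selected
model.i = Param(model.E, model.A, initialize=lambda m, e, a: imm.get((e, a), 0.0))
model.p = Param(model.A, initialize=lambda m, a: probs[a])
# maximize the expected immunogenicity of the selected epitopes ...
model.obj = Objective(
    rule=lambda m: sum(m.p[a] * m.i[e, a] * m.x[e] for e in m.E for a in m.A),
    sense=maximize)
# ... while selecting at most k of them
model.kc = Constraint(rule=lambda m: sum(m.x[e] for e in m.E) <= k)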
Example 2: __init__
# Required import: from pyomo.environ import ConcreteModel [as alias]
# Or: from pyomo.environ.ConcreteModel import E [as alias]
def __init__(self, results, threshold=None, dist_threshold=1.0, distance={}, expression={}, uncertainty={},
             overlap=0, k=10, k_taa=0, solver="glpk", verbosity=0, include=[]):
    """
    :param results: Epitope prediction result object from which the epitope selection should be performed
    :type results: :class:`~Fred2.Core.Result.EpitopePredictionResult`
    :param dict(str,float) threshold: A dictionary storing the binding threshold for each HLA
        :class:`~Fred2.Core.Allele.Allele`; key = allele name, value = the threshold
    :param float dist_threshold: Distance threshold: an epitope is excluded if its distance-to-self score
        is smaller than or equal to this threshold for any HLA allele
    :param dict((str,str),float) distance: A dictionary with key (peptide sequence, HLA name)
        and value the distance2self
    :param dict(str,float) expression: A dictionary with key gene ID and value gene expression
        in FPKM/RPKM or TPM
    :param dict((str,str),float) uncertainty: A dictionary with key (peptide sequence, HLA name) and value the
        associated uncertainty of the immunogenicity prediction
    :param int k: The number of epitopes to select
    :param int k_taa: The number of TAA epitopes to select
    :param str solver: The solver to be used (default: glpk)
    :param int verbosity: Integer defining whether additional debug prints are made (>0 => debug mode)
    """
    # check input data
    if not isinstance(results, EpitopePredictionResult):
        raise ValueError("first input parameter is not of type EpitopePredictionResult")

    _alleles = results.columns.values.tolist()

    # generate abundance dictionary of HLA alleles; the default is 2.0, as values will be log2-transformed
    probs = {a.name: 2.0 if a.get_metadata("abundance", only_first=True) is None else
             a.get_metadata("abundance", only_first=True) for a in _alleles}

    # start constructing model
    self.__solver = SolverFactory(solver)
    self.__verbosity = verbosity
    self.__changed = True
    self.__alleleProb = _alleles
    self.__k = k
    self.__k_taa = k_taa
    self.__result = None
    self.__thresh = {} if threshold is None else threshold
    self.__included = include
    self.overlap = overlap

    # variable, set and parameter preparation
    alleles_I = {}
    variations = []
    epi_var = {}
    imm = {}
    peps = {}
    taa = []
    var_epi = {}
    cons = {}

    for a in _alleles:
        alleles_I.setdefault(a.name, set())

    # unstack the multi-index df to get a normal df based on the first prediction method
    # and filter for binding epitopes
    method = results.index.values[0][1]
    res_df = results.xs(results.index.values[0][1], level="Method")
    # if predictions are not available for peptides/alleles, replace them by 0
    res_df.fillna(0, inplace=True)
    res_df = res_df[res_df.apply(lambda x: any(x[a] > self.__thresh.get(a.name, -float("inf"))
                                               for a in res_df.columns), axis=1)]
    res_df.fillna(0, inplace=True)

    # transform scores to 1-log50k(IC50) scores if necessary
    # and generate mapping dictionaries for Set definitions
    for tup in res_df.itertuples():
        p = tup[0]
        seq = str(p)
        if any(distance.get((seq, a.name), 1.0) <= dist_threshold for a in _alleles):
            continue
        peps[seq] = p
        if p.get_metadata("taa", only_first=True):
            taa.append(seq)
        for a, s in zip(res_df.columns, tup[1:]):
            if method in ["smm", "smmpmbec", "arb", "comblibsidney"]:
                try:
                    thr = min(1., max(0.0, 1.0 - math.log(self.__thresh.get(a.name),
                                                          50000))) if a.name in self.__thresh else -float("inf")
                except:
                    thr = 0
                if s >= thr:
                    alleles_I.setdefault(a.name, set()).add(seq)
                    imm[seq, a.name] = min(1., max(0.0, 1.0 - math.log(s, 50000)))
            else:
                if s > self.__thresh.get(a.name, -float("inf")):
                    alleles_I.setdefault(a.name, set()).add(seq)
                    imm[seq, a.name] = s

        prots = set(pr for pr in p.get_all_proteins())
        cons[seq] = len(prots)
        for prot in prots:
            variations.append(prot.gene_id)
            epi_var.setdefault(prot.gene_id, set()).add(seq)
# ......... (remainder of this code omitted) .........
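Both constructors only prepare data and create a solver handle via SolverFactory(solver); the optimization itself happens in a later method that is not shown here. The self-contained toy below demonstrates the solve step with glpk (the default solver named above); all component names and numbers are illustrative, and the GLPK binary must be installed for SolverFactory("glpk") to work.

from pyomo.environ import (ConcreteModel, Set, Var, Param, Constraint,
                           Objective, Binary, maximize, SolverFactory)

model = ConcreteModel()
model.E = Set(initialize=["PEP1", "PEP2", "PEP3"])                     # toy epitopes
model.i = Param(model.E, initialize={"PEP1": 0.9, "PEP2": 0.4, "PEP3": 0.7})
model.x = Var(model.E, domain=Binary)
model.obj = Objective(rule=lambda m: sum(m.i[e] * m.x[e] for e in m.E), sense=maximize)
model.card = Constraint(rule=lambda m: sum(m.x[e] for e in m.E) <= 2)  # pick at most 2

solver = SolverFactory("glpk")
solver.solve(model)
selected = [e for e in model.E if model.x[e].value > 0.5]
print(selected)  # expected: ['PEP1', 'PEP3']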