This article collects typical usage examples of the rgt.GenomicRegionSet.GenomicRegionSet.sort method in Python. If you are unsure what GenomicRegionSet.sort does or how to call it, the curated examples below may help. You can also explore further usage examples of the containing class, rgt.GenomicRegionSet.GenomicRegionSet.
The following presents 6 code examples of the GenomicRegionSet.sort method, listed roughly in order of popularity.
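Before the examples, a minimal sketch of the typical call pattern may be useful. The file name below is a placeholder; the sketch assumes the RGT package is installed, that regions are loaded with read() (some examples below use the older read_bed()), and that sort() orders regions in place by chromosome and coordinates, as the examples rely on.

# Minimal usage sketch (hypothetical file name); assumes the RGT package is
# installed and that read()/sort() behave as in the examples below.
from rgt.GenomicRegionSet import GenomicRegionSet

regions = GenomicRegionSet("my_regions")      # create an empty, named region set
regions.read("my_regions.bed")                # load regions from a BED file (placeholder path)
regions.sort()                                # sort in place by chromosome and coordinates
print(len(regions.sequences))                 # sorted GenomicRegion objects live in .sequences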
Example 1: test_subtract_exact
# Required import: from rgt.GenomicRegionSet import GenomicRegionSet [as alias]
# Or: from rgt.GenomicRegionSet.GenomicRegionSet import sort [as alias]
def test_subtract_exact(self):
    reference = GenomicRegionSet("reference")
    reference.read(os.path.join(os.path.dirname(__file__), "test_result.bed"))
    background = GenomicRegionSet("background")
    background.read(os.path.join(os.path.dirname(__file__), "test_background.bed"))
    target = GenomicRegionSet("target")
    target.read(os.path.join(os.path.dirname(__file__), "test_target.bed"))

    background_tmp = background.subtract(target, exact=True)
    reference.sort()

    self.assertEqual(len(background_tmp.sequences), len(reference.sequences))
    for region, region_ref in zip(background_tmp.sequences, reference.sequences):
        self.assertEqual(region.__cmp__(region_ref), 0)
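The pairwise zip comparison at the end only makes sense if both region sets are in the same order, which is why reference.sort() is called first (the test apparently relies on subtract(..., exact=True) returning an already sorted result). A plain-Python analogue of the same idea, using (chrom, start, end) tuples instead of GenomicRegion objects:

# Plain-Python analogue of the sorted, element-wise comparison above (toy data).
result = [("chr1", 100, 200), ("chr1", 300, 400)]
reference = [("chr1", 300, 400), ("chr1", 100, 200)]

result.sort()
reference.sort()                              # same role as reference.sort() in the test
assert len(result) == len(reference)
for region, region_ref in zip(result, reference):
    assert region == region_ref               # mirrors region.__cmp__(region_ref) == 0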
Example 2: merge_delete
# Required import: from rgt.GenomicRegionSet import GenomicRegionSet [as alias]
# Or: from rgt.GenomicRegionSet.GenomicRegionSet import sort [as alias]
def merge_delete(ext_size, merge, peak_list, pvalue_list):
    # peaks_gain = read_diffpeaks(path)
    regions_plus = GenomicRegionSet('regions')  # pot. mergeable
    regions_minus = GenomicRegionSet('regions')  # pot. mergeable
    regions_unmergable = GenomicRegionSet('regions')
    last_orientation = ""

    for i, t in enumerate(peak_list):
        chrom, start, end, c1, c2, strand, ratio = t[0], t[1], t[2], t[3], t[4], t[5], t[6]
        r = GenomicRegion(chrom=chrom, initial=start, final=end, name='',
                          orientation=strand, data=str((c1, c2, pvalue_list[i], ratio)))
        if end - start > ext_size:
            if strand == '+':
                if last_orientation == '+':
                    regions_plus.add(r)
                else:
                    regions_unmergable.add(r)
            elif strand == '-':
                if last_orientation == '-':
                    regions_minus.add(r)
                else:
                    regions_unmergable.add(r)

    if merge:
        regions_plus.extend(ext_size/2, ext_size/2)
        regions_plus.merge()
        regions_plus.extend(-ext_size/2, -ext_size/2)
        merge_data(regions_plus)

        regions_minus.extend(ext_size/2, ext_size/2)
        regions_minus.merge()
        regions_minus.extend(-ext_size/2, -ext_size/2)
        merge_data(regions_minus)

    results = GenomicRegionSet('regions')
    for el in regions_plus:
        results.add(el)
    for el in regions_minus:
        results.add(el)
    for el in regions_unmergable:
        results.add(el)
    results.sort()

    return results
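A hypothetical call of merge_delete, assuming each peak_list row follows the (chrom, start, end, count1, count2, strand, ratio) layout unpacked above and that merge_data, GenomicRegion, and GenomicRegionSet are importable in the calling module:

# Hypothetical invocation (illustrative values only).
peak_list = [
    ("chr1", 1000, 1800, 35, 12, "+", 2.9),
    ("chr1", 5000, 5900, 10, 40, "-", 0.25),
]
pvalue_list = [1e-5, 3e-4]

results = merge_delete(ext_size=100, merge=True, peak_list=peak_list, pvalue_list=pvalue_list)
for region in results:                        # results.sort() above guarantees coordinate order
    print(region)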
Example 3: create_file
# Required import: from rgt.GenomicRegionSet import GenomicRegionSet [as alias]
# Or: from rgt.GenomicRegionSet.GenomicRegionSet import sort [as alias]
def create_file(self):
    # Expanding summits
    tfbs_summit_regions = GenomicRegionSet("TFBS Summit Regions")
    tfbs_summit_regions.read_bed(self.tfbs_summit_fname)
    for region in iter(tfbs_summit_regions):
        summit = int(region.data.split()[-1]) + region.initial
        region.initial = max(summit - (self.peak_ext / 2), 0)
        region.final = summit + (self.peak_ext / 2)

    # Calculating intersections
    mpbs_regions = GenomicRegionSet("MPBS Regions")
    mpbs_regions.read_bed(self.mpbs_fname)
    tfbs_summit_regions.sort()
    mpbs_regions.sort()
    with_overlap_regions = mpbs_regions.intersect(tfbs_summit_regions, mode=OverlapType.ORIGINAL)
    without_overlap_regions = mpbs_regions.subtract(tfbs_summit_regions, whole_region=True)

    tfbs_regions = GenomicRegionSet("TFBS Regions")
    for region in iter(with_overlap_regions):
        region.name = region.name.split(":")[0] + ":Y"
        tfbs_regions.add(region)
    for region in iter(without_overlap_regions):
        region.name = region.name.split(":")[0] + ":N"
        tfbs_regions.add(region)
    tfbs_regions.sort()

    tfbs_fname = os.path.join(self.output_location, "{}.bed".format(self.mpbs_name))
    tfbs_regions.write_bed(tfbs_fname)
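The core pattern here is: sort both sets, then split one of them into an overlapping subset via intersect(..., mode=OverlapType.ORIGINAL) and a non-overlapping subset via subtract(..., whole_region=True). A condensed sketch of that split with hypothetical file names; note that the import path for OverlapType is an assumption and may differ between RGT versions:

# Condensed sketch of the overlap/no-overlap split (placeholder file names).
from rgt.GenomicRegionSet import GenomicRegionSet
from rgt.Util import OverlapType              # assumed import path; may differ by RGT version

mpbs = GenomicRegionSet("mpbs")
mpbs.read_bed("mpbs.bed")                     # read_bed() as used in this example's RGT version
summits = GenomicRegionSet("summits")
summits.read_bed("summits.bed")
mpbs.sort()
summits.sort()

with_overlap = mpbs.intersect(summits, mode=OverlapType.ORIGINAL)   # MPBSs touching a summit
without_overlap = mpbs.subtract(summits, whole_region=True)         # MPBSs touching no summit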
Example 4: chip_evaluate
# Required import: from rgt.GenomicRegionSet import GenomicRegionSet [as alias]
# Or: from rgt.GenomicRegionSet.GenomicRegionSet import sort [as alias]
def chip_evaluate(self):
    """
    This evaluation methodology uses motif-predicted binding sites (MPBSs) together with TF ChIP-seq data
    to evaluate the footprint predictions.
    """
    # Evaluate Statistics
    fpr = dict()
    tpr = dict()
    roc_auc = dict()
    roc_auc_1 = dict()
    roc_auc_2 = dict()
    recall = dict()
    precision = dict()
    prc_auc = dict()

    if "SEG" in self.footprint_type:
        mpbs_regions = GenomicRegionSet("TFBS")
        mpbs_regions.read_bed(self.tfbs_file)
        mpbs_regions.sort()

        # Verifying the maximum score of the MPBS file
        max_score = -99999999
        for region in iter(mpbs_regions):
            score = int(region.data)
            if score > max_score:
                max_score = score
        max_score += 1

    for i in range(len(self.footprint_file)):
        footprints_regions = GenomicRegionSet("Footprints Prediction")
        footprints_regions.read_bed(self.footprint_file[i])

        # Sort footprint prediction bed files
        footprints_regions.sort()

        if self.footprint_type[i] == "SEG":
            # Increasing the score of an MPBS entry once if any overlap is found in the predicted footprints.
            increased_score_mpbs_regions = GenomicRegionSet("Increased Regions")
            intersect_regions = mpbs_regions.intersect(footprints_regions, mode=OverlapType.ORIGINAL)
            for region in iter(intersect_regions):
                region.data = str(int(region.data) + max_score)
                increased_score_mpbs_regions.add(region)

            # Keep the score of the remaining MPBS entries unchanged
            without_intersect_regions = mpbs_regions.subtract(footprints_regions, whole_region=True)
            for region in iter(without_intersect_regions):
                increased_score_mpbs_regions.add(region)

            increased_score_mpbs_regions.sort_score()

            fpr[i], tpr[i], roc_auc[i], roc_auc_1[i], roc_auc_2[i] = self.roc_curve(increased_score_mpbs_regions)
            recall[i], precision[i], prc_auc[i] = self.precision_recall_curve(increased_score_mpbs_regions)
        elif self.footprint_type[i] == "SC":
            footprints_regions.sort_score()
            fpr[i], tpr[i], roc_auc[i], roc_auc_1[i], roc_auc_2[i] = self.roc_curve(footprints_regions)
            recall[i], precision[i], prc_auc[i] = self.precision_recall_curve(footprints_regions)

    # Output the statistics results into text
    stats_fname = self.output_location + self.tf_name + "_stats.txt"
    stats_header = ["METHOD", "AUC_100", "AUC_10", "AUC_1", "AUPR"]
    with open(stats_fname, "w") as stats_file:
        stats_file.write("\t".join(stats_header) + "\n")
        for i in range(len(self.footprint_name)):
            stats_file.write(self.footprint_name[i] + "\t" + str(roc_auc[i]) + "\t" + str(roc_auc_1[i]) + "\t"
                             + str(roc_auc_2[i]) + "\t" + str(prc_auc[i]) + "\n")

    # Output the curves
    if self.print_roc_curve:
        label_x = "False Positive Rate"
        label_y = "True Positive Rate"
        curve_name = "ROC"
        self.plot_curve(fpr, tpr, roc_auc, label_x, label_y, self.tf_name, curve_name)
    if self.print_pr_curve:
        label_x = "Recall"
        label_y = "Precision"
        curve_name = "PRC"
        self.plot_curve(recall, precision, prc_auc, label_x, label_y, self.tf_name, curve_name)

    self.output_points(self.tf_name, fpr, tpr, recall, precision)
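The max_score trick deserves a short note: adding max_score to every MPBS that overlaps a predicted footprint pushes all predicted sites above all unpredicted ones in the subsequent sort_score(), while preserving the original score order within each group. The same idea in plain Python, with toy scores:

# Illustration of the max_score shift applied before sort_score() (toy numbers).
mpbs_scores = [850, 120, 430, 990]            # tag-count-like scores of all MPBSs
overlaps_prediction = [True, False, True, False]

max_score = max(mpbs_scores) + 1              # 991, computed as in the example above
shifted = [s + max_score if hit else s
           for s, hit in zip(mpbs_scores, overlaps_prediction)]

ranking = sorted(range(len(shifted)), key=lambda i: shifted[i], reverse=True)
print(ranking)                                # [0, 2, 3, 1]: predicted MPBSs rank first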
Example 5: chip_evaluate
# Required import: from rgt.GenomicRegionSet import GenomicRegionSet [as alias]
# Or: from rgt.GenomicRegionSet.GenomicRegionSet import sort [as alias]
def chip_evaluate(args):
    # Evaluate Statistics
    fpr = dict()
    tpr = dict()
    roc_auc_1 = dict()
    roc_auc_10 = dict()
    roc_auc_50 = dict()
    roc_auc_100 = dict()
    recall = dict()
    precision = dict()
    prc_auc_1 = dict()
    prc_auc_10 = dict()
    prc_auc_50 = dict()
    prc_auc_100 = dict()

    footprint_file = args.footprint_file.split(",")
    footprint_name = args.footprint_name.split(",")
    footprint_type = args.footprint_type.split(",")

    max_score = 0
    if "SEG" in footprint_type:
        mpbs_regions = GenomicRegionSet("TFBS")
        mpbs_regions.read(args.tfbs_file)

        # Verifying the maximum score of the MPBS file
        for region in iter(mpbs_regions):
            score = int(region.data.split("\t")[0])
            if score > max_score:
                max_score = score
        max_score += 1

    max_points = []
    for i in range(len(footprint_file)):
        footprints_regions = GenomicRegionSet("Footprints Prediction")
        footprints_regions.read(footprint_file[i])
        footprints_regions.sort()

        if footprint_type[i] == "SEG":
            # Increasing the score of an MPBS entry once if any overlap is found in the predicted footprints.
            increased_score_mpbs_regions = GenomicRegionSet("Increased Regions")
            intersect_regions = mpbs_regions.intersect(footprints_regions, mode=OverlapType.ORIGINAL)
            for region in iter(intersect_regions):
                region.data = str(int(region.data.split("\t")[0]) + max_score)
                increased_score_mpbs_regions.add(region)

            # Keep the score of the remaining MPBS entries unchanged
            without_intersect_regions = mpbs_regions.subtract(footprints_regions, whole_region=True)
            for region in iter(without_intersect_regions):
                increased_score_mpbs_regions.add(region)

            increased_score_mpbs_regions.sort_score()

            fpr[i], tpr[i], roc_auc_1[i], roc_auc_10[i], roc_auc_50[i], roc_auc_100[i] = \
                roc_curve(increased_score_mpbs_regions)
            recall[i], precision[i], prc_auc_1[i], prc_auc_10[i], prc_auc_50[i], prc_auc_100[i] = \
                precision_recall_curve(increased_score_mpbs_regions)

            max_points.append(len(intersect_regions))
        elif footprint_type[i] == "SC":
            footprints_regions.sort_score()
            fpr[i], tpr[i], roc_auc_1[i], roc_auc_10[i], roc_auc_50[i], roc_auc_100[i] = \
                roc_curve(footprints_regions)
            recall[i], precision[i], prc_auc_1[i], prc_auc_10[i], prc_auc_50[i], prc_auc_100[i] = \
                precision_recall_curve(footprints_regions)

            max_points.append(len(footprints_regions))

    # Output the statistics results into text
    stats_fname = os.path.join(args.output_location, "{}_stats.txt".format(args.output_prefix))
    stats_header = ["METHOD", "AUC_100", "AUC_50", "AUC_10", "AUC_1", "AUPR_100", "AUPR_50", "AUPR_10", "AUPR_1"]
    with open(stats_fname, "w") as stats_file:
        stats_file.write("\t".join(stats_header) + "\n")
        for i in range(len(footprint_name)):
            stats_file.write(footprint_name[i] + "\t" +
                             str(roc_auc_100[i]) + "\t" + str(roc_auc_50[i]) + "\t" + str(roc_auc_10[i]) + "\t" +
                             str(roc_auc_1[i]) + "\t" + str(prc_auc_100[i]) + "\t" + str(prc_auc_50[i]) + "\t" +
                             str(prc_auc_10[i]) + "\t" + str(prc_auc_1[i]) + "\n")

    # Output the curves
    if args.print_roc_curve:
        label_x = "False Positive Rate"
        label_y = "True Positive Rate"
        curve_name = "ROC"
        plot_curve(footprint_name, args.output_location, fpr, tpr, roc_auc_100, label_x, label_y, args.output_prefix,
                   curve_name, max_points=max_points)
    if args.print_pr_curve:
        label_x = "Recall"
        label_y = "Precision"
        curve_name = "PRC"
        plot_curve(footprint_name, args.output_location, recall, precision, prc_auc_100, label_x, label_y,
                   args.output_prefix, curve_name, max_points=max_points)

    output_points(footprint_name, args.output_location, args.output_prefix, fpr, tpr, recall, precision)
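Since this variant takes a parsed argument object instead of self, a hypothetical invocation could look as follows; every path, name, and flag is a placeholder, and the real tool builds args with argparse:

# Hypothetical invocation of the module-level chip_evaluate (placeholder values).
from argparse import Namespace

args = Namespace(
    footprint_file="hint.bed,wellington.bed",  # comma-separated, split inside chip_evaluate
    footprint_name="HINT,Wellington",
    footprint_type="SEG,SEG",
    tfbs_file="CTCF_mpbs.bed",
    output_location="./evaluation",
    output_prefix="CTCF",
    print_roc_curve=True,
    print_pr_curve=True,
)
chip_evaluate(args)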
Example 6: __init__
# Required import: from rgt.GenomicRegionSet import GenomicRegionSet [as alias]
# Or: from rgt.GenomicRegionSet.GenomicRegionSet import sort [as alias]
class RandomTest:
    def __init__(self, rna_fasta, rna_name, dna_region, organism, showdbs=False):
        self.organism = organism
        genome = GenomeData(organism)
        self.genome_path = genome.get_genome()

        # RNA: Path to the FASTA file
        self.rna_fasta = rna_fasta
        self.showdbs = showdbs

        rnas = SequenceSet(name="rna", seq_type=SequenceType.RNA)
        rnas.read_fasta(self.rna_fasta)
        if rna_name:
            self.rna_name = rna_name
        else:
            self.rna_name = rnas[0].name

        # DNA: GenomicRegionSet
        self.dna_region = GenomicRegionSet(name="target")
        self.dna_region.read_bed(dna_region)
        self.dna_region = self.dna_region.gene_association(organism=self.organism, show_dis=True)

        self.topDBD = []
        self.stat = OrderedDict(name=rna_name, genome=organism)
        self.stat["target_regions"] = str(len(self.dna_region))

    def get_rna_region_str(self, rna):
        """Getting the rna region from the information header with the pattern:
        REGION_chr3_51978050_51983935_-_"""
        self.rna_regions = get_rna_region_str(rna)
        if self.rna_regions and len(self.rna_regions[0]) == 5:
            self.rna_expression = float(self.rna_regions[0][-1])
        else:
            self.rna_expression = "n.a."

    def connect_rna(self, rna, temp):
        d = connect_rna(rna, temp, self.rna_name)
        self.stat["exons"] = str(d[0])
        self.stat["seq_length"] = str(d[1])
        self.rna_len = d[1]

    def target_dna(self, temp, remove_temp, cutoff, l, e, c, fr, fm, of, mf, par, obed=False):
        """Calculate the true counts of triplexes on the given dna regions"""
        self.triplexator_p = [l, e, c, fr, fm, of, mf]

        txp = find_triplex(rna_fasta=os.path.join(temp, "rna_temp.fa"), dna_region=self.dna_region,
                           temp=temp, organism=self.organism, remove_temp=remove_temp,
                           l=l, e=e, c=c, fr=fr, fm=fm, of=of, mf=mf, par=par, genome_path=self.genome_path,
                           prefix="targeted_region", dna_fine_posi=False)
        txp.merge_rbs(rm_duplicate=True, region_set=self.dna_region, asgene_organism=self.organism, cutoff=cutoff)
        self.txp = txp
        self.stat["DBSs_target_all"] = str(len(self.txp))
        txp.remove_duplicates()
        self.rbss = txp.merged_dict.keys()
        # if len(self.rbss) == 0:
        #     print("ERROR: No potential binding event. Please change the parameters.")
        #     sys.exit(1)

        txpf = find_triplex(rna_fasta=os.path.join(temp, "rna_temp.fa"), dna_region=self.dna_region,
                            temp=temp, organism=self.organism, remove_temp=remove_temp,
                            l=l, e=e, c=c, fr=fr, fm=fm, of=of, mf=mf, par=par, genome_path=self.genome_path,
                            prefix="dbs", dna_fine_posi=True)
        txpf.remove_duplicates()
        txpf.merge_rbs(rbss=self.rbss, rm_duplicate=True, asgene_organism=self.organism)
        self.txpf = txpf
        self.stat["DBSs_target_all"] = str(len(self.txpf))

        self.counts_tr = OrderedDict()
        self.counts_dbs = OrderedDict()
        for rbs in self.rbss:
            tr = len(self.txp.merged_dict[rbs])
            self.counts_tr[rbs] = [tr, len(self.dna_region) - tr]
            self.counts_dbs[rbs] = len(self.txpf.merged_dict[rbs])

        self.region_dbd = self.txpf.sort_rbs_by_regions(self.dna_region)
        self.region_dbs = self.txpf.sort_rd_by_regions(regionset=self.dna_region)
        self.region_dbsm = {}
        self.region_coverage = {}
        for region in self.dna_region:
            self.region_dbsm[region.toString()] = self.region_dbs[region.toString()].get_dbs().merge(w_return=True)
            self.region_coverage[region.toString()] = \
                float(self.region_dbsm[region.toString()].total_coverage()) / len(region)
        self.stat["target_regions"] = str(len(self.dna_region))

        if obed:
            # btr = self.txp.get_dbs()
            # btr = btr.gene_association(organism=self.organism, show_dis=True)
            # btr.write_bed(os.path.join(temp, obed + "_target_region_dbs.bed"))
            # dbss = txpf.get_dbs()
            # dbss.write_bed(os.path.join(temp, obed + "_dbss.bed"))
            # output = self.dna_region.gene_association(organism=self.organism, show_dis=True)
            self.txp.write_bed(filename=os.path.join(temp, obed + "_target_region_dbs.bed"),
                               dbd_tag=False,
                               remove_duplicates=False, associated=self.organism)
# ... (remaining code omitted) ...
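Based on the constructor signature shown above, instantiation might look like this; the paths, RNA name, and organism code are placeholders, and GenomeData must be able to resolve the organism's genome locally:

# Hypothetical construction of RandomTest (placeholder paths and organism).
rt = RandomTest(rna_fasta="lncRNA.fa",
                rna_name="MY_LNCRNA",
                dna_region="candidate_regions.bed",
                organism="hg38",
                showdbs=False)
print(rt.stat["target_regions"])              # number of target regions after gene association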