

Python RandomForestRegressor.metrics Method Code Examples

This article collects typical usage examples of the sklearn.ensemble.RandomForestRegressor.metrics method in Python. If you are wondering what RandomForestRegressor.metrics does or how to use it, the curated code example below may help. You can also explore further usage examples of the containing class, sklearn.ensemble.RandomForestRegressor.


One code example of the RandomForestRegressor.metrics method is shown below.
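Before the full example, here is a minimal, self-contained sketch of the standard scikit-learn RandomForestRegressor workflow (construct the estimator, fit it on a feature matrix, predict). It is not part of the example below; the data and variable names (rng, X, y, model) are synthetic and purely illustrative.

# Minimal RandomForestRegressor usage sketch with synthetic data.
import numpy as np
from sklearn.ensemble import RandomForestRegressor

rng = np.random.RandomState(42)
X = rng.rand(200, 5)                                # 200 samples, 5 features
y = 3 * X[:, 0] + rng.normal(scale=0.1, size=200)   # noisy target driven by the first feature

model = RandomForestRegressor(n_estimators=100, random_state=42)
model.fit(X, y)
print(model.predict(X[:5]))                         # predictions for the first five samples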

Example 1: main

# Required import: from sklearn.ensemble import RandomForestRegressor [as alias]
# Alternatively: from sklearn.ensemble.RandomForestRegressor import metrics [as alias]

# Additional imports used by this snippet:
import argparse
import random
from math import floor

import numpy as np
import pandas

# load_metrics and MetricEntry come from the surrounding Mikado code and are not shown here.

def main():

    """
    Main script function.
    :return:
    """

    def to_proportion(string):

        string = float(string)
        if string <= 0 or string > 100:
            raise ValueError(string)
        if 1 < string:
            string /= 100
        return string

    def to_pos(string):

        string = int(string)
        if string < 0:
            raise ValueError(string)
        return string

    parser = argparse.ArgumentParser(description=__doc__)
    subset = parser.add_mutually_exclusive_group()
    subset.add_argument("-r", "--random", type=to_pos,
                        default=None,
                        help="A fixed number of models to select for training.")
    subset.add_argument("-p", "--proportion", type=to_proportion,
                        default=None,
                        help="Proportion of the models to be used for training.")
    parser.add_argument("-c", "--conf",
                        required=True,
                        help="File with the configuration for selecting best and worst transcripts.")
    parser.add_argument("--regress", action="store_true", default=False)
    # parser.add_argument("-t", "--tmap",
    #                     help="The TMAP file with the comparison results.",
    #                     required=True)
    parser.add_argument("-m", "--metrics", help="The metrics file.", required=True)
    parser.add_argument("-o", "--out", help="Output file.", default="forest.model")
    parser.add_argument("-s", "--scoring", help="The original scoring file, to retrieve info on fragments.")
    args = parser.parse_args()

    # X should contain a matrix of features derived from the portcullis tab file
    # y should contain the labels (0 not a valid junction, 1 a valid junction).
    # Confirmed with the reference.

    # Load tab file and produce matrix
    # bed, tab = loadtab(args.input)
    # tmap_results = load_tmap(args.tmap)
    # scores = dict()
    # for tid in tmap_results:
    #     if tmap_results[tid].ccode == ("u",):
    #         continue
    #     recall = np.mean([tmap_results[tid].j_recall,
    #                       tmap_results[tid].e_recall,
    #                       tmap_results[tid].n_recall])
    #     precision = np.mean([tmap_results[tid].j_prec,
    #                          tmap_results[tid].e_prec,
    #                          tmap_results[tid].n_prec])
    #     if min(recall, precision) > 0:
    #         scores[tid] = hmean([recall, precision])
    #     else:
    #         scores[tid] = 0
    #
    # print("# TMAP results: " + str(len(tmap_results)))

    # Load reference and add labels
    # ref = bed12.loadbed(args.reference, False, False)
    # metrics = pandas.read_csv(args.metrics, delimiter="\t")

    metrics_pandas = pandas.read_csv(args.metrics, delimiter="\t")

    try:
        # Clearly poor models get a score of 0: mono-exonic transcripts with no
        # CDS and a cDNA shorter than 300 bp, or multi-exonic transcripts with
        # a combined_cds_intron_fraction of 0 and a retained_fraction above 0.5.
        zeros = metrics_pandas[
            ((metrics_pandas.exon_num == 1) &
             (metrics_pandas.combined_cds_length == 0) &
             (metrics_pandas.cdna_length < 300)) |
            ((metrics_pandas.exon_num > 1) &
             (metrics_pandas.combined_cds_intron_fraction == 0) &
             (metrics_pandas.retained_fraction > 0.5))
        ].tid
    except AttributeError as exc:
        raise AttributeError("\n".join([str(exc), "\n\t".join(list(metrics_pandas.columns))]))

    # Clearly good models get a score of 100: all introns in the locus verified,
    # a snowy_blast_score above 10, a retained_fraction of 0, and either more than
    # two verified introns (multi-exonic) or both UTRs present (mono-exonic).
    hundreds = metrics_pandas[
        (metrics_pandas.proportion_verified_introns_inlocus == 1) &
        (metrics_pandas.snowy_blast_score > 10) &
        (metrics_pandas.retained_fraction == 0) &
        (((metrics_pandas.exon_num > 1) & (metrics_pandas.verified_introns_num > 2)) |
         ((metrics_pandas.exon_num == 1) & (metrics_pandas.utr_num == 2)))
    ].tid

    metrics = load_metrics(args.metrics)

    scores = dict()

    for z in zeros:
        scores[z] = 0
    for h in hundreds:
        scores[h] = 100
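
    # These hand-assigned scores (0 for the clearly poor models, 100 for the
    # clearly good ones) are intended as the training targets for the forest.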

    print("# metered transcripts:", len(metrics))

    if args.random is not None or args.proportion is not None:
        # random.sample needs a sequence, so materialise the keys as a list.
        if args.random is not None:
            selected = random.sample(list(scores.keys()), args.random)
        else:
            selected = random.sample(list(scores.keys()),
                                     int(floor(len(scores) * args.proportion)))

        selected = set(selected)  # set membership keeps the filtering fast
        scores = dict(_ for _ in scores.items() if _[0] in selected)
        metrics = dict(_ for _ in metrics.items() if _[0] in selected)

    X = np.zeros((len(scores), len(MetricEntry.metrics)))
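    # X: one row per scored transcript (after any subsetting above), one column
    # per metric listed in MetricEntry.metrics.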
#......... the rest of the code is omitted here .........
Developer ID: Jamure, Project: Mikado, Lines of code: 103, Source file: self_training.py
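
The body of the example is truncated by the source page. For orientation only, here is a hypothetical sketch (not the omitted Mikado code) of the overall pattern the script follows: build a feature matrix from per-transcript metrics, use the hand-assigned 0/100 scores as targets, and fit a RandomForestRegressor. The fit_scorer helper, the metric_table dict-of-dicts layout, and the toy data are all assumptions made for illustration.

# Hypothetical sketch -- not the omitted Mikado code.
import numpy as np
from sklearn.ensemble import RandomForestRegressor


def fit_scorer(scores, metric_table, metric_names):
    """scores: tid -> 0/100 target; metric_table: tid -> {metric name: value} (assumed layout)."""
    tids = sorted(scores)
    X = np.array([[metric_table[tid][name] for name in metric_names] for tid in tids])
    y = np.array([scores[tid] for tid in tids])
    model = RandomForestRegressor(n_estimators=100, n_jobs=-1, random_state=0)
    model.fit(X, y)
    return model


# Toy usage example with two made-up transcripts and two metric names taken from the example above:
names = ["cdna_length", "exon_num"]
table = {"t1": {"cdna_length": 250, "exon_num": 1},
         "t2": {"cdna_length": 5000, "exon_num": 9}}
scorer = fit_scorer({"t1": 0, "t2": 100}, table, names)
print(scorer.predict([[1200, 3]]))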


Note: The sklearn.ensemble.RandomForestRegressor.metrics examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their authors; copyright in the source code remains with the original authors, and any use or redistribution should follow the corresponding project's license. Please do not reproduce without permission.