

Python ArtifactBuilderCollection.parse Method Code Examples

This article collects and summarizes typical usage examples of the ArtifactBuilderCollection.parse method from the Python module treeherder.log_parser.artifactbuildercollection. If you have been wondering what ArtifactBuilderCollection.parse does, how to call it, or what real-world uses of it look like, the curated examples below should help. You can also explore further usage examples for the containing class, treeherder.log_parser.artifactbuildercollection.ArtifactBuilderCollection.


The sections below present 15 code examples of the ArtifactBuilderCollection.parse method, sorted by popularity by default.
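Before the individual examples, here is a minimal sketch of the call pattern they all share, distilled from the samples below; the log URL is a placeholder and the default builder set is assumed:

import json

from treeherder.log_parser.artifactbuildercollection import ArtifactBuilderCollection

# Placeholder URL -- substitute a real raw-log URL or a local "file://..." path.
log_url = "http://example.com/sample_log.txt.gz"

# Build the collection with its default artifact builders, then parse the log.
artifact_bc = ArtifactBuilderCollection(log_url)
artifact_bc.parse()

# After parse(), each builder's output is available under the builder's name.
for name, artifact in artifact_bc.artifacts.items():
    print("%s: %s" % (name, json.dumps(artifact, indent=2)))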

Example 1: do_test

# Required import: from treeherder.log_parser.artifactbuildercollection import ArtifactBuilderCollection [as alias]
# Or: from treeherder.log_parser.artifactbuildercollection.ArtifactBuilderCollection import parse [as alias]
def do_test(log, check_errors=True):
    """
    Test a single log.

    ``log`` - the url prefix of the log to test.  Also searches for the
              result file with the same prefix.
    """

    url = "file://{0}".format(
        SampleData().get_log_path("{0}.txt.gz".format(log)))

    builder = BuildbotLogViewArtifactBuilder(url, check_errors=check_errors)
    lpc = ArtifactBuilderCollection(url, builders=builder)
    lpc.parse()
    act = lpc.artifacts[builder.name]

    # we can't compare the "logurl" field, because it's a fully qualified url,
    # so it will be different depending on the config it's run in.
    assert "logurl" in act
    del(act["logurl"])

    exp = test_utils.load_exp("{0}.logview.json".format(log))

    # :: Uncomment to create the ``exp`` files, if you're making a lot of them
    # with open(SampleData().get_log_path("{0}.logview.json".format(log)), "w") as f:
    #     f.write(json.dumps(act, indent=4))
    #     f.close()

    # log urls won't match in tests, since they're machine specific
    # but leave it in the exp file as an example of what the real structure
    # should look like.
    del(exp["logurl"])

    assert act == exp  # , diff(exp, act)
Author: GoogleInternetAuthorityG2SUNGHAN, Project: treeherder-service, Lines: 36, Source file: test_log_view_artifact_builder.py

Example 2: handle

# Required import: from treeherder.log_parser.artifactbuildercollection import ArtifactBuilderCollection [as alias]
# Or: from treeherder.log_parser.artifactbuildercollection.ArtifactBuilderCollection import parse [as alias]
    def handle(self, *args, **options):
        if len(args) != 1:
            raise CommandError("Need to specify (only) log URL")

        if options['profile']:
            num_runs = options['profile']
        else:
            num_runs = 1

        times = []
        for i in range(num_runs):
            start = time.time()
            artifact_bc = ArtifactBuilderCollection(args[0],
                                                    check_errors=True)
            artifact_bc.parse()
            times.append(time.time() - start)

            if not options['profile']:
                for name, artifact in artifact_bc.artifacts.items():
                    print "%s, %s" % (name, json.dumps(artifact))

        if options['profile']:
            print "Timings: %s" % times
            print "Average: %s" % (sum(times)/len(times))
            print "Total: %s" % sum(times)
Author: TheTeraByte, Project: treeherder, Lines: 27, Source file: test_parse_log.py

Example 3: test_all_builders_complete

# Required import: from treeherder.log_parser.artifactbuildercollection import ArtifactBuilderCollection [as alias]
# Or: from treeherder.log_parser.artifactbuildercollection.ArtifactBuilderCollection import parse [as alias]
def test_all_builders_complete():
    """test when parse.complete is true creates correct structure"""
    log = "mozilla-central_fedora-b2g_test-crashtest-1-bm54-tests1-linux-build50"
    url = "file://{0}".format(
        SampleData().get_log_path("{0}.txt.gz".format(log)))
    lpc = ArtifactBuilderCollection(
        url,
    )
    for builder in lpc.builders:
        builder.parser.complete = True

    lpc.parse()
    exp = {
        "text_log_summary": {
            "step_data": {
                "steps": [],
                "errors_truncated": False
            },
        },
        "Job Info": {
            "job_details": []
        }
    }
    act = lpc.artifacts

    # we can't compare the "logurl" field, because it's a fully qualified url,
    # so it will be different depending on the config it's run in.
    assert "logurl" in act["text_log_summary"]
    assert "logurl" in act["Job Info"]
    del(act["Job Info"]["logurl"])
    del(act["text_log_summary"]["logurl"])

    assert exp == lpc.artifacts, diff(exp, lpc.artifacts)
Author: SebastinSanty, Project: treeherder, Lines: 35, Source file: test_artifact_builder_collection.py

Example 4: test_all_builders_complete

# Required import: from treeherder.log_parser.artifactbuildercollection import ArtifactBuilderCollection [as alias]
# Or: from treeherder.log_parser.artifactbuildercollection.ArtifactBuilderCollection import parse [as alias]
def test_all_builders_complete():
    """test when parse.complete is true creates correct structure"""
    url = add_log_response(
        "mozilla-central_fedora-b2g_test-crashtest-1-bm54-tests1-linux-build50.txt.gz"
    )
    lpc = ArtifactBuilderCollection(url)
    for builder in lpc.builders:
        builder.parser.complete = True

    lpc.parse()
    exp = {
        "text_log_summary": {
            "step_data": {
                "steps": [],
                "errors_truncated": False
            },
            "logurl": url,
        },
        "Job Info": {
            "job_details": [],
            "logurl": url,
        }
    }

    assert exp == lpc.artifacts
Author: ahal, Project: treeherder-service, Lines: 27, Source file: test_artifact_builder_collection.py

Example 5: extract_text_log_artifacts

# Required import: from treeherder.log_parser.artifactbuildercollection import ArtifactBuilderCollection [as alias]
# Or: from treeherder.log_parser.artifactbuildercollection.ArtifactBuilderCollection import parse [as alias]
def extract_text_log_artifacts(project, log_url, job_guid):
    """Generate a summary artifact for the raw text log."""

    # parse a log given its url
    artifact_bc = ArtifactBuilderCollection(log_url)
    artifact_bc.parse()

    artifact_list = []
    for name, artifact in artifact_bc.artifacts.items():
        if name == 'Job Info':
            for detail in artifact['job_details']:
                if ('title' in detail and detail['title'] == 'artifact uploaded'
                        and detail['value'].endswith('_errorsummary.log')):
                    # using .send_task to avoid an import loop.
                    celery_app.send_task('store-error-summary',
                                         [project, detail['url'], job_guid],
                                         routing_key='store_error_summary')
        artifact_list.append({
            "job_guid": job_guid,
            "name": name,
            "type": 'json',
            "blob": json.dumps(artifact)
        })

    artifact_list.extend(get_error_summary_artifacts(artifact_list))

    return artifact_list
Author: adusca, Project: treeherder, Lines: 29, Source file: utils.py

Example 6: do_test

# Required import: from treeherder.log_parser.artifactbuildercollection import ArtifactBuilderCollection [as alias]
# Or: from treeherder.log_parser.artifactbuildercollection.ArtifactBuilderCollection import parse [as alias]
def do_test(log):
    """
    Test a single log with the ``JobArtifactBuilder``.

    ``log`` - the url prefix of the log to test.  Also searches for the
              result file with the same prefix.
    """

    url = add_log_response("{}.txt.gz".format(log))

    builder = BuildbotJobArtifactBuilder(url)
    lpc = ArtifactBuilderCollection(url, builders=builder)
    lpc.parse()
    act = lpc.artifacts[builder.name]
    exp = test_utils.load_exp("{0}.jobartifact.json".format(log))

    # :: Uncomment to create the ``exp`` files, if you're making a lot of them
    # import json
    # from tests.sampledata import SampleData
    # with open(SampleData().get_log_path("{0}.jobartifact.json".format(log)), "w") as f:
    #     f.write(json.dumps(act, indent=4))

    # assert act == exp, diff(exp, act)

    # if you want to gather results for a new test, use this
    assert len(act) == len(exp)
    for index, artifact in act.items():
        assert artifact == exp[index]
Author: MikeLing, Project: treeherder, Lines: 30, Source file: test_job_artifact_builder.py

Example 7: do_test

# Required import: from treeherder.log_parser.artifactbuildercollection import ArtifactBuilderCollection [as alias]
# Or: from treeherder.log_parser.artifactbuildercollection.ArtifactBuilderCollection import parse [as alias]
def do_test(log):
    """
    Test a single log with the ``JobArtifactBuilder``.

    ``log`` - the url prefix of the log to test.  Also searches for the
              result file with the same prefix.
    """

    url = "file://{0}".format(
        SampleData().get_log_path("{0}.txt.gz".format(log)))
    exp = test_utils.load_exp("{0}.jobartifact.json".format(log))

    builder = BuildbotJobArtifactBuilder(url)
    lpc = ArtifactBuilderCollection(url, builders=builder)
    lpc.parse()
    act = lpc.artifacts[builder.name]

    # we can't compare the "logurl" field, because it's a fully qualified url,
    # so it will be different depending on the config it's run in.
    assert "logurl" in act
    del(act["logurl"])
    # leaving the logurl in the exp files so they are a good example of the
    # expected structure.
    del(exp["logurl"])
    # assert act == exp, diff(exp, act)

    # if you want to gather results for a new test, use this
    assert len(act) == len(exp)
    for index, artifact in act.items():
        assert artifact == exp[index]
Author: AnthonyMeaux, Project: treeherder, Lines: 32, Source file: test_job_artifact_builder.py

Example 8: extract_text_log_artifacts

# Required import: from treeherder.log_parser.artifactbuildercollection import ArtifactBuilderCollection [as alias]
# Or: from treeherder.log_parser.artifactbuildercollection.ArtifactBuilderCollection import parse [as alias]
def extract_text_log_artifacts(project, log_url, job_guid):
    """Generate a summary artifact for the raw text log."""

    # parse a log given its url
    artifact_bc = ArtifactBuilderCollection(log_url)
    artifact_bc.parse()

    artifact_list = []
    for name, artifact in artifact_bc.artifacts.items():
        artifact_list.append({"job_guid": job_guid, "name": name, "type": "json", "blob": json.dumps(artifact)})

    artifact_list.extend(get_error_summary_artifacts(artifact_list))

    return artifact_list
Author: samh12, Project: treeherder, Lines: 16, Source file: utils.py

Example 9: test_performance_log_parsing

# Required import: from treeherder.log_parser.artifactbuildercollection import ArtifactBuilderCollection [as alias]
# Or: from treeherder.log_parser.artifactbuildercollection.ArtifactBuilderCollection import parse [as alias]
def test_performance_log_parsing():
    """
    Validate that we can parse a generic performance artifact
    """
    sd = SampleData()
    for logfile in ['mozilla-inbound-android-api-11-debug-bm91-build1-build1317.txt.gz',
                    'try_ubuntu64_hw_test-chromez-bm103-tests1-linux-build1429.txt.gz']:
        file_path = sd.get_log_path(logfile)
        file_url = 'file://{}'.format(file_path)

        builder = BuildbotPerformanceDataArtifactBuilder(url=file_url)
        lpc = ArtifactBuilderCollection(file_url, builders=[builder])
        lpc.parse()
        act = lpc.artifacts[builder.name]
        validate(act['performance_data'], PERFHERDER_SCHEMA)
Author: PratikDhanave, Project: treeherder, Lines: 17, Source file: test_performance_artifact_builder.py

Example 10: test_log_download_size_limit

# Required import: from treeherder.log_parser.artifactbuildercollection import ArtifactBuilderCollection [as alias]
# Or: from treeherder.log_parser.artifactbuildercollection.ArtifactBuilderCollection import parse [as alias]
def test_log_download_size_limit():
    """Test that logs whose Content-Length exceed the size limit are not parsed."""
    url = 'http://foo.tld/fake_large_log.tar.gz'
    responses.add(
        responses.GET,
        url,
        body='',
        adding_headers={
            'Content-Encoding': 'gzip',
            'Content-Length': str(MAX_DOWNLOAD_SIZE_IN_BYTES + 1),
        }
    )
    lpc = ArtifactBuilderCollection(url)

    with pytest.raises(LogSizeException):
        lpc.parse()
Author: edmorley, Project: treeherder, Lines: 18, Source file: test_artifact_builder_collection.py

Example 11: extract_text_log_artifacts

# Required import: from treeherder.log_parser.artifactbuildercollection import ArtifactBuilderCollection [as alias]
# Or: from treeherder.log_parser.artifactbuildercollection.ArtifactBuilderCollection import parse [as alias]
def extract_text_log_artifacts(job_log):
    """Generate a set of artifacts by parsing from the raw text log."""

    # parse a log given its url
    artifact_bc = ArtifactBuilderCollection(job_log.url)
    artifact_bc.parse()

    artifact_list = []
    for name, artifact in artifact_bc.artifacts.items():
        artifact_list.append({
            "job_guid": job_log.job.guid,
            "name": name,
            "type": 'json',
            "blob": json.dumps(artifact)
        })

    return artifact_list
Author: SebastinSanty, Project: treeherder, Lines: 19, Source file: utils.py

Example 12: test_talos_log_parsing

# Required import: from treeherder.log_parser.artifactbuildercollection import ArtifactBuilderCollection [as alias]
# Or: from treeherder.log_parser.artifactbuildercollection.ArtifactBuilderCollection import parse [as alias]
def test_talos_log_parsing():
    """
    Make sure all performance data log examples validate with the
    talos json schema.
    """

    sd = SampleData()
    files = sd.get_talos_logs()

    for file_url in files:
        builder = BuildbotTalosDataArtifactBuilder(url=file_url)
        lpc = ArtifactBuilderCollection(file_url, builders=[builder])
        lpc.parse()
        act = lpc.artifacts[builder.name]

        # Validate that the returned data conforms to the required talos
        # json schema
        for talos_datum in act['talos_data']:
            validate(talos_datum, TALOS_SCHEMA)
Author: anurag619, Project: treeherder, Lines: 21, Source file: test_performance_artifact_builder.py

Example 13: test_performance_log_parsing

# Required import: from treeherder.log_parser.artifactbuildercollection import ArtifactBuilderCollection [as alias]
# Or: from treeherder.log_parser.artifactbuildercollection.ArtifactBuilderCollection import parse [as alias]
def test_performance_log_parsing():
    """
    Validate that we can parse a generic performance artifact
    """

    # the first two logs have one performance artifact each; the third has two
    for (logfile, num_perf_artifacts) in [
            ('mozilla-inbound-android-api-11-debug-bm91-build1-build1317.txt.gz', 1),
            ('try_ubuntu64_hw_test-chromez-bm103-tests1-linux-build1429.txt.gz', 1),
            ('mozilla-inbound-linux64-bm72-build1-build225.txt.gz', 2)]:
        url = add_log_response(logfile)

        builder = BuildbotPerformanceDataArtifactBuilder(url=url)
        lpc = ArtifactBuilderCollection(url, builders=[builder])
        lpc.parse()
        act = lpc.artifacts[builder.name]
        assert len(act['performance_data']) == num_perf_artifacts
        for perfherder_artifact in act['performance_data']:
            validate(perfherder_artifact, PERFHERDER_SCHEMA)
Author: MikeLing, Project: treeherder, Lines: 21, Source file: test_performance_artifact_builder.py

Example 14: test_performance_log_parsing

# Required import: from treeherder.log_parser.artifactbuildercollection import ArtifactBuilderCollection [as alias]
# Or: from treeherder.log_parser.artifactbuildercollection.ArtifactBuilderCollection import parse [as alias]
def test_performance_log_parsing():
    """
    Make sure all performance data log examples validate with the
    datazilla json schema.
    """

    sd = SampleData()
    files = sd.get_performance_logs()

    tda = TalosDataAdapter()

    for file_url in files:
        builder = BuildbotPerformanceDataArtifactBuilder(url=file_url)
        lpc = ArtifactBuilderCollection(file_url, builders=[builder])
        lpc.parse()
        act = lpc.artifacts[builder.name]

        # Validate the data returned has the required datazilla
        # json schema
        validate(act['talos_data'][0], tda.datazilla_schema)
Author: asutherland, Project: treeherder-service, Lines: 22, Source file: test_performance_artifact_builder.py

Example 15: handle

# Required import: from treeherder.log_parser.artifactbuildercollection import ArtifactBuilderCollection [as alias]
# Or: from treeherder.log_parser.artifactbuildercollection.ArtifactBuilderCollection import parse [as alias]
    def handle(self, *args, **options):
        if options['profile']:
            num_runs = options['profile']
        else:
            num_runs = 1

        times = []
        for _ in range(num_runs):
            start = time.time()
            artifact_bc = ArtifactBuilderCollection(options['log_url'])
            artifact_bc.parse()
            times.append(time.time() - start)

            if not options['profile']:
                for name, artifact in artifact_bc.artifacts.items():
                    print("%s, %s" % (name, json.dumps(artifact, indent=2)))

        if options['profile']:
            print("Timings: %s" % times)
            print("Average: %s" % (sum(times)/len(times)))
            print("Total: %s" % sum(times))
Author: edmorley, Project: treeherder, Lines: 23, Source file: test_parse_log.py


Note: The treeherder.log_parser.artifactbuildercollection.ArtifactBuilderCollection.parse examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers, and copyright in the source code remains with the original authors. Refer to each project's license before distributing or using the code, and do not reproduce this article without permission.