Python TreeherderClient.get_jobs Method Code Examples

This article collects typical usage examples of the thclient.TreeherderClient.get_jobs method in Python. If you are wondering exactly how TreeherderClient.get_jobs is used, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples of the class it belongs to, thclient.TreeherderClient.


The sections below show 13 code examples of the TreeherderClient.get_jobs method, sorted by popularity by default.
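
Before looking at the project excerpts, here is a minimal sketch of the pattern most of them share: resolve a revision to its push (result set) id, then call get_jobs() with that id. The repository name and revision below are placeholders, and depending on the thclient release the push lookup is spelled get_pushes() (newer) or get_resultsets() (older); both variants appear in the examples that follow.

# Minimal sketch, not taken from any single example below; repo name and revision are placeholders.
from thclient import TreeherderClient

client = TreeherderClient(server_url='https://treeherder.mozilla.org')
repo_name = 'mozilla-central'   # hypothetical repository
revision = '0123456789ab'       # hypothetical 12-character changeset hash

# get_jobs() filters on the push (result set) id, so resolve the revision first.
# Older thclient releases expose this lookup as get_resultsets() instead of get_pushes().
pushes = client.get_pushes(repo_name, revision=revision)
if pushes:
    jobs = client.get_jobs(repo_name, result_set_id=pushes[0]['id'], count=2000)
    for job in jobs:
        print('{0} {1} {2} {3}'.format(job['id'], job['ref_data_name'], job['state'], job['result']))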

Example 1: on_buildbot_event

# Required import: from thclient import TreeherderClient [as alias]
# Or: from thclient.TreeherderClient import get_jobs [as alias]
def on_buildbot_event(data, message, dry_run, stage=False):
    """Act upon buildbot events."""
    # Pulse gives us a job_id and a job_guid, we need request_id.
    LOG.info(
        "%s action requested by %s on repo_name %s with job_id: %s"
        % (data["action"], data["requester"], data["project"], data["job_id"])
    )
    # Cleaning mozci caches
    buildjson.BUILDS_CACHE = {}
    query_jobs.JOBS_CACHE = {}

    if stage:
        treeherder_client = TreeherderClient(host="treeherder.allizom.org")
    else:
        treeherder_client = TreeherderClient()
    repo_name = data["project"]
    job_id = data["job_id"]
    result = treeherder_client.get_jobs(repo_name, id=job_id)
    # If result not found, ignore
    if not result:
        LOG.info("We could not find any result for repo_name: %s and " "job_id: %s" % (repo_name, job_id))
        message.ack()
        return

    result = result[0]
    buildername = result["ref_data_name"]
    resultset_id = result["result_set_id"]
    result_sets = treeherder_client.get_resultsets(repo_name, id=resultset_id)
    revision = result_sets[0]["revision"]
    action = data["action"]
    status = None

    buildername = filter_invalid_builders(buildername)

    # Treeherder can send us invalid builder names
    # https://bugzilla.mozilla.org/show_bug.cgi?id=1242038
    if buildername is None:
        status = "Builder %s was invalid." % buildername[0]

    # Backfill action
    elif action == "backfill":
        manual_backfill(revision, buildername, max_revisions=get_maxRevisions(buildername), dry_run=dry_run)
        if not dry_run:
            status = "Backfill request sent"
        else:
            status = "Dry-run mode, nothing was backfilled"

    # Send a pulse message showing what we did
    message_sender = MessageHandler()
    pulse_message = {"job_id": job_id, "action": action, "requester": data["requester"], "status": status}
    routing_key = "{}.{}".format(repo_name, action)
    try:
        message_sender.publish_message(pulse_message, routing_key)
    except Exception:
        LOG.warning("Failed to publish message over pulse stream.")

    if not dry_run:
        # We need to ack the message to remove it from our queue
        message.ack()
Developer: F3real, Project: pulse_actions, Lines: 61, Source: treeherder_buildbot.py

Example 2: get_all_jobs

# Required import: from thclient import TreeherderClient [as alias]
# Or: from thclient.TreeherderClient import get_jobs [as alias]
def get_all_jobs(repo_name, revision):
    '''Return dictionary of all jobs for a given revision

    Return: {'<revision_hash>': {'<job_id_1>': <job_id_1_metadata>}}
    '''
    print "Fetching Treeherder jobs for {}/{}".format(repo_name, revision)
    th_client = TreeherderClient()
    results = th_client.get_resultsets(repo_name, revision=revision)
    all_jobs = {}
    if results:
        revision_id = results[0]["id"]
        for job in th_client.get_jobs(repo_name, count=6000, result_set_id=revision_id):
            # Grab job metadata
            all_jobs[job['id']] = job

    return {revision: all_jobs}
Developer: armenzg, Project: push_metrics, Lines: 18, Source: extract_metrics_from_push.py

Example 3: on_event

# Required import: from thclient import TreeherderClient [as alias]
# Or: from thclient.TreeherderClient import get_jobs [as alias]
def on_event(data, message, dry_run, treeherder_server_url, acknowledge, **kwargs):
    """Act upon Treeherder job events.

    Return if the outcome was successful or not
    """
    exit_code = 0  # SUCCESS

    if ignored(data):
        if acknowledge:
            # We need to ack the message to remove it from our queue
            message.ack()
        return exit_code

    # Cleaning mozci caches
    buildjson.BUILDS_CACHE = {}
    query_jobs.JOBS_CACHE = {}

    treeherder_client = TreeherderClient(server_url=treeherder_server_url)

    action = data["action"].capitalize()
    job_id = data["job_id"]
    repo_name = data["project"]
    status = None

    # We want to know the status of the job we're processing
    try:
        job_info = treeherder_client.get_jobs(repo_name, id=job_id)[0]
    except IndexError:
        LOG.info("We could not find any job_info for repo_name: %s and " "job_id: %s" % (repo_name, job_id))
        return exit_code

    buildername = job_info["ref_data_name"]

    # We want to know the revision associated for this job
    result_sets = treeherder_client.get_resultsets(repo_name, id=job_info["result_set_id"])
    revision = result_sets[0]["revision"]

    link_to_job = "{}/#/jobs?repo={}&revision={}&selectedJob={}".format(
        treeherder_server_url, repo_name, revision, job_id
    )

    LOG.info("{} action requested by {} for '{}'".format(action, data["requester"], buildername))
    LOG.info("Request for {}".format(link_to_job))

    buildername = filter_invalid_builders(buildername)

    if buildername is None:
        LOG.info("Treeherder can send us invalid builder names.")
        LOG.info("See https://bugzilla.mozilla.org/show_bug.cgi?id=1242038.")
        LOG.warning('Requested job name "%s" is invalid.' % job_info["ref_data_name"])
        exit_code = -1  # FAILURE

    # There are various actions that can be taken on a job, however, we currently
    # only process the backfill one
    elif action == "Backfill":
        exit_code = manual_backfill(revision=revision, buildername=buildername, dry_run=dry_run)
        if not dry_run:
            status = "Backfill request sent"
        else:
            status = "Dry-run mode, nothing was backfilled."
        LOG.debug(status)

    else:
        LOG.error('We were not aware of the "{}" action. Please file an issue'.format(action))
        exit_code = -1  # FAILURE

    if acknowledge:
        # We need to ack the message to remove it from our queue
        message.ack()

    return exit_code
Developer: armenzg, Project: pulse_actions, Lines: 73, Source: treeherder_job_action.py

Example 4: TreeherderApi

# Required import: from thclient import TreeherderClient [as alias]
# Or: from thclient.TreeherderClient import get_jobs [as alias]
class TreeherderApi(QueryApi):

    def __init__(self):
        self.treeherder_client = TreeherderClient()

    def get_all_jobs(self, repo_name, revision, **params):
        """
        Return all jobs for a given revision.
        If we can't query about this revision in treeherder api, we return an empty list.
        """
        # We query treeherder for its internal revision_id, and then get the jobs from them.
        # We cannot get jobs directly from revision and repo_name in TH api.
        # See: https://bugzilla.mozilla.org/show_bug.cgi?id=1165401
        results = self.treeherder_client.get_resultsets(repo_name, revision=revision, **params)
        all_jobs = []
        if results:
            revision_id = results[0]["id"]
            all_jobs = self.treeherder_client.get_jobs(repo_name, count=2000,
                                                       result_set_id=revision_id, **params)
        return all_jobs

    def get_buildapi_request_id(self, repo_name, job):
        """ Method to return buildapi's request_id. """
        job_id = job["id"]
        query_params = {'job_id': job_id,
                        'name': 'buildapi'}
        LOG.debug("We are fetching request_id from treeherder artifacts api")
        artifact_content = self.treeherder_client.get_artifacts(repo_name,
                                                                **query_params)
        return artifact_content[0]["blob"]["request_id"]

    def get_hidden_jobs(self, repo_name, revision):
        """ Return all hidden jobs on Treeherder """
        return self.get_all_jobs(repo_name, revision=revision, visibility='excluded')

    def get_matching_jobs(self, repo_name, revision, buildername):
        """
        Return all jobs that matched the criteria.
        """
        LOG.debug("Find jobs matching '%s'" % buildername)
        all_jobs = self.get_all_jobs(repo_name, revision)
        matching_jobs = []
        for j in all_jobs:
            if j["ref_data_name"] == buildername:
                matching_jobs.append(j)

        LOG.debug("We have found %d job(s) of '%s'." %
                  (len(matching_jobs), buildername))
        return matching_jobs

    def get_job_status(self, job):
        """
        Helper to determine the scheduling status of a job from treeherder.

        Raises a TreeherderError if the job doesn't complete.
        """
        if job["job_coalesced_to_guid"] is not None:
            return COALESCED

        if job["result"] == "unknown":
            if job["state"] == "pending":
                return PENDING
            elif job["state"] == "running":
                return RUNNING
            else:
                return UNKNOWN

        # If the job 'state' is completed, we can have the following possible statuses:
        # https://github.com/mozilla/treeherder/blob/master/treeherder/etl/buildbot.py#L7
        status_dict = {
            "success": SUCCESS,
            "busted": FAILURE,
            "testfailed": FAILURE,
            "skipped": SKIPPED,
            "exception": EXCEPTION,
            "retry": RETRY,
            "usercancel": CANCELLED
            }

        if job["state"] == "completed":
            return status_dict[job["result"]]

        LOG.debug(job)
        raise TreeherderError("Unexpected status")

    def find_all_jobs_by_status(self, repo_name, revision, status):
        builder_names = []
        jobs = self.get_all_jobs(repo_name, revision)
        # filter out jobs that have no builder name
        jobs = [job for job in jobs if job['machine_name'] != 'unknown']
        for job in jobs:
            try:
                job_status = self.get_job_status(job)
            except TreeherderError:
                continue
            if job_status == status:
                if job['build_system_type'] == 'taskcluster':
                    job_name = job['job_type_name']
                else:
                    job_name = job['ref_data_name']
#......... part of the code omitted .........
Developer: F3real, Project: mozilla_ci_tools, Lines: 103, Source: query_jobs.py

Example 5: TreeherderApi

# Required import: from thclient import TreeherderClient [as alias]
# Or: from thclient.TreeherderClient import get_jobs [as alias]
class TreeherderApi(QueryApi):

    def __init__(self, server_url='https://treeherder.mozilla.org', treeherder_host=None):
        if treeherder_host:
            LOG.warning("The `TreeherderApi()` parameter `treeherder_host` is deprecated. "
                        "Use `server_url` instead, or omit entirely to use the default of "
                        "production Treeherder.")
            server_url = 'https://%s' % treeherder_host
        self.treeherder_client = TreeherderClient(server_url=server_url)

    def get_all_jobs(self, repo_name, revision, **params):
        """
        Return all jobs for a given revision.
        If we can't query about this revision in treeherder api, we return an empty list.
        """
        # We query treeherder for its internal revision_id, and then get the jobs from them.
        # We cannot get jobs directly from revision and repo_name in TH api.
        # See: https://bugzilla.mozilla.org/show_bug.cgi?id=1165401
        results = self.treeherder_client.get_resultsets(repo_name, revision=revision, **params)
        all_jobs = []
        if results:
            revision_id = results[0]["id"]
            all_jobs = self.treeherder_client.get_jobs(repo_name, count=2000,
                                                       result_set_id=revision_id, **params)
        return all_jobs

    def get_buildapi_request_id(self, repo_name, job):
        """ Method to return buildapi's request_id. """
        job_details = self.treeherder_client.get_job_details(
            job_id=job["id"],
            title='buildbot_request_id',
            repository=repo_name)
        if not job_details:
            raise ValueError("No buildbot request id for job ({}, {}, {})".format(
                job["id"], 'buildbot_request_id', repo_name
            ))

        return int(job_details[0]["value"])

    def get_hidden_jobs(self, repo_name, revision):
        """ Return all hidden jobs on Treeherder """
        return self.get_all_jobs(repo_name, revision=revision, visibility='excluded')

    def get_matching_jobs(self, repo_name, revision, buildername):
        """
        Return all jobs that matched the criteria.
        """
        LOG.debug("Find jobs matching '%s'" % buildername)
        all_jobs = self.get_all_jobs(repo_name, revision)
        matching_jobs = []
        for j in all_jobs:
            if j["ref_data_name"] == buildername:
                matching_jobs.append(j)

        LOG.debug("We have found %d job(s) of '%s'." %
                  (len(matching_jobs), buildername))
        return matching_jobs

    def get_job_status(self, job):
        """
        Helper to determine the scheduling status of a job from treeherder.

        Raises a TreeherderError if the job doesn't complete.
        """
        if job["job_coalesced_to_guid"] is not None:
            return COALESCED

        if job["result"] == "unknown":
            if job["state"] == "pending":
                return PENDING
            elif job["state"] == "running":
                return RUNNING
            else:
                return UNKNOWN

        # If the job 'state' is completed, we can have the following possible statuses:
        # https://github.com/mozilla/treeherder/blob/master/treeherder/etl/buildbot.py#L7
        status_dict = {
            "success": SUCCESS,
            "busted": FAILURE,
            "testfailed": FAILURE,
            "skipped": SKIPPED,
            "exception": EXCEPTION,
            "retry": RETRY,
            "usercancel": CANCELLED
            }

        if job["state"] == "completed":
            return status_dict[job["result"]]

        LOG.debug(job)
        raise TreeherderError("Unexpected status")

    def find_all_jobs_by_status(self, repo_name, revision, status):
        builder_names = []
        jobs = self.get_all_jobs(repo_name, revision)
        # filter out jobs that have no builder name
        jobs = [job for job in jobs if job['machine_name'] != 'unknown']
        for job in jobs:
            try:
#......... part of the code omitted .........
Developer: armenzg, Project: mozilla_ci_tools, Lines: 103, Source: query_jobs.py

Example 6: get_test_packages_url

# Required import: from thclient import TreeherderClient [as alias]
# Or: from thclient.TreeherderClient import get_jobs [as alias]
def get_test_packages_url(properties):
    """Return the URL of the test packages JSON file.

    In case of localized daily builds we can query the en-US build to get
    the URL, but for candidate builds we need the tinderbox build
    of the first parent changeset which was not checked-in by the release
    automation process (necessary until bug 1242035 is fixed).
    """
    overrides = {
        'locale': 'en-US',
        'extension': 'test_packages.json',
        'build_type': 'tinderbox',
        'retry_attempts': 0,
    }

    platform_map = {
        'linux': {'build_platform': 'linux32'},
        'linux64': {'build_platform': 'linux64'},
        'mac': {'build_os': 'mac', 'build_architecture': 'x86_64'},
        'win32': {'build_os': 'win', 'build_architecture': 'x86'},
        'win64': {'build_os': 'win', 'build_architecture': 'x86_64'},
    }

    revision = properties['revision'][:12]

    client = TreeherderClient(host='treeherder.mozilla.org', protocol='https')
    resultsets = client.get_resultsets(properties['branch'],
                                       tochange=revision,
                                       count=50)

    # Retrieve the option hashes to filter for opt builds
    option_hash = None
    for key, values in client.get_option_collection_hash().iteritems():
        for value in values:
            if value['name'] == 'opt':
                option_hash = key
                break
        if option_hash:
            break

    # Set filters to speed-up querying jobs
    kwargs = {
        'job_type_name': 'Build',
        'exclusion_profile': False,
        'option_collection_hash': option_hash,
        'result': 'success',
    }
    kwargs.update(platform_map[properties['platform']])

    for resultset in resultsets:
        kwargs.update({'result_set_id': resultset['id']})
        jobs = client.get_jobs(properties['branch'], **kwargs)
        if len(jobs):
            revision = resultset['revision']
            break

    overrides['revision'] = revision

    # For update tests we need the test package of the target build. That allows
    # us to add fallback code in case major parts of the ui are changing in Firefox.
    if properties.get('target_buildid'):
        overrides['build_id'] = properties['target_buildid']

    # The test package json file has a prefix with bug 1239808 fixed. Older builds need
    # a fallback to a prefix-less filename.
    try:
        url = query_file_url(properties, property_overrides=overrides)
    except download_errors.NotFoundError:
        overrides.pop('extension')
        build_url = query_file_url(properties, property_overrides=overrides)
        url = '{}/test_packages.json'.format(build_url[:build_url.rfind('/')])

    return url
Developer: KaiRo-at, Project: mozmill-ci, Lines: 75, Source: trigger.py

Example 7: retrieve_test_logs

# Required import: from thclient import TreeherderClient [as alias]
# Or: from thclient.TreeherderClient import get_jobs [as alias]
def retrieve_test_logs(repo, revision, platform='linux64',
                       cache_dir=None, use_cache=True,
                       warning_re=WARNING_RE):
    """
    Retrieves and processes the test logs for the given revision.

    Returns list of processed files.
    """
    if not cache_dir:
        cache_dir = "%s-%s-%s" % (repo, revision, platform)

    cache = logspam.cache.Cache(cache_dir, warning_re)

    cache_dir_exists = os.path.isdir(cache_dir)
    if cache_dir_exists and use_cache:
        # We already have logs for this revision.
        print "Using cached data"
        try:
            return cache.read_results()
        except logspam.cache.CacheFileNotFoundException as e:
            print "Cache file for %s not found" % warning_re
            print e

    client = TreeherderClient()
    print "getting result set"
    pushes = client.get_pushes(repo, revision=revision)
    print "pushes = client.get_pushes('%s', revision='%s')" % (repo, revision)
    print "got pushes"
    if not pushes:
        print "Failed to find %s in %s" % (revision, repo)
        return None

    print "getting jobs"
    jobs = None  # stays None if every retry hits a ConnectionError
    for x in range(5):
        try:
            # option_collection_hash is just the convoluted way of specifying
            # we want a debug build.
            print "jobs = client.get_jobs('%s',result_set_id=%d, count=5000, platform='%s', option_collection_hash='%s')" % (
                    repo, pushes[0]['id'], platform, DEBUG_OPTIONHASH)
            jobs = client.get_jobs(repo,
                                   result_set_id=pushes[0]['id'],
                                   count=5000, # Just make this really large to avoid pagination
                                   platform=platform,
                                   option_collection_hash=DEBUG_OPTIONHASH,
                                   state='completed')
            break
        except requests.exceptions.ConnectionError:
            pass

    if not jobs:
        print "No jobs found for %s %s" % (revision, platform)
        import traceback
        traceback.print_exc()
        return None

    print "got jobs"

    print "getting %d job log urls" % len(jobs)
    job_ids = [ job['id'] for job in jobs ]
    print job_ids
    for x in range(5):
        logs = []
        try:
            for y in range(0, len(job_ids), 100):
                logs = logs + client.get_job_log_url(repo, job_id=job_ids[y:y+100])
            job_logs = logs
            break
        except requests.exceptions.ConnectionError, e:
            pass
Developer: EricRahm, Project: log-spam-hell, Lines: 71, Source: logs.py

Example 8: Treeherder

# Required import: from thclient import TreeherderClient [as alias]
# Or: from thclient.TreeherderClient import get_jobs [as alias]
class Treeherder(object):
    """Wrapper class for TreeherderClient to ease the use of its API."""

    def __init__(self, application, branch, platform, server_url=TREEHERDER_URL):
        """Create a new instance of the Treeherder class.

        :param application: The name of the application to download.
        :param branch: Name of the branch.
        :param platform: Platform of the application.
        :param server_url: The URL of the Treeherder instance to access.
        """
        self.logger = logging.getLogger(__name__)

        self.client = TreeherderClient(server_url=server_url)
        self.application = application
        self.branch = branch
        self.platform = platform

    def get_treeherder_platform(self, platform):
        """Return the internal Treeherder platform identifier.

        :param platform: Platform of the application.
        """
        try:
            return PLATFORM_MAP[platform]
        except KeyError:
            raise NotSupportedError('Platform "{}" is not supported.'.format(platform))

    def query_builds_by_revision(self, revision, job_type_name='Build', debug_build=False):
        """Retrieve build folders for a given revision with the help of Treeherder.

        :param revision: Revision of the build to download.
        :param job_type_name: Name of the job to look for. For builds it should be
            'Build', 'Nightly', and 'L10n Nightly'. Defaults to `Build`.
        :param debug_build: Download a debug build.
        """
        builds = set()

        try:
            self.logger.info('Querying {url} for list of builds for revision: {revision}'.format(
                             url=self.client.server_url, revision=revision))

            # Retrieve the option hash to filter for type of build (opt, and debug for now)
            option_hash = None
            for key, values in self.client.get_option_collection_hash().iteritems():
                for value in values:
                    if value['name'] == ('debug' if debug_build else 'opt'):
                        option_hash = key
                        break
                if option_hash:
                    break

            resultsets = self.client.get_pushes(self.branch, revision=revision)

            # Set filters to speed-up querying jobs
            kwargs = {
                'option_collection_hash': option_hash,
                'job_type_name': job_type_name,
                'exclusion_profile': False,
            }
            kwargs.update(self.get_treeherder_platform(self.platform))

            for resultset in resultsets:
                kwargs.update({'result_set_id': resultset['id']})
                jobs = self.client.get_jobs(self.branch, **kwargs)
                for job in jobs:
                    log_urls = self.client.get_job_log_url(self.branch, job_id=job['id'])
                    for log_url in log_urls:
                        if self.application in log_url['url']:
                            self.logger.debug('Found build folder: {}'.format(log_url['url']))
                            builds.update([log_url['url']])

        except Exception:
            self.logger.exception('Failure occurred when querying Treeherder for builds')

        return list(builds)
Developer: Nebelhom, Project: mozdownload, Lines: 78, Source: treeherder.py

Example 9: get_test_packages_url

# Required import: from thclient import TreeherderClient [as alias]
# Or: from thclient.TreeherderClient import get_jobs [as alias]
    def get_test_packages_url(self, properties):
        """Return the URL of the test packages JSON file.

        In case of localized daily builds we can query the en-US build to get
        the URL, but for candidate builds we need the tinderbox build
        of the first parent changeset which was not checked-in by the release
        automation process (necessary until bug 1242035 is fixed).
        """
        if properties.get('test_packages_url'):
            url = properties['test_packages_url']
        else:
            overrides = {
                'locale': 'en-US',
                'extension': 'test_packages.json',
            }

            # Use Treeherder to query for the next revision which has Tinderbox builds
            # available. We can use this revision to retrieve the test-packages URL.
            if properties['tree'].startswith('release-'):
                platform_map = {
                    'linux': {'build_platform': 'linux32'},
                    'linux64': {'build_platform': 'linux64'},
                    'macosx': {'build_os': 'mac', 'build_architecture': 'x86_64'},
                    'macosx64': {'build_os': 'mac', 'build_architecture': 'x86_64'},
                    'win32': {'build_os': 'win', 'build_architecture': 'x86'},
                    'win64': {'build_os': 'win', 'build_architecture': 'x86_64'},
                }

                self.logger.info('Querying tinderbox revision for {} build...'.format(
                                 properties['tree']))
                revision = properties['revision'][:12]

                client = TreeherderClient(server_url='https://treeherder.mozilla.org')
                resultsets = client.get_resultsets(properties['branch'],
                                                   tochange=revision,
                                                   count=50)

                # Retrieve the option hashes to filter for opt builds
                option_hash = None
                for key, values in client.get_option_collection_hash().iteritems():
                    for value in values:
                        if value['name'] == 'opt':
                            option_hash = key
                            break
                    if option_hash:
                        break

                # Set filters to speed-up querying jobs
                kwargs = {
                    'job_type_name': 'Build',
                    'exclusion_profile': False,
                    'option_collection_hash': option_hash,
                    'result': 'success',
                }
                kwargs.update(platform_map[properties['platform']])

                for resultset in resultsets:
                    kwargs.update({'result_set_id': resultset['id']})
                    jobs = client.get_jobs(properties['branch'], **kwargs)
                    if len(jobs):
                        revision = resultset['revision']
                        break

                self.logger.info('Found revision for tinderbox build: {}'.format(revision))

                overrides['build_type'] = 'tinderbox'
                overrides['revision'] = revision

            # For update tests we need the test package of the target build. That allows
            # us to add fallback code in case major parts of the ui are changing in Firefox.
            if properties.get('target_buildid'):
                overrides['build_id'] = properties['target_buildid']

            # The test package json file has a prefix with bug 1239808 fixed. Older builds need
            # a fallback to a prefix-less filename.
            try:
                self.logger.info('Querying test packages URL...')
                url = self.query_file_url(properties, property_overrides=overrides)
            except download_errors.NotFoundError:
                self.logger.info('URL not found. Querying not-prefixed test packages URL...')
                extension = overrides.pop('extension')
                build_url = self.query_file_url(properties, property_overrides=overrides)
                url = '{}/{}'.format(build_url[:build_url.rfind('/')], extension)
                r = requests.head(url)
                if r.status_code != 200:
                    url = None

            self.logger.info('Found test package URL at: {}'.format(url))

        return url
Developer: whimboo, Project: mozmill-ci, Lines: 92, Source: automation.py

Example 10: TriggerBuild

# Required import: from thclient import TreeherderClient [as alias]
# Or: from thclient.TreeherderClient import get_jobs [as alias]

#......... part of the code omitted .........

            # create agent status folder
            if os.path.exists(os.path.join(os.getcwd(), self.DEFAULT_AGENT_STATUS_DIR)) is False:
                os.mkdir(os.path.join(os.getcwd(), self.DEFAULT_AGENT_STATUS_DIR))

            # move to agent config folder
            if sys.platform == "linux2":
                new_hasal_json_fp = os.path.join(self.DEFAULT_AGENT_CONF_DIR_LINUX, self.HASAL_JSON_FN)
            elif sys.platform == "darwin":
                new_hasal_json_fp = os.path.join(self.DEFAULT_AGENT_CONF_DIR_MAC, self.HASAL_JSON_FN)
            else:
                new_hasal_json_fp = os.path.join(self.DEFAULT_AGENT_CONF_DIR_WIN, self.HASAL_JSON_FN)
            os.rename(self.HASAL_JSON_FN, new_hasal_json_fp)

            if os.path.exists(new_hasal_json_fp):
                print "INFO: hasal json file move to new location [%s]" % new_hasal_json_fp
            else:
                print "ERROR: hasal json file in not in new location [%s]" % new_hasal_json_fp
            sys.exit(0)

    def fetch_resultset(self, user_email, build_hash, default_count=500):
        tmp_resultsets = self.thclient.get_resultsets(self.repo, count=default_count)
        for resultset in tmp_resultsets:
            if resultset['author'].lower() == user_email.lower():
                self.resultsets.append(resultset)
                if build_hash is None:
                    return resultset
                elif resultset['revision'] == build_hash:
                    return resultset
        print "Can't find the specify build hash [%s] in resultsets!!" % build_hash
        return None

    def get_job(self, resultset, platform_keyword_list):
        jobs = self.thclient.get_jobs(self.repo, result_set_id=resultset['id'])
        for job in jobs:
            cnt = 0
            for platform_keyword in platform_keyword_list:
                if platform_keyword in job['platform']:
                    cnt += 1
            if job['platform_option'] == self.platform_option and cnt == len(platform_keyword_list):
                return job
        print "Can't find the specify platform [%s] and platform_options [%s] in jobs!!!" % (self.platform, self.platform_option)
        return None

    def get_files_from_remote_url_folder(self, remote_url_str):
        return_dict = {}
        try:
            response_obj = urllib2.urlopen(remote_url_str)
            if response_obj.getcode() == 200:
                for line in response_obj.readlines():
                    match = re.search(r'(?<=href=").*?(?=")', line)
                    if match:
                        href_link = match.group(0)
                        f_name = href_link.split("/")[-1]
                        return_dict[f_name] = href_link
            else:
                print "ERROR: fetch remote file list error with code [%s]" % str(response_obj.getcode())
        except Exception as e:
            print "ERROR: [%s]" % e.message
        return return_dict

    def download_file(self, output_dp, download_link):
        print "Prepare to download the build from link [%s]" % download_link
        response = requests.get(download_link, verify=False, stream=True)
        download_fn = download_link.split("/")[-1]
        if os.path.exists(output_dp) is False:
Developer: Conjuror, Project: Hasal, Lines: 70, Source: trigger_build.py

Example 11: Treeherder

# Required import: from thclient import TreeherderClient [as alias]
# Or: from thclient.TreeherderClient import get_jobs [as alias]
class Treeherder(object):
    """Wrapper class for TreeherderClient to ease the use of its API."""

    def __init__(self, application, branch, platform, host=TREEHERDER_HOST, protocol="https"):
        """Create a new instance of the Treeherder class.

        :param application: The name of the application to download.
        :param branch: Name of the branch.
        :param platform: Platform of the application.
        :param host: The Treeherder host to make use of.
        :param protocol: The protocol for the Treeherder host.
        """
        self.logger = logging.getLogger(__name__)

        self.client = TreeherderClient(host=host, protocol=protocol)
        self.application = application
        self.branch = branch
        self.platform = platform

    def get_treeherder_platform(self, platform):
        """Return the internal Treeherder platform identifier.

        :param platform: Platform of the application.
        """
        try:
            return PLATFORM_MAP[platform]
        except KeyError:
            raise NotSupportedError('Platform "{}" is not supported.'.format(platform))

    def query_builds_by_revision(self, revision, job_type_name="Build", debug_build=False):
        """Retrieve build folders for a given revision with the help of Treeherder.

        :param revision: Revision of the build to download.
        :param job_type_name: Name of the job to look for. For builds it should be
            'Build', 'Nightly', and 'L10n Nightly'. Defaults to `Build`.
        :param debug_build: Download a debug build.
        """
        builds = set()

        try:
            self.logger.info(
                "Querying {host} for list of builds for revision: {revision}".format(
                    host=self.client.host, revision=revision
                )
            )

            # Retrieve the option hash to filter for type of build (opt, and debug for now)
            option_hash = None
            for key, values in self.client.get_option_collection_hash().iteritems():
                for value in values:
                    if value["name"] == ("debug" if debug_build else "opt"):
                        option_hash = key
                        break
                if option_hash:
                    break

            resultsets = self.client.get_resultsets(self.branch, revision=revision)

            # Set filters to speed-up querying jobs
            kwargs = {"option_collection_hash": option_hash, "job_type_name": job_type_name, "exclusion_profile": False}
            kwargs.update(self.get_treeherder_platform(self.platform))

            for resultset in resultsets:
                kwargs.update({"result_set_id": resultset["id"]})
                jobs = self.client.get_jobs(self.branch, **kwargs)
                for job in jobs:
                    log_urls = self.client.get_job_log_url(self.branch, job_id=job["id"])
                    for log_url in log_urls:
                        if self.application in log_url["url"]:
                            self.logger.debug("Found build folder: {}".format(log_url["url"]))
                            builds.update([log_url["url"]])

        except Exception:
            self.logger.exception("Failure occurred when querying Treeherder for builds")

        return list(builds)
Developer: Motwani, Project: mozdownload, Lines: 78, Source: treeherder.py

Example 12: TreeWatcher

# Required import: from thclient import TreeherderClient [as alias]
# Or: from thclient.TreeherderClient import get_jobs [as alias]
class TreeWatcher(object):
    """Class to keep track of test jobs starting and finishing, known
    revisions and builders, and re-trigger jobs in either when a job
    fails or a when requested by a user.

    Redundant triggers are prevented by keeping track of each buildername,
    tree, revision we've already triggered. The invariant is that for
    any (buildername, tree, revision) combination, we will only issue triggers
    once. Old revisions are purged after a certain interval, so care must
    be taken that enough revisions are stored at a time to prevent issuing
    redundant triggers.
    """
    # Allow at least this many failures for a revision.
    # If we re-trigger for each orange and per-push orange
    # factor is approximately fixed, we shouldn't need to trigger
    # much more than that for any push that would be suitable to land.
    default_retry = 1
    per_push_failures = 4
    # We may trigger more than this as long as the total is below this
    # proportion of all builds for a push (~3% of jobs for now).
    failure_tolerance_factor = 33

    # See the comment below about pruning old revisions.
    revmap_threshold = 2000
    # If someone asks for more than 20 rebuilds on a push, only give them 20.
    requested_limit = 20

    def __init__(self, ldap_auth, is_triggerbot_user=lambda _: True):
        self.revmap = defaultdict(dict)
        self.revmap_threshold = TreeWatcher.revmap_threshold
        self.auth = ldap_auth
        self.lower_trigger_limit = TreeWatcher.default_retry * TreeWatcher.per_push_failures
        self.log = logging.getLogger('trigger-bot')
        self.is_triggerbot_user = is_triggerbot_user
        self.global_trigger_count = 0
        self.treeherder_client = TreeherderClient()
        self.hidden_builders = set()
        self.refresh_builder_counter = 0

    def _prune_revmap(self):
        # After a certain point we'll need to prune our revmap so it doesn't grow
        # infinitely.
        # We only need to keep an entry around from when we last see it
        # as an incoming revision and the next time it's finished and potentially
        # failed, but it could be pending for a while so we don't know how long that
        # will be.
        target_count = int(TreeWatcher.revmap_threshold * 2/3)
        prune_count = len(self.revmap.keys()) - target_count
        self.log.info('Pruning %d entries from the revmap' % prune_count)

        # Could/should use an LRU cache here, but assuming any job will go
        # from pending to complete in 24 hrs and we have up to 528 pushes a
        # day (like we had last April fool's day), that's still just 528
        # entries to sort.
        for rev, data in sorted(self.revmap.items(), key=lambda (k, v): v['time_seen']):
            if not prune_count:
                self.log.info('Finished pruning, oldest rev is now: %s' % rev)
                return

            del self.revmap[rev]
            prune_count -= 1

    def known_rev(self, branch, rev):
        return rev in self.revmap


    def _get_jobs(self, branch, rev, hidden):
        results = self.treeherder_client.get_resultsets(branch, revision=rev)
        jobs = []
        if results:
            result_set_id = results[0]['id']
            kwargs = {
                'count': 2000,
                'result_set_id': result_set_id,
            }
            if hidden:
                kwargs['visibility'] = 'excluded'
            jobs = self.treeherder_client.get_jobs(branch, **kwargs)
        return [job['ref_data_name'] for job in jobs
                if not re.match('[a-z0-9]{12}', job['ref_data_name'])]


    def get_hidden_jobs(self, branch, rev):
        return self._get_jobs(branch, rev, True)


    def get_visible_jobs(self, branch, rev):
        return self._get_jobs(branch, rev, False)


    def update_hidden_builders(self, branch, rev):
        hidden_builders = set(self.get_hidden_jobs(branch, rev))
        visible_builders = set(self.get_visible_jobs(branch, rev))
        self.hidden_builders -= visible_builders
        self.hidden_builders |= hidden_builders
        self.log.info('Updating hidden builders')
        self.log.info('There are %d hidden builders on try' %
                      len(self.hidden_builders))


#......... part of the code omitted .........
Developer: armenzg, Project: trigger-bot, Lines: 103, Source: tree_watcher.py

Example 13: GetBuild

# Required import: from thclient import TreeherderClient [as alias]
# Or: from thclient.TreeherderClient import get_jobs [as alias]
class GetBuild(object):
    ARCHIVE_URL = "https://archive.mozilla.org"
    NIGHTLY_LATEST_URL_FOLDER = "/pub/firefox/nightly/latest-mozilla-central/"
    PLATFORM_FN_MAPPING = {'linux32': {'key': 'linux-i686', 'ext': 'tar.bz2', 'trydl': 'linux', 'job': ['linux32']},
                           'linux64': {'key': 'linux-x86_64', 'ext': 'tar.bz2', 'trydl': 'linux64', 'job': ['linux64']},
                           'mac': {'key': 'mac', 'ext': 'dmg', 'trydl': 'macosx64', 'job': ['osx']},
                           'win32': {'key': 'win32', 'ext': 'zip', 'trydl': 'win32', 'job': ['windows', '32']},
                           'win64': {'key': 'win64', 'ext': 'zip', 'trydl': 'win64', 'job': ['windows', '64']}}

    def __init__(self, repo, platform, status_check):
        self.repo = repo
        self.platform = platform
        self.platform_option = 'opt'
        self.resultsets = []
        self.skip_status_check = status_check
        self.thclient = TreeherderClient()

    def fetch_resultset(self, user_email, build_hash, default_count=500):
        tmp_resultsets = self.thclient.get_resultsets(self.repo, count=default_count)
        for resultset in tmp_resultsets:
            if resultset['author'].lower() == user_email.lower():
                self.resultsets.append(resultset)
                if build_hash is None:
                    return resultset
                elif resultset['revision'] == build_hash:
                    return resultset
        print "Can't find the specify build hash [%s] in resultsets!!" % build_hash
        return None

    def get_job(self, resultset, platform_keyword_list):
        jobs = self.thclient.get_jobs(self.repo, result_set_id=resultset['id'])
        for job in jobs:
            cnt = 0
            for platform_keyword in platform_keyword_list:
                if platform_keyword in job['platform']:
                    cnt += 1
            if job['platform_option'] == self.platform_option and cnt == len(platform_keyword_list):
                return job
        print "Can't find the specify platform [%s] and platform_options [%s] in jobs!!!" % (self.platform, self.platform_option)
        return None

    def get_files_from_remote_url_folder(self, remote_url_str):
        return_dict = {}
        try:
            response_obj = urllib2.urlopen(remote_url_str)
            if response_obj.getcode() == 200:
                for line in response_obj.readlines():
                    match = re.search(r'(?<=href=").*?(?=")', line)
                    if match:
                        href_link = match.group(0)
                        f_name = href_link.split("/")[-1]
                        return_dict[f_name] = href_link
            else:
                print "ERROR: fetch remote file list error with code [%s]" % str(response_obj.getcode())
        except Exception as e:
            print "ERROR: [%s]" % e.message
        return return_dict

    def download_file(self, output_dp, download_link):
        print "Prepare to download the build from link [%s]" % download_link
        response = requests.get(download_link, verify=False, stream=True)
        download_fn = download_link.split("/")[-1]
        if os.path.exists(output_dp) is False:
            os.makedirs(output_dp)
        download_fp = os.path.join(output_dp, download_fn)
        try:
            try:
                total_len = int(response.headers['content-length'])
            except (KeyError, ValueError):
                total_len = None
            with open(download_fp, 'wb') as fh:
                # tqdm's total must be None when the content length is unknown
                for data in tqdm(response.iter_content(chunk_size=512 * 1024),
                                 total=(total_len / (512 * 1024)) if total_len else None):
                    fh.write(data)
            return download_fp
        except Exception as e:
            print "ERROR: [%s]" % e.message
            return None

    def download_from_remote_url_folder(self, remote_url_str, output_dp):
        # get latest nightly build list from remote url folder
        remote_file_dict = self.get_files_from_remote_url_folder(remote_url_str)

        # filter with platform, and return file name with extension
        if len(remote_file_dict.keys()) == 0:
            print "ERROR: can't get remote file list, could be the network error, or url path[%s] wrong!!" % remote_url_str
            return False
        else:
            if self.platform not in self.PLATFORM_FN_MAPPING:
                print "ERROR: we are currently not support the platform[%s] you specified!" % self.platform
                print "We are currently support the platform tag: [%s]" % self.PLATFORM_FN_MAPPING.keys()
                return False
            else:
                matched_keyword = self.PLATFORM_FN_MAPPING[self.platform]['key'] + "." + self.PLATFORM_FN_MAPPING[self.platform]['ext']
                matched_file_list = [fn for fn in remote_file_dict.keys() if matched_keyword in fn and "firefox" in fn]
                if len(matched_file_list) != 1:
                    print "WARN: the possible match file list is not equal 1, list as below: [%s]" % matched_file_list
                    if len(matched_file_list) < 1:
                        return False
                    matched_file_list = sorted(matched_file_list)[-1:]
                    print "WARN: select following file [%s]" % matched_file_list
#......... part of the code omitted .........
Developer: Conjuror, Project: Hasal, Lines: 103, Source: get_build.py


Note: The thclient.TreeherderClient.get_jobs examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors, and distribution and use should follow the corresponding project's License. Do not reproduce without permission.