

Python HTCondorUtils.quote Method Code Examples

This article collects typical usage examples of the Python HTCondorUtils.quote method. If you are wondering how exactly to use HTCondorUtils.quote, or what calling it looks like in real code, the curated examples below may help. You can also explore further usage examples from the HTCondorUtils module, where this method is defined.


The following presents 15 code examples of the HTCondorUtils.quote method, sorted by popularity by default.
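All of the examples below follow the same basic pattern: HTCondorUtils.quote embeds a workflow name safely into an HTCondor ClassAd constraint string, which is then passed to schedd queries or job actions. The minimal sketch below illustrates that pattern; it assumes HTCondorUtils.quote returns a quoted, escaped ClassAd string literal (comparable to classad.quote), and the workflow name is a hypothetical placeholder.

# Minimal usage sketch (assumption: HTCondorUtils.quote returns a quoted,
# escaped ClassAd string literal, comparable to classad.quote).
import htcondor
import HTCondorUtils

workflow = "170101_120000:user_crab_mytask"  # hypothetical CRAB workflow name

# Build a constraint that matches only the ROOT DAG job of this workflow;
# quoting prevents the workflow name from breaking the ClassAd expression.
rootConst = 'TaskType =?= "ROOT" && CRAB_ReqName =?= %s' % HTCondorUtils.quote(workflow)

# Query a schedd for matching ads (placeholder local schedd; the examples
# below obtain the schedd object via HTCondorLocator instead).
schedd = htcondor.Schedd()
for ad in schedd.xquery(rootConst, ["ClusterId", "JobStatus"]):
    if "ClusterId" in ad and "JobStatus" in ad:
        print("ClusterId=%s JobStatus=%s" % (ad.eval("ClusterId"), ad.eval("JobStatus")))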

Example 1: killAll

# Required module: import HTCondorUtils [as alias]
# Or: from HTCondorUtils import quote [as alias]
    def killAll(self):

        # We need to keep ROOT DAG in hold until periodic remove kicks in.
        # See DagmanSubmitter.py#L390 (dagAd["PeriodicRemove"])
        # This is needed in case user wants to resubmit.
        rootConst = "TaskType =?= \"ROOT\" && CRAB_ReqName =?= %s" % HTCondorUtils.quote(self.workflow)
        # Holding DAG job does not mean that it will remove all jobs
        # and this must be done separately
        # --------------------------------------
        # From HTCondor documentation
        # http://research.cs.wisc.edu/htcondor/manual/v8.3/2_10DAGMan_Applications.html#SECTION003107000000000000000
        # --------------------------------------
        # After placing the condor_dagman job on hold, no new node jobs will be submitted,
        # and no PRE or POST scripts will be run. Any node jobs already in the HTCondor queue
        # will continue undisturbed. If the condor_dagman job is left on hold, it will remain
        # in the HTCondor queue after all of the currently running node jobs are finished.
        # --------------------------------------
        # TODO: Remove jobConst query when htcondor ticket is solved
        # https://htcondor-wiki.cs.wisc.edu/index.cgi/tktview?tn=5175
        jobConst = "TaskType =!= \"ROOT\" && CRAB_ReqName =?= %s" % HTCondorUtils.quote(self.workflow)

        with HTCondorUtils.AuthenticatedSubprocess(self.proxy) as (parent, rpipe):
            if not parent:
                with self.schedd.transaction() as tsc:
                    self.schedd.act(htcondor.JobAction.Hold, rootConst)
                    self.schedd.act(htcondor.JobAction.Remove, jobConst)
        results = rpipe.read()
        if results != "OK":
            raise TaskWorkerException("The CRAB3 server backend could not kill the task because the Grid scheduler answered with an error\n"\
                                      "This is probably a temporary glitch, please try it again and contact an expert if the error persist\n"+\
                                      "Error reason %s" % results)
Developer: nizamyusli, Project: CRABServer, Lines: 33, Source: DagmanKiller.py

Example 2: getScheddObj

# Required module: import HTCondorUtils [as alias]
# Or: from HTCondorUtils import quote [as alias]
 def getScheddObj(self, name):
     """
     Return a tuple (schedd, address) containing an object representing the
     remote schedd and its corresponding address.
     Still required for OLD tasks. Remove it later TODO
     """
     info = name.split("_")
     if len(info) > 3:
         name = info[2]
     else:
         name = self.getSchedd()
     if name == "localhost":
         schedd = htcondor.Schedd()
         with open(htcondor.param['SCHEDD_ADDRESS_FILE']) as fd:
             address = fd.read().split("\n")[0]
     else:
         info = name.split(":")
         pool = "localhost"
         if len(info) == 3:
             pool = info[1]
         htcondor.param['COLLECTOR_HOST'] = self.getCollector(pool)
         coll = htcondor.Collector()
         schedds = coll.query(htcondor.AdTypes.Schedd, 'regexp(%s, Name)' % HTCondorUtils.quote(info[0]))
         self.scheddAd = ""
         if not schedds:
             self.scheddAd = self.getCachedCollectorOutput(info[0])
         else:
             self.cacheCollectorOutput(info[0], schedds[0])
             self.scheddAd = self.getCachedCollectorOutput(info[0])
         address = self.scheddAd['MyAddress']
         schedd = htcondor.Schedd(self.scheddAd)
     return schedd, address
Developer: mialiu149, Project: CRABServer, Lines: 34, Source: HTCondorLocator.py

Example 3: getScheddObj

# Required module: import HTCondorUtils [as alias]
# Or: from HTCondorUtils import quote [as alias]
 def getScheddObj(self, name):
     """
     Return a tuple (schedd, address) containing an object representing the
     remote schedd and its corresponding address.
     """
     info = name.split("_")
     if len(info) > 3:
         name = info[2]
     else:
         name = self.getSchedd()
     if name == "localhost":
         schedd = htcondor.Schedd()
         with open(htcondor.param['SCHEDD_ADDRESS_FILE']) as fd:
             address = fd.read().split("\n")[0]
     else:
         info = name.split(":")
         pool = "localhost"
         if len(info) == 3:
             pool = info[1]
         htcondor.param['COLLECTOR_HOST'] = self.getCollector(pool)
         coll = htcondor.Collector()
         schedds = coll.query(htcondor.AdTypes.Schedd, 'regexp(%s, Name)' % HTCondorUtils.quote(info[0]))
         if not schedds:
             raise Exception("Unable to locate schedd %s" % info[0])
         self.scheddAd = schedds[0]
         address = self.scheddAd['MyAddress']
         schedd = htcondor.Schedd(self.scheddAd)
     return schedd, address
Developer: khurtado, Project: CRABServer, Lines: 30, Source: HTCondorLocator.py

Example 4: executeInternal

# Required module: import HTCondorUtils [as alias]
# Or: from HTCondorUtils import quote [as alias]
    def executeInternal(self, apmon, *args, **kwargs):
        #Marco: I guess these value errors only happens for development instances
        if 'task' not in kwargs:
            raise ValueError("No task specified.")
        self.task = kwargs['task']
        if 'tm_taskname' not in self.task:
            raise ValueError("No taskname specified")
        self.workflow = self.task['tm_taskname']
        if 'user_proxy' not in self.task:
            raise ValueError("No proxy provided")
        self.proxy = self.task['user_proxy']

        self.logger.info("About to kill workflow: %s." % self.workflow)

        self.workflow = str(self.workflow)
        if not WORKFLOW_RE.match(self.workflow):
            raise Exception("Invalid workflow name.")

        # Query HTCondor for information about running jobs and update Dashboard appropriately
        if self.task['tm_collector']:
            self.backendurls['htcondorPool'] = self.task['tm_collector']
        loc = HTCondorLocator.HTCondorLocator(self.backendurls)

        address = ""
        try:
            self.schedd, address = loc.getScheddObjNew(self.task['tm_schedd'])
        except Exception as exp:
            msg  = "The CRAB server backend was not able to contact the Grid scheduler."
            msg += " Please try again later."
            msg += " If the error persists send an e-mail to %s." % (FEEDBACKMAIL)
            msg += " Message from the scheduler: %s" % (str(exp))
            self.logger.exception("%s: %s" % (self.workflow, msg))
            raise TaskWorkerException(msg)

        try:
            hostname = socket.getfqdn()
        except:
            hostname = ''

        const = 'CRAB_ReqName =?= %s && TaskType=?="Job"' % HTCondorUtils.quote(self.workflow)
        try:
            for ad in list(self.schedd.xquery(const, ['CRAB_Id', 'CRAB_Retry'])):
                if ('CRAB_Id' not in ad) or ('CRAB_Retry' not in ad):
                    continue
                jobid = str(ad.eval('CRAB_Id'))
                jobretry = str(ad.eval('CRAB_Retry'))
                jinfo = {'broker': hostname,
                         'bossId': jobid,
                         'StatusValue': 'killed',
                        }
                insertJobIdSid(jinfo, jobid, self.workflow, jobretry)
                self.logger.info("Sending kill info to Dashboard: %s" % str(jinfo))
                apmon.sendToML(jinfo)
        except:
            self.logger.exception("Failed to notify Dashboard of job kills") #warning

        # Note that we can not send kills for jobs not in queue at this time; we'll need the
        # DAG FINAL node to be fixed and the node status to include retry number.
        return self.killAll(const)
Developer: belforte, Project: CRABServer, Lines: 61, Source: DagmanKiller.py

Example 5: killJobs

# Required module: import HTCondorUtils [as alias]
# Or: from HTCondorUtils import quote [as alias]
 def killJobs(self, ids):
     ad = classad.ClassAd()
     ad['foo'] = ids
     const = "CRAB_ReqName =?= %s && member(CRAB_Id, %s)" % (HTCondorUtils.quote(self.workflow), ad.lookup("foo").__repr__())
     with HTCondorUtils.AuthenticatedSubprocess(self.proxy) as (parent, rpipe):
         if not parent:
             self.schedd.act(htcondor.JobAction.Remove, const)
     results = rpipe.read()
     if results != "OK":
         raise TaskWorkerException("The CRAB3 server backend could not kill jobs [%s]. because the Grid scheduler answered with an error\n" % ", ".join(ids)+\
                                   "This is probably a temporary glitch, please try it again and contact an expert if the error persist\n"+\
                                   "Error reason %s" % results)
Developer: nizamyusli, Project: CRABServer, Lines: 14, Source: DagmanKiller.py

Example 6: killAll

# Required module: import HTCondorUtils [as alias]
# Or: from HTCondorUtils import quote [as alias]
    def killAll(self):

        # Search for and hold the DAG
        rootConst = "TaskType =?= \"ROOT\" && CRAB_ReqName =?= %s" % HTCondorUtils.quote(self.workflow)

        with HTCondorUtils.AuthenticatedSubprocess(self.proxy) as (parent, rpipe):
            if not parent:
                self.schedd.act(htcondor.JobAction.Hold, rootConst)
        results = rpipe.read()
        if results != "OK":
            raise TaskWorkerException("The CRAB3 server backend could not kill the task because the Grid scheduler answered with an error\n"\
                                      "This is probably a temporary glitch, please try it again and contact an expert if the error persist\n"+\
                                      "Error reason %s" % results)
Developer: khurtado, Project: CRABServer, Lines: 15, Source: DagmanKiller.py

Example 7: duplicateCheck

# Required module: import HTCondorUtils [as alias]
# Or: from HTCondorUtils import quote [as alias]
    def duplicateCheck(self, task):
        """
        Look to see if the task we are about to submit is already in the schedd.
        If so, assume that this task in TaskWorker was run successfully, but killed
        before it could update the frontend.
        """
        workflow = task["tm_taskname"]

        if task["tm_collector"]:
            self.backendurls["htcondorPool"] = task["tm_collector"]
        loc = HTCondorLocator.HTCondorLocator(self.backendurls)

        schedd = ""
        try:
            self.logger.debug("Duplicate check is getting the schedd obj. Collector is: %s", task["tm_collector"])
            schedd, dummyAddress = loc.getScheddObjNew(task["tm_schedd"])
            self.logger.debug("Got schedd obj for %s ", task["tm_schedd"])
        except Exception as exp:
            msg = "The CRAB server backend was not able to contact the Grid scheduler."
            msg += " Please try again later."
            msg += " If the error persists send an e-mail to %s." % (FEEDBACKMAIL)
            msg += " Message from the scheduler: %s" % (str(exp))
            self.logger.exception("%s: %s", workflow, msg)
            raise TaskWorkerException(msg)

        rootConst = (
            'TaskType =?= "ROOT" && CRAB_ReqName =?= %s && (isUndefined(CRAB_Attempt) || CRAB_Attempt == 0)'
            % HTCondorUtils.quote(workflow)
        )

        self.logger.debug("Duplicate check is querying the schedd: %s", rootConst)
        results = list(schedd.xquery(rootConst, []))
        self.logger.debug("Schedd queried %s", results)

        if not results:
            # Task not already in schedd
            return None

        configreq = {"workflow": workflow, "status": "SUBMITTED", "subresource": "success"}
        self.logger.warning(
            "Task %s already submitted to HTCondor; pushing information centrally: %s", workflow, str(configreq)
        )
        data = urllib.urlencode(configreq)
        self.server.post(self.resturi, data=data)

        # Note that we don't re-send Dashboard jobs; we assume this is a rare occurrance and
        # don't want to upset any info already in the Dashboard.

        return Result.Result(task=task, result=(-1))
Developer: AndresTanasijczuk, Project: CRABServer, Lines: 51, Source: DagmanSubmitter.py

Example 8: killJobs

# Required module: import HTCondorUtils [as alias]
# Or: from HTCondorUtils import quote [as alias]
 def killJobs(self, ids):
     ad = classad.ClassAd()
     ad['foo'] = ids
     const = "CRAB_ReqName =?= %s && member(CRAB_Id, %s)" % (HTCondorUtils.quote(self.workflow), ad.lookup("foo").__repr__())
     with HTCondorUtils.AuthenticatedSubprocess(self.proxy) as (parent, rpipe):
         if not parent:
             self.schedd.act(htcondor.JobAction.Remove, const)
     results = rpipe.read()
     if results != "OK":
         msg  = "The CRAB server backend was not able to kill these jobs %s," % (ids)
         msg += " because the Grid scheduler answered with an error."
         msg += " This is probably a temporary glitch. Please try again later."
         msg += " If the error persists send an e-mail to %s." % (FEEDBACKMAIL)
         msg += " Error reason: %s" % (results)
         raise TaskWorkerException(msg)
Developer: dciangot, Project: CRABServer, Lines: 17, Source: DagmanKiller.py

Example 9: getScheddObjNew

# Required module: import HTCondorUtils [as alias]
# Or: from HTCondorUtils import quote [as alias]
 def getScheddObjNew(self, schedd):
     """
     Return a tuple (schedd, address) containing an object representing the
     remote schedd and its corresponding address.
     """
     htcondor.param['COLLECTOR_HOST'] = self.getCollector().encode('ascii', 'ignore')
     coll = htcondor.Collector()
     schedds = coll.query(htcondor.AdTypes.Schedd, 'regexp(%s, Name)' % HTCondorUtils.quote(schedd.encode('ascii', 'ignore')))
     self.scheddAd = ""
     if not schedds:
         self.scheddAd = self.getCachedCollectorOutput(schedd)
     else:
         self.cacheCollectorOutput(schedd, schedds[0])
         self.scheddAd = self.getCachedCollectorOutput(schedd)
     address = self.scheddAd['MyAddress']
     scheddObj = htcondor.Schedd(self.scheddAd)
     return scheddObj, address
Developer: jmarra13, Project: CRABServer, Lines: 19, Source: HTCondorLocator.py

Example 10: getScheddObjNew

# Required module: import HTCondorUtils [as alias]
# Or: from HTCondorUtils import quote [as alias]
 def getScheddObjNew(self, schedd):
     """
     Return a tuple (schedd, address) containing an object representing the
     remote schedd and its corresponding address.
     """
     htcondor.param['COLLECTOR_HOST'] = self.getCollector().encode('ascii', 'ignore')
     coll = htcondor.Collector()
     schedds = coll.query(htcondor.AdTypes.Schedd, 'Name=?=%s' % HTCondorUtils.quote(schedd.encode('ascii', 'ignore')),
                          ["AddressV1", "CondorPlatform", "CondorVersion", "Machine", "MyAddress", "Name", "MyType", "ScheddIpAddr", "RemoteCondorSetup"])
     self.scheddAd = ""
     if not schedds:
         self.scheddAd = self.getCachedCollectorOutput(schedd)
     else:
         self.cacheCollectorOutput(schedd, schedds[0])
         self.scheddAd = self.getCachedCollectorOutput(schedd)
     address = self.scheddAd['MyAddress']
     scheddObj = htcondor.Schedd(self.scheddAd)
     return scheddObj, address
Developer: mmascher, Project: CRABServer, Lines: 20, Source: HTCondorLocator.py

Example 11: str

# Required module: import HTCondorUtils [as alias]
# Or: from HTCondorUtils import quote [as alias]
        workflow = task['tm_taskname']

        if task['tm_collector']:
            self.backendurls['htcondorPool'] = task['tm_collector']
        loc = HTCondorLocator.HTCondorLocator(self.backendurls)

        address = ""
        schedd = ""
        try:
            schedd, address = loc.getScheddObjNew(task['tm_schedd'])
        except Exception, exp:
            msg = ("%s: The CRAB3 server backend is not able to contact Grid scheduler. Please, retry later. Message from the scheduler: %s") % (workflow, str(exp))
            self.logger.exception(msg)
            raise TaskWorkerException(msg)

        rootConst = 'TaskType =?= "ROOT" && CRAB_ReqName =?= %s && (isUndefined(CRAB_Attempt) || CRAB_Attempt == 0)' % HTCondorUtils.quote(workflow)

        results = list(schedd.xquery(rootConst, []))

        if not results:
            # Task not already in schedd
            return None

        configreq = {'workflow': workflow,
                     'status': "SUBMITTED",
                     'jobset': "-1",
                     'subresource': 'success',
                    }
        self.logger.warning("Task %s already submitted to HTCondor; pushing information centrally: %s" % (workflow, str(configreq)))
        data = urllib.urlencode(configreq)
        self.server.post(self.resturi, data = data)
Developer: mialiu149, Project: CRABServer, Lines: 33, Source: DagmanSubmitter.py

Example 12: execute_internal

# Required module: import HTCondorUtils [as alias]
# Or: from HTCondorUtils import quote [as alias]
    def execute_internal(self, *args, **kw):
        #Marco: I guess these value errors only happens for development instances
        if 'task' not in kw:
            raise ValueError("No task specified.")
        task = kw['task']
        if 'tm_taskname' not in task:
            raise ValueError("No taskname specified.")
        workflow = str(task['tm_taskname'])
        if 'user_proxy' not in task:
            raise ValueError("No proxy provided")
        proxy = task['user_proxy']

        self.logger.info("About to resubmit workflow: %s." % workflow)
        self.logger.info("Task info: %s" % str(task))

        if task['tm_collector']:
            self.backendurls['htcondorPool'] = task['tm_collector']
        loc = HTCondorLocator.HTCondorLocator(self.backendurls)

        schedd = ""
        address = ""
        try:
            schedd, address = loc.getScheddObjNew(task['tm_schedd'])
        except Exception as exp:
            msg = ("%s: The CRAB3 server backend is not able to contact Grid scheduler. Please, retry later. Message from the scheduler: %s") % (workflow, str(exp))
            self.logger.exception(msg)
            raise TaskWorkerException(msg)

        # Check memory and walltime
        if task['resubmit_maxjobruntime'] != None and task['resubmit_maxjobruntime'] > 2800:
            msg = "task requests %s minutes of walltime but only %s is guaranteed to be available. Jobs may not find a site where to run. CRAB3 have changed this value to %s minutes" % (task['resubmit_maxjobruntime'], '2800', '2800')
            self.logger.warning(msg)
            task['resubmit_maxjobruntime'] = '2800'
            self.uploadWarning(msg, kw['task']['user_proxy'], kw['task']['tm_taskname'])
        if task['resubmit_maxmemory'] != None and task['resubmit_maxmemory'] > 2500:
            msg = "task requests %s memory but only %s is guaranteed to be available. Jobs may not find a site where to run and stay idle forever" % (task['resubmit_maxmemory'], '2500')
            self.logger.warning(msg)
            self.uploadWarning(msg, kw['task']['user_proxy'], kw['task']['tm_taskname'])

        # Release the DAG
        rootConst = "TaskType =?= \"ROOT\" && CRAB_ReqName =?= %s" % HTCondorUtils.quote(workflow)

        ## Calculate new parameters for resubmited jobs. These parameters will
        ## be (re)written in the _CONDOR_JOB_AD when we do schedd.edit() below.
        ad = classad.ClassAd()
        params = {'CRAB_ResubmitList'  : 'jobids',
                  'CRAB_SiteBlacklist' : 'site_blacklist',
                  'CRAB_SiteWhitelist' : 'site_whitelist',
                  'MaxWallTimeMins'    : 'maxjobruntime',
                  'RequestMemory'      : 'maxmemory',
                  'RequestCpus'        : 'numcores',
                  'JobPrio'            : 'priority'
                 }
        overwrite = False
        for taskparam in params.values():
            if ('resubmit_'+taskparam in task) and task['resubmit_'+taskparam] != None:
                if type(task['resubmit_'+taskparam]) == list:
                    ad[taskparam] = task['resubmit_'+taskparam]
                if taskparam != 'jobids':
                    overwrite = True

        if ('resubmit_jobids' in task) and task['resubmit_jobids']:
            with HTCondorUtils.AuthenticatedSubprocess(proxy) as (parent, rpipe):
                if not parent:
                    schedd.edit(rootConst, "HoldKillSig", 'SIGKILL')
                    ## Overwrite parameters in the os.environ[_CONDOR_JOB_AD] file. This will affect
                    ## all the jobs, not only the ones we want to resubmit. That's why the pre-job
                    ## is saving the values of the parameters for each job retry in text files (the
                    ## files are in the directory resubmit_info in the schedd).
                    for adparam, taskparam in params.iteritems():
                        if taskparam in ad:
                            schedd.edit(rootConst, adparam, ad[taskparam])
                        elif task['resubmit_'+taskparam] != None:
                            schedd.edit(rootConst, adparam, str(task['resubmit_'+taskparam]))
                    schedd.act(htcondor.JobAction.Hold, rootConst)
                    schedd.edit(rootConst, "HoldKillSig", 'SIGUSR1')
                    schedd.act(htcondor.JobAction.Release, rootConst)
        elif overwrite:
            with HTCondorUtils.AuthenticatedSubprocess(proxy) as (parent, rpipe):
                if not parent:
                    self.logger.debug("Resubmitting under condition overwrite = True")
                    for adparam, taskparam in params.iteritems():
                        if taskparam in ad:
                            if taskparam == 'jobids' and len(list(ad[taskparam])) == 0:
                                self.logger.debug("Setting %s = True in the task ad." % (adparam))
                                schedd.edit(rootConst, adparam, classad.ExprTree("true"))
                            else:
                                schedd.edit(rootConst, adparam, ad[taskparam])
                        elif task['resubmit_'+taskparam] != None:
                            schedd.edit(rootConst, adparam, str(task['resubmit_'+taskparam]))
                    schedd.act(htcondor.JobAction.Release, rootConst)
        else:
            ## This should actually not occur anymore in CRAB 3.3.16 or above, because
            ## starting from CRAB 3.3.16 the resubmission parameters are written to the
            ## Task DB with value != None, so the overwrite variable should never be False.
            with HTCondorUtils.AuthenticatedSubprocess(proxy) as (parent, rpipe):
                if not parent:
                    self.logger.debug("Resubmitting under condition overwrite = False")
                    schedd.edit(rootConst, "HoldKillSig", 'SIGKILL')
                    schedd.edit(rootConst, "CRAB_ResubmitList", classad.ExprTree("true"))
#......... part of the code omitted here .........
Developer: nizamyusli, Project: CRABServer, Lines: 103, Source: DagmanResubmitter.py

Example 13: executeInternal

# Required module: import HTCondorUtils [as alias]
# Or: from HTCondorUtils import quote [as alias]
    def executeInternal(self, *args, **kwargs):  # pylint: disable=unused-argument
        # Marco: I guess these value errors only happens for development instances
        if "task" not in kwargs:
            raise ValueError("No task specified.")
        task = kwargs["task"]
        if "tm_taskname" not in task:
            raise ValueError("No taskname specified.")
        workflow = str(task["tm_taskname"])
        if "user_proxy" not in task:
            raise ValueError("No proxy provided")
        proxy = task["user_proxy"]

        if task.get("resubmit_publication", False):
            resubmitWhat = "publications"
        else:
            resubmitWhat = "jobs"

        self.logger.info("About to resubmit %s for workflow: %s." % (resubmitWhat, workflow))
        self.logger.debug("Task info: %s" % str(task))

        if task.get("resubmit_publication", False):
            asourl = task.get("tm_asourl", None)
            # Let's not assume the db has been updated (mostly for devs), let's default asodb to asynctransfer!
            # Also the "or" takes care of the case were the new code is executed on old task
            # i.e.: tm_asodb is there but empty.
            asodb = task.get("tm_asodb", "asynctransfer") or "asynctransfer"
            if not asourl:
                msg = "ASO URL not set. Can not resubmit publication."
                raise TaskWorkerException(msg)
            self.logger.info("Will resubmit failed publications")
            self.resubmitPublication(asourl, asodb, proxy, workflow)
            return

        if task["tm_collector"]:
            self.backendurls["htcondorPool"] = task["tm_collector"]
        loc = HTCondorLocator.HTCondorLocator(self.backendurls)

        schedd = ""
        dummyAddress = ""
        try:
            schedd, dummyAddress = loc.getScheddObjNew(task["tm_schedd"])
        except Exception as exp:
            msg = "The CRAB server backend was not able to contact the Grid scheduler."
            msg += " Please try again later."
            msg += " If the error persists send an e-mail to %s." % (FEEDBACKMAIL)
            msg += " Message from the scheduler: %s" % (str(exp))
            self.logger.exception("%s: %s" % (workflow, msg))
            raise TaskWorkerException(msg)

        # Check memory and walltime
        stdmaxjobruntime = 2800
        stdmaxmemory = 2500
        if task["resubmit_maxjobruntime"] is not None and task["resubmit_maxjobruntime"] > stdmaxjobruntime:
            msg = "Task requests %s minutes of walltime, but only %s are guaranteed to be available." % (
                task["resubmit_maxjobruntime"],
                stdmaxjobruntime,
            )
            msg += " Jobs may not find a site where to run."
            msg += " CRAB has changed this value to %s minutes." % (stdmaxjobruntime)
            self.logger.warning(msg)
            task["resubmit_maxjobruntime"] = str(stdmaxjobruntime)
            self.uploadWarning(msg, proxy, kwargs["task"]["tm_taskname"])
        if task["resubmit_maxmemory"] is not None and task["resubmit_maxmemory"] > stdmaxmemory:
            msg = "Task requests %s MB of memory, but only %s MB are guaranteed to be available." % (
                task["resubmit_maxmemory"],
                stdmaxmemory,
            )
            msg += " Jobs may not find a site where to run and stay idle forever."
            self.logger.warning(msg)
            self.uploadWarning(msg, proxy, kwargs["task"]["tm_taskname"])

        # Release the DAG
        rootConst = 'TaskType =?= "ROOT" && CRAB_ReqName =?= %s' % HTCondorUtils.quote(workflow)

        ## Calculate new parameters for resubmitted jobs. These parameters will
        ## be (re)written in the _CONDOR_JOB_AD when we do schedd.edit() below.
        ad = classad.ClassAd()
        params = {
            "CRAB_ResubmitList": "jobids",
            "CRAB_SiteBlacklist": "site_blacklist",
            "CRAB_SiteWhitelist": "site_whitelist",
            "MaxWallTimeMins": "maxjobruntime",
            "RequestMemory": "maxmemory",
            "RequestCpus": "numcores",
            "JobPrio": "priority",
        }
        overwrite = False
        for taskparam in params.values():
            if ("resubmit_" + taskparam in task) and task["resubmit_" + taskparam] != None:
                # In case resubmission parameters contain a list of unicode strings,
                # convert it to a list of ascii strings because of HTCondor unicode
                # incompatibility.
                # Note that unicode strings that are not in a list are not handled,
                # but so far they don't exist in this part of the code.
                if isinstance(task["resubmit_" + taskparam], list):
                    nonUnicodeList = []
                    for p in task["resubmit_" + taskparam]:
                        if isinstance(p, unicode):
                            nonUnicodeList.append(p.encode("ascii", "ignore"))
                        else:
#......... part of the code omitted here .........
Developer: emaszs, Project: CRABServer, Lines: 103, Source: DagmanResubmitter.py

Example 14: killAll

# Required module: import HTCondorUtils [as alias]
# Or: from HTCondorUtils import quote [as alias]
    def killAll(self, jobConst):

        # We need to keep ROOT, PROCESSING, and TAIL DAGs in hold until periodic remove kicks in.
        # This is needed in case user wants to resubmit.
        rootConst = 'stringListMember(TaskType, "ROOT PROCESSING TAIL", " ") && CRAB_ReqName =?= %s' % HTCondorUtils.quote(self.workflow)

        # Holding DAG job does not mean that it will remove all jobs
        # and this must be done separately
        # --------------------------------------
        # From HTCondor documentation
        # http://research.cs.wisc.edu/htcondor/manual/v8.3/2_10DAGMan_Applications.html#SECTION003107000000000000000
        # --------------------------------------
        # After placing the condor_dagman job on hold, no new node jobs will be submitted,
        # and no PRE or POST scripts will be run. Any node jobs already in the HTCondor queue
        # will continue undisturbed. If the condor_dagman job is left on hold, it will remain
        # in the HTCondor queue after all of the currently running node jobs are finished.
        # --------------------------------------
        # TODO: Remove jobConst query when htcondor ticket is solved
        # https://htcondor-wiki.cs.wisc.edu/index.cgi/tktview?tn=5175

        with HTCondorUtils.AuthenticatedSubprocess(self.proxy) as (parent, rpipe):
            if not parent:
                with self.schedd.transaction() as dummytsc:
                    self.schedd.act(htcondor.JobAction.Hold, rootConst)
                    self.schedd.act(htcondor.JobAction.Remove, jobConst)
        results = rpipe.read()
        if results != "OK":
            msg  = "The CRAB server backend was not able to kill the task,"
            msg += " because the Grid scheduler answered with an error."
            msg += " This is probably a temporary glitch. Please try again later."
            msg += " If the error persists send an e-mail to %s." % (FEEDBACKMAIL)
            msg += " Error reason: %s" % (results)
            raise TaskWorkerException(msg)
Developer: belforte, Project: CRABServer, Lines: 35, Source: DagmanKiller.py

Example 15: execute_internal

# Required module: import HTCondorUtils [as alias]
# Or: from HTCondorUtils import quote [as alias]
    def execute_internal(self, *args, **kw):
        #Marco: I guess these value errors only happens for development instances
        if 'task' not in kw:
            raise ValueError("No task specified.")
        task = kw['task']
        if 'tm_taskname' not in task:
            raise ValueError("No taskname specified.")
        workflow = str(task['tm_taskname'])
        if 'user_proxy' not in task:
            raise ValueError("No proxy provided")
        proxy = task['user_proxy']

        self.logger.info("About to resubmit workflow: %s." % workflow)
        self.logger.info("Task info: %s" % str(task))

        loc = HTCondorLocator.HTCondorLocator(self.backendurls)
        schedd, address = loc.getScheddObj(workflow) #TODO wrap

        # Release the DAG
        rootConst = "TaskType =?= \"ROOT\" && CRAB_ReqName =?= %s" % HTCondorUtils.quote(workflow)

        # Calculate a new white/blacklist
        ad = classad.ClassAd()
        ad['whitelist'] = task['resubmit_site_whitelist']
        ad['blacklist'] = task['resubmit_site_blacklist']

        if ('resubmit_ids' in task) and task['resubmit_ids']:
            ad['resubmit'] = task['resubmit_ids']
            with HTCondorUtils.AuthenticatedSubprocess(proxy) as (parent, rpipe):
                if not parent:
                    schedd.edit(rootConst, "HoldKillSig", 'SIGKILL')
                    schedd.edit(rootConst, "CRAB_ResubmitList", ad['resubmit'])
                    schedd.act(htcondor.JobAction.Hold, rootConst)
                    schedd.edit(rootConst, "HoldKillSig", 'SIGUSR1')
                    schedd.act(htcondor.JobAction.Release, rootConst)

        elif task['resubmit_site_whitelist'] or task['resubmit_site_blacklist'] or \
                task['resubmit_priority'] != None or task['resubmit_maxmemory'] != None or \
                task['resubmit_numcores'] != None or task['resubmit_maxjobruntime'] != None:
            with HTCondorUtils.AuthenticatedSubprocess(proxy) as (parent, rpipe):
                if not parent:
                    if task['resubmit_site_blacklist']:
                        schedd.edit(rootConst, "CRAB_SiteResubmitBlacklist", ad['blacklist'])
                    if task['resubmit_site_whitelist']:
                        schedd.edit(rootConst, "CRAB_SiteResubmitWhitelist", ad['whitelist'])
                    if task['resubmit_priority'] != None:
                        schedd.edit(rootConst, "JobPrio", task['resubmit_priority'])
                    if task['resubmit_numcores'] != None:
                        schedd.edit(rootConst, "RequestCpus", task['resubmit_numcores'])
                    if task['resubmit_maxjobruntime'] != None:
                        schedd.edit(rootConst, "MaxWallTimeMins", task['resubmit_maxjobruntime'])
                    if task['resubmit_maxmemory'] != None:
                        schedd.edit(rootConst, "RequestMemory", task['resubmit_maxmemory'])
                    schedd.act(htcondor.JobAction.Release, rootConst)

        else:
            with HTCondorUtils.AuthenticatedSubprocess(proxy) as (parent, rpipe):
                if not parent:
                    schedd.edit(rootConst, "HoldKillSig", 'SIGKILL')
                    schedd.edit(rootConst, "CRAB_ResubmitList", classad.ExprTree("true"))
                    schedd.act(htcondor.JobAction.Hold, rootConst)
                    schedd.edit(rootConst, "HoldKillSig", 'SIGUSR1')
                    schedd.act(htcondor.JobAction.Release, rootConst)

        results = rpipe.read()
        if results != "OK":
            raise TaskWorkerException("The CRAB3 server backend could not reubmit your task because the Grid scheduler answered with an error\n"+\
                                      "This is probably a temporary glitch, please try it again and contact an expert if the error persist\n"+\
                                      "Error reason %s" % results)
Developer: khurtado, Project: CRABServer, Lines: 71, Source: DagmanResubmitter.py


Note: The HTCondorUtils.quote method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub/MSDocs. The code snippets were selected from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. For distribution and use, please refer to the License of the corresponding project; do not reproduce without permission.