

Python ClientJobsDAO.ClientJobsDAO Class Code Examples

This article collects typical usage examples of the Python class nupic.database.ClientJobsDAO.ClientJobsDAO. If you are wondering what the ClientJobsDAO class does, how to use it, or want to see it in action, the curated class code examples below may help.


Five code examples of the ClientJobsDAO class are shown below, sorted by popularity by default.
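
ClientJobsDAO is NuPIC's data-access object for the client jobs database: workers and API servers obtain a shared instance via ClientJobsDAO.get() and then read or update job and model records through methods such as jobGetFields and jobSetFields. A minimal sketch of this recurring access pattern (the jobID value is hypothetical and assumes a job with that ID already exists):

# Minimal sketch of the ClientJobsDAO access pattern used throughout the
# examples below (hypothetical jobID).
from nupic.database.ClientJobsDAO import ClientJobsDAO

cjDAO = ClientJobsDAO.get()    # shared DAO instance for this process
jobID = 42                     # hypothetical job ID
(status,) = cjDAO.jobGetFields(jobID, ["status"])  # read fields by name
print "job %d status: %s" % (jobID, status)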

Example 1: getSwarmModelParams

# Imports assumed by this excerpt (not shown in the original snippet):
import json
import os
import shutil
import tempfile

from nupic.database.ClientJobsDAO import ClientJobsDAO
from nupic.frameworks.opf import opfhelpers


def getSwarmModelParams(modelID):
  """Retrieve the Engine-level model params from a Swarm model
  
  Args:
    modelID - Engine-level model ID of the Swarm model
  
  Returns:
    JSON-encoded string containing Model Params
  """
  
  # TODO: the use of opfhelpers.loadExperimentDescriptionScriptFromDir when
  #  retrieving model params results in a leakage of pf_base_descriptionNN and
  #  pf_descriptionNN module imports for every call to getSwarmModelParams, so
  #  the leakage is unlimited when getSwarmModelParams is called by a
  #  long-running process such as grok-api-server. This issue is presently being
  #  tracked by the JIRA: https://issues.numenta.org/browse/NPC-225. An
  #  alternate solution is to execute the guts of this function's logic in a
  #  separate process (via the multiprocessing module).
  
  cjDAO = ClientJobsDAO.get()
  
  (jobID, description) = cjDAO.modelsGetFields(
    modelID,
    ["jobId", "genDescription"])
  
  (baseDescription,) = cjDAO.jobGetFields(jobID, ["genBaseDescription"])
  
  # Construct a directory with base.py and description.py for loading model
  # params, and use opfhelpers to extract model params from those files
  descriptionDirectory = tempfile.mkdtemp()
  try:
    baseDescriptionFilePath = os.path.join(descriptionDirectory, "base.py")
    with open(baseDescriptionFilePath, mode="wb") as f:
      f.write(baseDescription)
    
    descriptionFilePath = os.path.join(descriptionDirectory, "description.py")
    with open(descriptionFilePath, mode="wb") as f:
      f.write(description)
    
    expIface = opfhelpers.getExperimentDescriptionInterfaceFromModule(
      opfhelpers.loadExperimentDescriptionScriptFromDir(descriptionDirectory))
    
    return json.dumps(
      dict(
        modelConfig=expIface.getModelDescription(),
        inferenceArgs=expIface.getModelControl().get("inferenceArgs", None)))
  finally:
    shutil.rmtree(descriptionDirectory, ignore_errors=True)
Developer ID: ChiralBehaviors, Project: nupic, Lines of code: 48, Source file: api.py
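
A hypothetical call to the function above (the model ID is made up; json is imported as in the excerpt):

# Hypothetical usage: fetch and decode the params of swarm model 12345.
paramsJSON = getSwarmModelParams(12345)
modelParams = json.loads(paramsJSON)
print modelParams["modelConfig"]
print modelParams["inferenceArgs"]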

Example 2: createAndStartSwarm

# Imports assumed by this excerpt (not shown in the original snippet):
from nupic.database.ClientJobsDAO import ClientJobsDAO
from nupic.support.configuration import Configuration


def createAndStartSwarm(client, clientInfo="", clientKey="", params="",
                        minimumWorkers=None, maximumWorkers=None,
                        alreadyRunning=False):
  """Create and start a swarm job.

  Args:
    client - A string identifying the calling client. There is a small limit
        for the length of the value. See ClientJobsDAO.CLIENT_MAX_LEN.
    clientInfo - JSON encoded dict of client specific information.
    clientKey - Foreign key. Limited in length, see ClientJobsDAO._initTables.
    params - JSON encoded dict of the parameters for the job. This can be
        fetched out of the database by the worker processes based on the jobID.
    minimumWorkers - The minimum workers to allocate to the swarm. Set to None
        to use the default.
    maximumWorkers - The maximum workers to allocate to the swarm. Set to None
        to use the swarm default. Set to 0 to use the maximum scheduler value.
    alreadyRunning - Insert a job record for an already running process. Used
        for testing.
  """
  if minimumWorkers is None:
    minimumWorkers = Configuration.getInt(
        "nupic.hypersearch.minWorkersPerSwarm")
  if maximumWorkers is None:
    maximumWorkers = Configuration.getInt(
        "nupic.hypersearch.maxWorkersPerSwarm")

  return ClientJobsDAO.get().jobInsert(
      client=client,
      cmdLine="$HYPERSEARCH",
      clientInfo=clientInfo,
      clientKey=clientKey,
      alreadyRunning=alreadyRunning,
      params=params,
      minimumWorkers=minimumWorkers,
      maximumWorkers=maximumWorkers,
      jobType=ClientJobsDAO.JOB_TYPE_HS)
Developer ID: ChiralBehaviors, Project: nupic, Lines of code: 36, Source file: api.py
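
A hypothetical invocation of the function above (the client name and params payload are made up for illustration):

# Hypothetical usage: start a swarm with the default worker limits taken
# from the nupic.hypersearch.* configuration keys.
jobID = createAndStartSwarm(
    client="myApp",                       # caller ID; length-limited, see docstring
    params='{"description": "my swarm"}')  # JSON-encoded job params dict
print "Started swarm job %d" % jobID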

Example 3: main


#.........some code omitted here.........
    parser.add_option(
        "--modelID",
        action="store",
        type="str",
        default=None,
        help=(
            "Tell worker to re-run this model ID. When specified, jobID " "must also be specified [default: %default]."
        ),
    )

    parser.add_option(
        "--workerID",
        action="store",
        type="str",
        default=None,
        help=(
            "workerID of the scheduler's SlotAgent (GenericWorker) that "
            "hosts this SpecializedWorker [default: %default]."
        ),
    )

    parser.add_option(
        "--params",
        action="store",
        default=None,
        help="Create and execute a new hypersearch request using this JSON "
        "format params string. This is helpful for unit tests and debugging. "
        "When specified jobID must NOT be specified. [default: %default].",
    )

    parser.add_option(
        "--clearModels",
        action="store_true",
        default=False,
        help="clear out the models table before starting [default: %default].",
    )

    parser.add_option(
        "--resetJobStatus",
        action="store_true",
        default=False,
        help="Reset the job status before starting  [default: %default].",
    )

    parser.add_option(
        "--logLevel",
        action="store",
        type="int",
        default=None,
        help="override default log level. Pass in an integer value that "
        "represents the desired logging level (10=logging.DEBUG, "
        "20=logging.INFO, etc.) [default: %default].",
    )

    # Evaluate command line arguments
    (options, args) = parser.parse_args(argv[1:])
    if len(args) != 0:
        raise RuntimeError("Expected no command line arguments, but got: %s" % (args))

    if options.jobID and options.params:
        raise RuntimeError("--jobID and --params can not be used at the same time")

    if options.jobID is None and options.params is None:
        raise RuntimeError("Either --jobID or --params must be specified.")

    initLogging(verbose=True)

    # Instantiate the HypersearchWorker and run it
    hst = HypersearchWorker(options, argv[1:])

    # Normal use. This is one of among a number of workers. If we encounter
    #  an exception at the outer loop here, we fail the entire job.
    if options.params is None:
        try:
            jobID = hst.run()

        except Exception, e:
            jobID = options.jobID
            msg = StringIO.StringIO()
            print >> msg, "%s: Exception occurred in Hypersearch Worker: %r" % (ErrorCodes.hypersearchLogicErr, e)
            traceback.print_exc(None, msg)

            completionReason = ClientJobsDAO.CMPL_REASON_ERROR
            completionMsg = msg.getvalue()
            hst.logger.error(completionMsg)

            # If no other worker already marked the job as failed, do so now.
            jobsDAO = ClientJobsDAO.get()
            workerCmpReason = jobsDAO.jobGetFields(options.jobID, ["workerCompletionReason"])[0]
            if workerCmpReason == ClientJobsDAO.CMPL_REASON_SUCCESS:
                jobsDAO.jobSetFields(
                    options.jobID,
                    fields=dict(
                        cancel=True,
                        workerCompletionReason=ClientJobsDAO.CMPL_REASON_ERROR,
                        workerCompletionMsg=completionMsg,
                    ),
                    useConnectionID=False,
                    ignoreUnchanged=True,
                )
Developer ID: arabenjamin, Project: nupic, Lines of code: 101, Source file: HypersearchWorker.py
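
Given the option parser above, the worker is driven from the command line; a sketch of an in-process equivalent (the argv values are hypothetical, and --jobID/--params are mutually exclusive as enforced above):

# Hypothetical in-process invocation of the main() shown above; equivalent
# to running the worker script with --params on the command line.
main(["HypersearchWorker.py", "--params", '{"hsVersion": "v2"}'])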

Example 4: run

    def run(self):
        """ Run this worker.

    Parameters:
    ----------------------------------------------------------------------
    retval:     jobID of the job we ran. This is used by unit test code
                  when calling this worker using the --params command
                  line option (which tells this worker to insert the job
                  itself).
    """
        # Easier access to options
        options = self._options

        # ---------------------------------------------------------------------
        # Connect to the jobs database
        self.logger.info("Connecting to the jobs database")
        cjDAO = ClientJobsDAO.get()

        # Get our worker ID
        self._workerID = cjDAO.getConnectionID()

        if options.clearModels:
            cjDAO.modelsClearAll()

        # -------------------------------------------------------------------------
        # if params were specified on the command line, insert a new job using
        #  them.
        if options.params is not None:
            options.jobID = cjDAO.jobInsert(
                client="hwTest",
                cmdLine="echo 'test mode'",
                params=options.params,
                alreadyRunning=True,
                minimumWorkers=1,
                maximumWorkers=1,
                jobType=cjDAO.JOB_TYPE_HS,
            )
        if options.workerID is not None:
            wID = options.workerID
        else:
            wID = self._workerID

        buildID = Configuration.get("nupic.software.buildNumber", "N/A")
        logPrefix = "<BUILDID=%s, WORKER=HW, WRKID=%s, JOBID=%s> " % (buildID, wID, options.jobID)
        ExtendedLogger.setLogPrefix(logPrefix)

        # ---------------------------------------------------------------------
        # Get the search parameters
        # If asked to reset the job status, do that now
        if options.resetJobStatus:
            cjDAO.jobSetFields(
                options.jobID,
                fields={
                    "workerCompletionReason": ClientJobsDAO.CMPL_REASON_SUCCESS,
                    "cancel": False,
                    #'engWorkerState': None
                },
                useConnectionID=False,
                ignoreUnchanged=True,
            )
        jobInfo = cjDAO.jobInfo(options.jobID)
        self.logger.info("Job info retrieved: %s" % (str(clippedObj(jobInfo))))

        # ---------------------------------------------------------------------
        # Instantiate the Hypersearch object, which will handle the logic of
        #  which models to create when we need more to evaluate.
        jobParams = json.loads(jobInfo.params)

        # Validate job params
        jsonSchemaPath = os.path.join(os.path.dirname(__file__), "jsonschema", "jobParamsSchema.json")
        validate(jobParams, schemaPath=jsonSchemaPath)

        hsVersion = jobParams.get("hsVersion", None)
        if hsVersion == "v2":
            self._hs = HypersearchV2(
                searchParams=jobParams,
                workerID=self._workerID,
                cjDAO=cjDAO,
                jobID=options.jobID,
                logLevel=options.logLevel,
            )
        else:
            raise RuntimeError("Invalid Hypersearch implementation (%s) specified" % (hsVersion))

        # =====================================================================
        # The main loop.
        try:
            exit = False
            numModelsTotal = 0
            print >>sys.stderr, "reporter:status:Evaluating first model..."
            while not exit:

                # ------------------------------------------------------------------
                # Choose a model to evaluate
                batchSize = 10  # How many to try at a time.
                modelIDToRun = None
                while modelIDToRun is None:

                    if options.modelID is None:
                        # -----------------------------------------------------------------
#.........some code omitted here.........
Developer ID: arabenjamin, Project: nupic, Lines of code: 101, Source file: HypersearchWorker.py
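
The resetJobStatus branch above illustrates the general jobSetFields update pattern; a standalone sketch (the jobID is hypothetical):

# Sketch: reset a job's worker-completion state, as run() does above.
cjDAO = ClientJobsDAO.get()
cjDAO.jobSetFields(
    42,                                    # hypothetical jobID
    fields={"workerCompletionReason": ClientJobsDAO.CMPL_REASON_SUCCESS,
            "cancel": False},
    useConnectionID=False,                 # update regardless of owning worker
    ignoreUnchanged=True)                  # don't fail if values already match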

Example 5: run

  def run(self):
    """ Run this worker.

    Parameters:
    ----------------------------------------------------------------------
    retval:     jobID of the job we ran. This is used by unit test code
                  when calling this worker using the --params command
                  line option (which tells this worker to insert the job
                  itself).
    """
    # Easier access to options
    options = self._options

    # ---------------------------------------------------------------------
    # Connect to the jobs database
    self.logger.info("Connecting to the jobs database")
    cjDAO = ClientJobsDAO.get()

    # Get our worker ID
    self._workerID = cjDAO.getConnectionID()


    # -------------------------------------------------------------------------
    # if params were specified on the command line, insert a new job using
    #  them.
    if options.params is not None:
      options.jobID = cjDAO.jobInsert(client='dummy',
                  cmdLine="python -m nupic.swarming.DummyWorker --jobID={JOBID}",
                  params=options.params)



    # ---------------------------------------------------------------------
    # Get the search parameters
    jobInfo = cjDAO.jobInfo(options.jobID)
    self.logger.info("Job info retrieved: %s" % (str(jobInfo)))
    if options.workerID is not None:
      wID = options.workerID
    else:
      wID = self._workerID
    
    buildID = Configuration.get('nupic.software.buildNumber', 'N/A')
    logPrefix = '<BUILDID=%s, WORKER=DW, WRKID=%s, JOBID=%s> ' % \
                (buildID, wID, options.jobID)
    ExtendedLogger.setLogPrefix(logPrefix)


    # ---------------------------------------------------------------------
    # Instantiate the Dummy object, which will handle the logic of
    #  which models to create when we need more to evaluate.
    jobParams = json.loads(jobInfo.params)
    self.logger.info("Job Params: %s" % jobInfo.params)

    # prints the current status
    print >>sys.stderr, "reporter:status:Running dummy worker on job:%d" % \
                                                    (options.jobID)


    self.logger.info("Start of the dummy worker")
    startTime = time.time()
    runTime = jobParams['runTime']
    jobLoad = jobParams['load']
    crashJob = jobParams['crash']

    try:
      while True:
        if runTime != -1 and time.time() > startTime + runTime:
          break
        self.logger.info("In dummy worker")
        if jobLoad == 'heavy':
          # Computationally intensive process
          # Takes 0.8 sec approximately
          numIterations = 30000
          for i in range(numIterations):
            d = numpy.random.rand(1000).sum()
        else:
          time.sleep(0.8)
    except Exception:
      self.logger.exception("DummyWorker exception;")

    if crashJob:
      self.logger.info("Crash of the dummy worker")
      print >>sys.stderr, "reporter:status:Crashed dummy worker..."
      raise RuntimeError("Simulating job crash.")
    else:
      self.logger.info("End of the dummy worker")
      print >>sys.stderr, "reporter:status:Finished dummy worker..."

    #import auxilary
    #auxilary.do_something()

    return options.jobID
Developer ID: AndreCAndersen, Project: nupic, Lines of code: 92, Source file: DummyWorker.py
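
The params JSON this dummy worker expects can be read off the jobParams lookups above; a hypothetical payload:

# Hypothetical params payload for the dummy worker (keys taken from the
# jobParams reads in the code above).
import json

params = json.dumps({
    "runTime": 10,     # seconds to run; -1 means no time limit
    "load": "heavy",   # 'heavy' burns CPU in ~0.8s chunks; else sleep(0.8)
    "crash": False,    # True makes the worker raise RuntimeError at the end
})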


Note: The nupic.database.ClientJobsDAO.ClientJobsDAO class examples in this article were collected by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by many developers, and copyright in the source code remains with the original authors; consult each project's License before distributing or using the code. Do not reproduce without permission.