Python NTRC.tracef Method Code Examples

This article collects typical usage examples of the Python method NewTraceFac.NTRC.tracef. If you are wondering how to use NTRC.tracef in Python, what it does, or what real calls to it look like, the curated examples below should help. You can also explore further usage examples of the containing module, NewTraceFac.NTRC.


Fifteen code examples of the NTRC.tracef method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
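
Before the individual examples, here is a minimal sketch of the calling pattern they all share: an integer trace level (the examples pass values from 0 to 5), a short facility tag such as "SHLF" or "MCHV", and a %-formatted message string. This pattern is inferred from the examples below rather than from NewTraceFac's own documentation, and the level, tag, and message in the sketch are illustrative placeholders.

from NewTraceFac import NTRC

# Illustrative sketch only: emit a trace message at detail level 3 under the
# made-up facility tag "DEMO".  The examples below use levels 0 through 5 and
# four-character facility tags; messages are ordinary %-formatted strings.
nCount = 42
NTRC.tracef(3, "DEMO", "proc demo item count|%s|" % (nCount))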

Example 1: fntMatchValue

# Required import: from NewTraceFac import NTRC [as alias]
# Or: from NewTraceFac.NTRC import tracef [as alias]
def fntMatchValue(mysLine,mydVar):
    '''\
    Extract value from line according to valueregex for var.
     If no value found, supply suitably disappointing string.  
    Get the right word from the line.
     If asked for word zero, use the whole line.  
     Makes the extraction harder, but sometimes necessary.
    '''
    sWordnumber = mydVar["wordnumber"]
    nWordnumber = int(sWordnumber)
    lWords = mysLine.split()
    if nWordnumber == 0:
        sWord = mysLine
    elif nWordnumber <= len(lWords):
        sWord = lWords[nWordnumber-1]
    else: 
        sWord = "nowordhere_indexoutofrange"
    sValueregex = mydVar["valueregex"]
    sVarname = mydVar["varname"]
    oMatch = re.search(sValueregex,sWord)
    NTRC.tracef(5,"MCHV","proc MatchValue matching word var|%s| word|%s| valueregex|%s| matchobj|%s|" % (sVarname,sWord,sValueregex,oMatch))
    if oMatch:
        # Word matches the valueregex.  Save the value.
        sValue = oMatch.group(1)
        NTRC.tracef(3,"MCHV","proc addvalue name|%s| val|%s|" % (sVarname,sValue))
    else:
        # If not found, at least supply something conspicuous for printing.
        sValue = "novaluefound"
    return (sVarname,sValue)
Developer ID: MIT-Informatics, Project: PreservationSimulation, Lines of code: 31, Source file: extractvalues2.py

Example 2: mDestroyCopy

# Required import: from NewTraceFac import NTRC [as alias]
# Or: from NewTraceFac.NTRC import tracef [as alias]
 def mDestroyCopy(self,mysCopyID):
     try:
         nCopyIndex = self.lCopyIDs.index(mysCopyID)
     except ValueError:
         NTRC.tracef(0, "SHLF", "BUGCHECK copyID not found for removal|%s|" 
             % (mysCopyID))
         return False
     # Remove doc and copy from current lists.  
     del self.lCopyIDs[nCopyIndex]
     del self.lDocIDs[nCopyIndex]
     # Tell the server that the copy is gone.
     cCopy = G.dID2Copy[mysCopyID]
     sDocID = cCopy.sDocID
     self.cServer.mDestroyCopy(mysCopyID, sDocID, self.ID)
     # And give back the space it occupied.  
     self.bContig = False
     cDoc = G.dID2Document[sDocID]
     
     # BZZZT: DO NOT put this region back into use.  It has already 
     # suffered an error once and caused a document to fail.  
     #self.nFreeSpace += cDoc.nSize
     NTRC.tracef(3, "SHLF", "proc mDestroyCopy remove doc|%s| copy|%s| "
         "idx|%d| size|%d| from shelf|%s| remainingdocs|%d| free|%d|" 
         % (cCopy.sDocID, mysCopyID, nCopyIndex, cDoc.nSize, self.ID, 
         len(self.lCopyIDs), self.nFreeSpace))
     # And, at long last, destroy the Copy object itself.
     del cCopy
     return self.ID + "-" + sDocID + "-" + mysCopyID
Developer ID: MIT-Informatics, Project: PreservationSimulation, Lines of code: 30, Source file: shelf.py

Example 3: fndFormatQuery

# Required import: from NewTraceFac import NTRC [as alias]
# Or: from NewTraceFac.NTRC import tracef [as alias]
    def fndFormatQuery(self, mydCli, myg):
        '''
        Take all the CLI options that might specify a searchable attribute, and 
         construct a MongoDB or searchspace query dictionary.  
         This is lots nastier than it first appears to be
         because json is so bloody picky.
        '''
        dOut = dict()
        for sAttrib,sValue in mydCli.items():
            result = None
            if sValue is not None:
                # Is it something valid in json?                
                try:
                    result = json.loads(sValue)
                except ValueError:
                    # Is it a string that should be an integer, ok in json?
                    try:
                        result = int(sValue)
                    except:
                        # Is it a naked string for some string-valued var
                        #  that isn't just Y/N or a mandatory string?  
                        #  Rule out dict values that are already formatted.
                        if (isinstance(sValue, str)
                            and sAttrib not in myg.lYesNoOptions
                            and sAttrib not in myg.lMandatoryArgs
                            and '{' not in sValue
                            and '}' not in sValue
                            and ':' not in sValue
                            and ',' not in sValue
                            ):
                            result = '{"$eq":' + '"'+sValue+'"' + '}'
                        else:
                            result = sValue
                    NTRC.tracef(3, "FMT", "proc FormatQuery notjson item "
                        "key|%s| val|%s| result|%s|" 
                        % (sAttrib, sValue, result))
            NTRC.tracef(3, "FMT", "proc FormatQuery item key|%s| val|%s| result|%s|" 
                % (sAttrib, sValue, result))
            # Can't process dicts thru json twice.
            if isinstance(result, dict):
                dOut[sAttrib] = sValue
            else:
                dOut[sAttrib] = result

        # Allow only attribs that appear in the database, else will get 
        #  no results due to implied AND of all items in query dict.  
        dOutSafe = {k:v for k,v in dOut.items() if k in myg.lSearchables}
        dOutNotNone = {k:v for k,v in dOutSafe.items() if v is not None}
        NTRC.ntracef(3,"FMT","proc dict b4|%s| \nsafe|%s|\nclean|%s|" 
            % (dOut,dOutSafe,dOutNotNone))
        if "sQuery" in dOutNotNone.keys():
            # If the brave user has supplied a full, standalone query string,
            #  add its contents to the query dict so far.
            dTmp = dOutNotNone["sQuery"]
            del dOutNotNone["sQuery"]
            dOutNotNone.update(dTmp)
        return dOutNotNone
Developer ID: MIT-Informatics, Project: PreservationSimulation, Lines of code: 59, Source file: brokerformat.py

Example 4: fntDoesLineMatchThisVar

# Required import: from NewTraceFac import NTRC [as alias]
# Or: from NewTraceFac.NTRC import tracef [as alias]
def fntDoesLineMatchThisVar(mysLine, mynLineNr, mysVarname):
    '''\
    Check line against lineregex of var.
    Return tuple (matchobject, line, varname).
    '''
    dVar = g.dVars[mysVarname]
    sLineregex = dVar["lineregex"]
    oMatch = re.search(sLineregex,mysLine)
    NTRC.tracef(5,"MTLN","proc MatchLine try regex|%s| var|%s| nr|%s| line|%s| match|%s|" % (sLineregex,mysVarname,mynLineNr,mysLine,oMatch))
    if oMatch:
        NTRC.tracef(3,"LINE","proc MatchLine found line|%s|=|%s| var|%s| regex|%s|" % (mynLineNr,mysLine,mysVarname,sLineregex))
    return (oMatch, mysLine, mysVarname)
Developer ID: MIT-Informatics, Project: PreservationSimulation, Lines of code: 14, Source file: extractvalues2.py

Example 5: fnldParseInput

# Required import: from NewTraceFac import NTRC [as alias]
# Or: from NewTraceFac.NTRC import tracef [as alias]
def fnldParseInput(mysFilename):
    ''' Return tuple containing
        - the output template string, 
        - a list, one item per line, of dicts of column args from the 
          csv that contain instructions for getting variable values
          from lines.  
        Beware duck-type integers that become strings.

        Format of csv lines:        
        varname,regex to find line,split word number,regex to strip out value

        instruction file format:

        ##becomes comment in output
        ###send out this string as header for the output, no hashes
        =outputformat
        format string
        =variables
        varname,lineregex,wordnumber,valueregex (header)
        (lines of csv data)

    '''
    dParams = dict()
    with open(mysFilename,"rb") as fhInfile:
        # Remove comments.  
        lLines = filter( lambda sLine:                          \
                        not re.match("^ *#[^#]",sLine)          \
                        and not re.match("^ *$",sLine.rstrip()) \
                        , fhInfile )

        # Get the output template.  It may be longer than one line.  
        lTemplate = fnlLinesInRange(lLines,"^=template","^=variables")
        lTemplate = map( lambda sLine: sLine.rstrip().replace("###","").replace("##","#"), lTemplate )
        NTRC.tracef(3,"INPT","proc ParseInput template|%s|" % (lTemplate))

        # Fix the separator in the template according to the user spec.
        lAllTemplateNames = [lTemplateLine.split() for lTemplateLine in lTemplate]
        lNewTemplate = [g.sSeparator.join(lTemplateNamesOneLine) \
            for lTemplateNamesOneLine in lAllTemplateNames]

        # Now get the CSV args into a dictionary of dictionaries.
        lVarLines = fnlLinesInRange(lLines,"^=variables","^=thiswillnotbefound")
        lRowDicts = csv.DictReader(lVarLines)
        NTRC.tracef(5,"INPT","proc ParseInput lRowDicts all|%s|" % (lRowDicts))
        
        dParams = dict( map( lambda dRowDict:   \
            (dRowDict["varname"],dRowDict)      \
            , lRowDicts ))

    return (lNewTemplate,dParams)
Developer ID: MIT-Informatics, Project: PreservationSimulation, Lines of code: 52, Source file: extractvalues2.py

Example 6: mAuditCollection

# Required import: from NewTraceFac import NTRC [as alias]
# Or: from NewTraceFac.NTRC import tracef [as alias]
    def mAuditCollection(self, mynCycleInterval, mynSegments, mysCollectionID, 
            myeCallerSyncEvent):
        '''\
        SimPy generator to audit an entire collection.
        Divide the collection into segments and schedule audits
        for each segment in turn.
        '''
        fTimeCycleBegin = G.env.now
        lg.logInfo("AUDIT2","begin colln t|%10.3f| auditid|%s| cycle|%s| cli|%s| coll|%s|" % (G.env.now,self.ID,self.nNumberOfCycles,self.sClientID,self.sCollectionID))

        for iThisSegment in range(mynSegments):
            tSegmentStartTime = G.env.now
            nSegmentInterval = self.mCalcSegmentInterval(mynCycleInterval, 
                mynSegments)
            bLastSegment = (iThisSegment == mynSegments-1)

            self.lDocsThisSegment = self.mIdentifySegment(mysCollectionID, 
                mynSegments, iThisSegment)
            eSyncEvent = G.env.event()
            G.env.process(
                self.mAuditSegment(iThisSegment, self.lDocsThisSegment, 
                mysCollectionID, eSyncEvent))
            # Wait for completion of segment and its allotted time.
            yield eSyncEvent
            tNextSegmentStartTime = tSegmentStartTime + nSegmentInterval
            NTRC.tracef(3, "AUD2", "proc AuditCollection1 now|%s| tstart|%s| "
                "tnext|%s| tinterval|%s| blastseg|%s|" 
                % (G.env.now, tSegmentStartTime, tNextSegmentStartTime, 
                nSegmentInterval, bLastSegment))
            yield G.env.timeout(tNextSegmentStartTime - G.env.now)
        
        fTimeCycleEnd = G.env.now
        self.fTimeCycleLength = fTimeCycleEnd - fTimeCycleBegin
        lg.logInfo("AUDIT2", "end colln   t|%10.3f| auditid|%s| cycle|%s| "
            "cli|%s| coll|%s| repairs|%d| total|%d| perms|%d| "
            "majority|%s| minority|%d| duration|%9.3f|" 
            % (G.env.now, self.ID, self.nNumberOfCycles, self.sClientID, 
            self.sCollectionID, self.nRepairsThisCycle, self.nRepairsTotal, 
            self.nPermanentLosses, self.nRepairsMajority, 
            self.nRepairsMinority, self.fTimeCycleLength))
        # Tell the caller that we finished.
        myeCallerSyncEvent.succeed(value=self.nNumberOfCycles)
Developer ID: MIT-Informatics, Project: PreservationSimulation, Lines of code: 44, Source file: audit2.py

Example 7: mAddDocument

# Required import: from NewTraceFac import NTRC [as alias]
# Or: from NewTraceFac.NTRC import tracef [as alias]
    def mAddDocument(self, mysDocID, mysClientID):
        ''' Add a document to this shelf and record some information
            in the document itself.
        '''
        self.lDocIDs.append(mysDocID)
        self.lDocIDsComplete.append(mysDocID)
        self.lClientIDs.append(mysClientID)
        cDoc = G.dID2Document[mysDocID]
        nSize = cDoc.nSize

        # Make a copy of the document and shelve that.  
        cCopy = CCopy(mysDocID, mysClientID, self.sServerID)
        sCopyID = cCopy.ID
        NTRC.tracef(3,"SHLF","proc mAddDocument made copy|%s| of doc|%s| "
            "from client|%s|" 
            % (sCopyID, mysDocID, mysClientID))

        # Where does document go on this shelf.  Closed interval [Begin,End].
#        nBlkBegin = self.nCapacity - self.nFreeSpace
        # BZZZT: Never reuse space.  Any empty space in the area that 
        # *used* to be occupied by documents has already been damaged
        # and destroyed a document.  Do not reuse the space.  
        # Yeah, I know it's all just hypothetical, but why not.  
        nBlkBegin = self.nHiWater + 1
        self.nFreeSpace -= nSize
        nBlkEnd = nBlkBegin + nSize - 1
        if nBlkEnd > self.nHiWater:
            self.nHiWater = nBlkEnd         # Last block used.  
#        sShelfID = self.ID
#        sServerID = self.sServerID
        cCopy.mShelveCopy(self.sServerID, self.ID, nBlkBegin, nBlkEnd)
        self.lCopyIDs.append(sCopyID)
        self.lCopyIDsComplete.append(sCopyID)
        self.lCopyTops.append(nBlkEnd)

        cDoc.mCopyPlacedOnServer(sCopyID, self.sServerID)
        NTRC.tracef(5,"SHLF","proc mAddDocument add doc|%s| to shelf|%s| "
            "size|%d| remaining|%d|" 
            % (mysDocID,self.ID,nSize,self.nFreeSpace))
        
        return self.sServerID+"+"+self.ID+"+"+mysDocID+"+"+sCopyID
Developer ID: MIT-Informatics, Project: PreservationSimulation, Lines of code: 43, Source file: shelf.py

Example 8: main

# Required import: from NewTraceFac import NTRC [as alias]
# Or: from NewTraceFac.NTRC import tracef [as alias]
def main(mysInputFilename):
    pass
    # Create output template.
    lTemplate = map(lambda field: ("{" + field + "}"), g.lCoreColumns)
    sTemplate = " ".join(lTemplate)

    # Process file.
    with open(mysInputFilename, "r") as fhIn:
        nErrors = 0
        lErrors = []
        oReader = csv.reader(fhIn, delimiter=g.sSeparator)
    
        # First line better be the header.
        lHeader = next(oReader)
        NTRC.tracef(3, "NARO", "proc lHeader|%s|" % (lHeader))
    
        # For each data line, create dict of values and map them into 
        #  the reduced-width output template.  
        print(g.sCoreColumns)
        nLine = 1               # Count the header line as 1.
        for lValues in oReader:
            NTRC.tracef(3, "NARO", "proc lValues|%s|" % (lValues))
            dValues = dict(zip(lHeader, lValues))
            NTRC.tracef(3, "NARO", "proc dValues|%s|" % (dValues))
            sOut = sTemplate.format(**dValues)
            nLine += 1
            print(sOut)
            if "nolinefound" in sOut:
                nErrors += 1
                lErrors.append(nLine)
    if nErrors > 0:
        print("#ERROR - MISSING DATA nolinefound at %s" % (lErrors))
Developer ID: MIT-Informatics, Project: PreservationSimulation, Lines of code: 34, Source file: narrowfile.py

Example 9: mAge_shelf

# Required import: from NewTraceFac import NTRC [as alias]
# Or: from NewTraceFac.NTRC import tracef [as alias]
    def mAge_shelf(self, mynLifeParam):
        ''' An entire shelf fails.  Remove all the docs it contained.
            Eventually, this will trigger a repair event and make the 
            collection more vulnerable during the repair.  
        '''
        fShelfLife = util.makeexpo(mynLifeParam)
        lg.logInfo("SERVER", "mAge_shelf set lifetime time|%6.0f| shelf|%s| "
            "next lifetime|%.3f|khr" 
            % (G.env.now,self.ID,fShelfLife))
        NTRC.tracef(3, "SHLF", "proc mAge_shelf  time|%6.0f| shelf|%s| "
            "next lifetime|%.3f|khr" 
            % (G.env.now,self.ID,fShelfLife))
        yield G.env.timeout(fShelfLife)

        # S H E L F  F A I L S 
        G.nTimeLastEvent = G.env.now
        self.bAlive = False         # Shelf can no longer be used to store docs.
        NTRC.tracef(3, "SHLF", "proc mAge_shelf  time|%d| shelf|%s| shelf_error" 
            % (G.env.now,self.ID))
        lg.logInfo("SERVER", "storage shelf failed time|%6.0f| server|%s| "
            "shelf|%s| lost |%d| docs" 
            % (G.env.now,self.sServerID,self.ID,len(self.lCopyIDs)))
        # This whole shelf is a goner.  Kill it. 
        NTRC.tracef(5, "SHLF", "proc mAge_shelf kill contents ldocs|%s| "
            "lcopies|%s|" 
            % (self.lDocIDs,self.lCopyIDs)) 
        # Note that we have to copy the list before modifying it and 
        # iterate over the copy of the list.  
        # Standard problem with updating an iterable inside the for loop.
        templCopyIDs = copy.deepcopy(self.lCopyIDs)
        for sCopyID in templCopyIDs:
            sDocID = G.dID2Copy[sCopyID].sDocID
            self.mDestroyCopy(sCopyID)
#            G.dID2Server[self.sServerID].mDestroyDocument(sDocID,self.ID)
            G.dID2Server[self.sServerID].mDestroyCopy(sCopyID, sDocID, self.ID)
            self.mReportDocumentLost(sDocID)
        NTRC.tracef(3, "FAIL", "proc t|%d| shelf failure server|%s| qual|%d| "
            "shelf|%s| docs|%d|" 
            % (G.env.now, self.sServerID, G.dID2Server[self.sServerID].nQual, 
            self.ID,len(templCopyIDs)))
Developer ID: MIT-Informatics, Project: PreservationSimulation, Lines of code: 42, Source file: shelf.py

Example 10: logSetConfig

# Required import: from NewTraceFac import NTRC [as alias]
# Or: from NewTraceFac.NTRC import tracef [as alias]
def logSetConfig(mysLogLevel,mysLogFile):
    lLogLevels = 'NOTSET CRITICAL ERROR WARNING INFO DEBUG'.split()
    sLogLevel = mysLogLevel.upper()
    if sLogLevel not in lLogLevels:
        NTRC.tracef(0,"LGOU","ERROR unrecognized logging level|%s|" % (mysLogLevel))
        sLogLevel = "NOTSET"

    # Set the logging level for this session.
    NTRC.tracef(3,"LGOU","proc sLogLevel|%s|"%(sLogLevel))
    logger.setLevel(sLogLevel.upper())

    ''' Set the output file for logging.
        Either to a filename in LOG_FILE param or environ variable, 
        or to the console using StreamHandler.
    '''
    if  mysLogFile != ""    \
    and mysLogFile != " "   \
    and mysLogFile != "-"   \
    and mysLogFile.upper() != "NONE"    \
    and mysLogFile.upper() != "CONSOLE" \
    and mysLogFile.upper() != "STDOUT"  :
        channel = logging.FileHandler(mysLogFile)
    else:
        channel = logging.StreamHandler()
    NTRC.tracef(3,"LGOU","proc set log handler mysLogFile|%s|" % (mysLogFile))

    ''' Adjust the format of log output to match the time stamps
        we have used in TRACE forever.  
    '''
    # Create formatter instance.
    formatter = logging.Formatter(fmt='%(asctime)s %(name)s %(levelname)s - %(message)s', datefmt='%Y%m%d_%H%M%S')
    # Add formatter to the output channel.
    channel.setFormatter(formatter)
    # Finally, add the channel handler to the logger.
    logger.addHandler(channel)
    
    return logger
Developer ID: MIT-Informatics, Project: PreservationSimulation, Lines of code: 39, Source file: logoutput.py

Example 11: mSelectVictimCopy

# Required import: from NewTraceFac import NTRC [as alias]
# Or: from NewTraceFac.NTRC import tracef [as alias]
    def mSelectVictimCopy(self, mynErrorSize):
        ''' Which doc copy on this shelf, if any, was hit by this error?
            Throw a uniform dart at all the docs on the shelf, see 
            which one gets hit, or dart falls into empty space.  Doc size counts.  
        '''
        nRandomSpot = util.makeunif(1, self.nCapacity + mynErrorSize - 1)
        nLoc = 0
        NTRC.tracef(5, "SHLF", "proc SelectVictimCopy0 wherehit spot|%s| "
            "hiwater|%s|  shelfid|%s| capacity|%s|" 
            % (nRandomSpot,self.nHiWater,self.ID,self.nCapacity))
        # First, check to see if the failure is maybe in an occupied region.  
        if nRandomSpot <= self.nHiWater:
            # Find the document hit by the error.  May have been hit before, too.  
            # New version, vanilla binary search with adjacent interval checking
            #  on list of all locations assigned on this shelf.
            # After you find the location, check to see that it 
            #  is still occupied by live copy.  
            nLen = len(self.lCopyIDsComplete)
            nDist = (nLen + 1) / 2
            nLoc = nDist
            NTRC.tracef(5, "SHLF", "proc SelectVictimCopy0 searchsetup len|%s| "
                "loc|%s| dist|%s|" 
                % (nLen, nLoc, nDist))
            while 1:
                if nLoc <= 0: nLoc = 1
                if nLoc >= nLen: nLoc = nLen - 1
                nDist = (nDist + 1) / 2
                if nDist == 0: nDist = 1

                nTop = self.lCopyTops[nLoc]
                nBottom = self.lCopyTops[nLoc-1]
                sCopyID = self.lCopyIDsComplete[nLoc-1]
                sDocID = self.lDocIDsComplete[nLoc-1]
                cCopy = G.dID2Copy[sCopyID]

                if nRandomSpot <= nTop:
                    # Lower than top, look down.
                    if nRandomSpot >= nBottom:
                        # Found to left of nLoc.  
                        NTRC.tracef(5, "SHLF", "proc SelectVictimCopy5D "
                            "found victim id|%s| at spot|%s| in[%s,%s]| " 
                            "doc|%s|" 
                            % (sCopyID, nRandomSpot, nBottom, nTop, sDocID))
                        # Is this slot still occupied by a live copy?
                        if sCopyID in self.lCopyIDs:
                            sVictimID = sCopyID
                            NTRC.tracef(3, "SHLF", "proc mSelectVictimCopy "
                                "NEWD end shelf|%s| spot|%d| hits doc|%s| "
                                "placed[%d,%d] size|%d| outof|%d|" 
                                % (self.ID, nRandomSpot, sVictimID, 
                                cCopy.nBlkBegin, cCopy.nBlkEnd, 
                                (cCopy.nBlkEnd-cCopy.nBlkBegin+1), 
                                self.nCapacity))
                        else:
                            sVictimID = None
                            NTRC.tracef(5, "SHLF", "proc SelectVictimCopy2D "
                                "no longer valid copyid|%s| docid|%s|" 
                                % (sCopyID, sDocID))
                            self.nMultipleHits += 1
                        break
                    else:
                        nLoc -= nDist
                        NTRC.tracef(5, "SHLF", "proc SelectVictimCopy3D "
                            "down spot|%s| intvl|[%s,%s| newloc|%s| newdist|%s|" 
                            % (nRandomSpot, nBottom, nTop, nLoc, nDist))
                else:
                    # Higher than top, look up.
                    if nRandomSpot <= self.lCopyTops[nLoc+1]:
                        # Found to right of nLoc.
                        # Reevaluate ids and locations to the next slot 
                        #  on the right.  
                        sCopyID = self.lCopyIDsComplete[nLoc+1-1]
                        sDocID = self.lDocIDsComplete[nLoc+1-1]
                        cCopy = G.dID2Copy[sCopyID]
                        nBottom = self.lCopyTops[nLoc+1-1]
                        sCopyID = self.lCopyIDsComplete[nLoc+1-1]
                        NTRC.tracef(5, "SHLF", "proc SelectVictimCopy5U "
                            "found victim id|%s| at spot|%s| in[%s,%s]| doc|%s|" 
                            % (sCopyID, nRandomSpot, nBottom, nTop, sDocID))
                        # Is this slot still occupied by a live copy?
                        if sCopyID in self.lCopyIDs:
                            sVictimID = sCopyID
                            NTRC.tracef(3, "SHLF", "proc mSelectVictimCopy NEWU "
                                "end shelf|%s| spot|%d| hits doc|%s| "
                                "placed[%d,%d] size|%d| outof|%d|" 
                                % (self.ID, nRandomSpot, sVictimID, 
                                cCopy.nBlkBegin, cCopy.nBlkEnd, 
                                (cCopy.nBlkEnd-cCopy.nBlkBegin+1), 
                                self.nCapacity))
                        else:
                            sVictimID = None
                            NTRC.tracef(5, "SHLF", "proc SelectVictimCopy2U "
                                "no longer valid copyid|%s| docid|%s|" 
                                % (sCopyID, sDocID))
                            self.nMultipleHits += 1
                        break
                    else:
                        nLoc += nDist
                        NTRC.tracef(5, "SHLF", "proc SelectVictimCopy3U up   "
                            "spot|%s| intvl|[%s,%s| newloc|%s| newdist|%s|" 
#......... remainder of this method omitted .........
Developer ID: MIT-Informatics, Project: PreservationSimulation, Lines of code: 103, Source file: shelf.py

Example 12: mAge_sector

# Required import: from NewTraceFac import NTRC [as alias]
# Or: from NewTraceFac.NTRC import tracef [as alias]
    def mAge_sector(self):
        ''' A sector in the shelf fails.  This corrupts a document.
            For the moment, assume that it destroys the document.  
            Eventually, it will have a probability of destroying the 
            document depending on the portion of the document 
            corrupted and the sensitivity of the document to corruption
            (e.g., compressed or encrypted), or the failure hits an
            encryption or license key.  
        '''
        # If the shelf has been emptied by a shelf failure, stop 
        # caring about sector failures.
        while self.bAlive:
            # Sector lifetime depends on shelf lifetime and glitch age.
            fNow = G.env.now
            cLifetime = G.dID2Lifetime[self.sSectorLifetimeID]
            fLifetimeNow = cLifetime.mfCalcCurrentSectorLifetime(fNow)
            fSectorLifeInterval = util.makeexpo(fLifetimeNow)
            NTRC.tracef(3, "SHLF", "proc mAge_sector time|%d| shelf|%s| "
                "next interval|%.3f|hr from life rate|%.3f|hr" 
                % (G.env.now, self.ID, fSectorLifeInterval, fLifetimeNow))
            yield G.env.timeout(fSectorLifeInterval)

            # S E C T O R  E R R O R
            self.nSectorHits += 1
            G.nTimeLastEvent = G.env.now
            NTRC.tracef(3, "SHLF", "proc mAge_sector time|%d| shelf|%s| "
                "Sector_error hits|%d| emptyhits|%d|" 
                % (G.env.now, self.ID, self.nSectorHits, self.nEmptySectorHits))

            # Select a victim Document, probability proportional to size.
            # Small error, size=1.  What doc dies as a result?
            sCopyVictimID = self.mSelectVictimCopy(mynErrorSize=1)

            # New version: compress strings of consecutive misses into single line.
            # Normally we log one line per error regardless of whether it hits or 
            # misses a document.  That results in hideously long log files for 
            # sparse storage structures, like small docs on large shelf. 
            # Count consecutive misses, and issue one summary line before the 
            # next hit.
            # CANDIDATE FOR REFACTORING
            if sCopyVictimID:               # Hidden error in victim doc.
                # Destroy copy on this shelf.
                cCopy = G.dID2Copy[sCopyVictimID]
                sDocID = cCopy.mGetDocID()
                self.mDestroyCopy(sCopyVictimID)
                # Log the summary line if we just ended a string of misses
                if self.nConsecutiveMisses > 0:
                    lg.logInfo("SERVER", "small error t|%6.0f| svr|%s| "
                        "shelf|%s| consecutive misses|%d|" 
                        % (G.env.now, self.sServerID, self.ID, 
                        self.nConsecutiveMisses))
                self.nConsecutiveMisses = 0
                lg.logInfo("SERVER", "small error t|%6.0f| svr|%s| "
                    "shelf|%s| hidden failure in copy|%s| doc|%s|" 
                    % (G.env.now,self.sServerID,self.ID,sCopyVictimID,sDocID))
                NTRC.tracef(3, "FAIL", "proc t|%d| sector failure server|%s| "
                    "qual|%d| shelf|%s| doc|%s| copy|%s|" 
                    % (G.env.now, self.sServerID, 
                    G.dID2Server[self.sServerID].nQual, self.ID, sDocID, 
                    sCopyVictimID))
            else:                           # No victim, hit empty space.
                self.nEmptySectorHits += 1
                NTRC.tracef(3, "SHLF", "proc mAge_sector shelf|%s| "
                    "sector error fell in empty space" 
                    % (self.ID))
                if self.nConsecutiveMisses == 0:
                    lg.logInfo("SERVER", "small error t|%6.0f| svr|%s| "
                        "shelf|%s| hidden failure in copy|%s|" 
                        % (G.env.now, self.sServerID, self.ID, sCopyVictimID))
                self.nConsecutiveMisses += 1
                NTRC.tracef(3, "FAIL", "proc t|%d| sector failure server|%s| "
                    "qual|%d| shelf|%s| copy|%s|" 
                    % (G.env.now, self.sServerID, 
                    G.dID2Server[self.sServerID].nQual, self.ID, sCopyVictimID))
            # Initiate a repair of the dead document.
            # BZZZT NYI: currently all such failures are silent, so they are 
            #  not detected by the client until audited (or end of run).  
        # Shelf is no longer alive, so we do not notice or schedule 
        #  future sector errors.  Log the event.  
        lg.logInfo("SHELF ", "t|%6.0f| dead shelf|%s| of svr|%s|, "
            "no future errors" 
            % (G.env.now, self.ID, self.sServerID))
Developer ID: MIT-Informatics, Project: PreservationSimulation, Lines of code: 84, Source file: shelf.py

Example 13: main

# Required import: from NewTraceFac import NTRC [as alias]
# Or: from NewTraceFac.NTRC import tracef [as alias]
def main():
    '''
    Process:
    - Parse the CLI command into g.various data items.
    - Validate user-supplied directories; get environment variables.
    - Query the searchspace for the stream of instructions
    - For each instruction from database selection, get dict for line
    - Using dict args, construct plausible command lines, into file
    - Check to see that there aren't too many similar processes 
      already running; if too many, then wait.
    - Launch ListActor process to execute commands.
    - Wait a polite interval before launching another.
    '''
    NTRC.ntracef(0, "MAIN", "Begin.")
    NTRC.ntracef(0, "MAIN", "TRACE  traceproduction|%s|" % NTRC.isProduction())

    sBrokerCommand = fnsReconstituteCommand(sys.argv)
    fnbMaybeLogCommand(sBrokerCommand)
    NTRC.ntracef(0, "MAIN", "command=|%s|" % (sBrokerCommand.rstrip()))

    # Get args from CLI and put them into the global data
    dCliDict = brokercli.fndCliParse("")
    # Carefully insert any new CLI values into the Global object.  
    dCliDictClean = {k:util.fnIntPlease(v) for k,v in dCliDict.items() 
                        if v is not None}
    g.__dict__.update(dCliDictClean)

    # Validate that the user-specified directories exist.
    if not fnbValidateDir(g.sFamilyDir):
        raise ValueError("FamilyDir \"%s\" not found" % (g.sFamilyDir))
    if not fnbValidateDir("%s/%s" % (g.sFamilyDir, g.sSpecificDir)):
        raise ValueError("SpecificDir \"%s\" not found" % (g.sSpecificDir))

    # Get command templates from external file.
    fnGetCommandTemplates(g.sCommandListFilename)

    # Construct database query for this invocation.
    g.cFmt = brokerformat.CFormat()
    dQuery = g.cFmt.fndFormatQuery(dCliDict, g)

    # Look for overriding environment variables
    fnvGetEnvironmentOverrides()

    # Open the database to keep "done" records,
    #  and delete moldy, old in-progress records.
    g.mdb = searchdatabasemongo.CSearchDatabase(g.sSearchDbMongoName, 
                g.sSearchDbProgressCollectionName, 
                g.sSearchDbDoneCollectionName)
    g.mdb.fnvDeleteProgressCollection()
    
    # Get the set of instructions for today from database.
    NTRC.tracef(0,"MAIN","proc querydict2|%s|" % ((dQuery)))
    itAllInstructions = searchspace.fndgGetSearchSpace(g.sInsDir, g.sInsTyp, 
                        dQuery)
    nRuns = fnnProcessAllInstructions(itAllInstructions)
    
    # If this wasn't just a listonly run, do all the cases.  
    if not g.sListOnly.startswith("Y"):
        NTRC.ntracef(3, "MAIN", "proc all instr|%s|" % (g.lGiantInstr))
        nCases = nb.fntRunEverything(g, iter(g.lGiantInstr)
                                , g.nCoreTimer, g.nStuckLimit)
    else:
        nCases = len(g.lGiantInstr)
    NTRC.ntracef(0, "MAIN", "End ncases|%s|" % (nCases,))
Developer ID: MIT-Informatics, Project: PreservationSimulation, Lines of code: 66, Source file: broker2.py

Example 14: main

# Required import: from NewTraceFac import NTRC [as alias]
# Or: from NewTraceFac.NTRC import tracef [as alias]
def main():
    '''
    Process:
    Open the file given on the command line.
    Open the database given on the command line.
    Read the two lines from the file.
    If the sDoneId(=mongoid) already appears in the done collection 
     of the database, 
    Then    end.
    Else    dictionary-ify the data (maybe csvreader already did that for us).
            add the dict to the done collection, including the sDoneId field.
            end.
    '''
    NTRC.ntracef(0,"DCLN","datacleanup Begin.")
    # Get args from CLI and put them into the global data
    dCliDict = fndCliParse("")
    # Carefully insert any new CLI values into the Global object.
    dCliDictClean = {k:v for k,v in dCliDict.items() if v is not None}
    g.__dict__.update(dCliDictClean)

    # Get data from the extract file: one line of header, one line of data.
    with open(g.sInputFilename,'r') as fhInput:
        oReader = csv.reader(fhInput, delimiter=g.sSeparator)
        lHeader = oReader.next()
        lValues = oReader.next()
        NTRC.tracef(3, "DCLN", "proc lHeader|%s|" % (lHeader))
        NTRC.tracef(3, "DCLN", "proc lValues|%s|" % (lValues))
    dValues = dict(zip(lHeader, lValues))
    NTRC.tracef(3, "DCLN", "proc dValues|%s|" % (dValues))
    
    # Open the SearchDatabase for done and progress records.
    g.mdb = searchdatabasemongo.CSearchDatabase(g.sSearchDbMongoName, 
            g.sProgressCollectionName, 
            g.sDoneCollectionName)
    # Construct database query for this invocation.  
    sInstructionId = dValues["mongoid"]
    sLineOut = g.sSeparator.join(lValues)
    NTRC.tracef(0,"DCLN","proc looking for done recd|%s|" 
        % (sInstructionId))

    # If this extract is already stored in the database, don't do it again.  
    bIsItDone = g.mdb.fnbIsItDone(sInstructionId)
    if not bIsItDone:
        # If case not already done, add data line to the giant output file.
        # But first, ...
        # If the output file does not exist, or is empty, write the header line
        #  in first before the data line.  
        # (If the file does not exist, open mode=a will create an empty one.)
        with open(g.sGiantOutputFilename,'a') as fhOutput:
            if not os.stat(g.sGiantOutputFilename).st_size:
                sHeaderLine = g.sSeparator.join(lHeader)
                fhOutput.write(sHeaderLine + "\n")
                NTRC.tracef(3, "DCLN", "proc wroteheaderline|%s|" 
                    % (sHeaderLine))
            fhOutput.write(sLineOut + "\n")
            NTRC.tracef(0, "DCLN", "proc line appended to output \nsLineOut|%s|" 
                % (sLineOut))

        # Probably record the done record in db.
        if g.sDoNotRecord.startswith("Y"):
            NTRC.tracef(0, "DCLN", "proc Done not recorded.")
        else:
            dResult = g.mdb.fndInsertDoneRecord(sInstructionId, dValues)

        # Probably delete the extract file.
        if g.sDoNotDelete.startswith("Y"):
            NTRC.tracef(0, "DCLN", "proc Input file not deleted.")
        else:
            os.remove(g.sInputFilename)
            NTRC.tracef(3,"DCLN", "proc fileremoved|%s|" 
                % (g.sInputFilename))
            # And remove its in-progress record from the search db.
            g.mdb.fndDeleteProgressRecord(sInstructionId)
    else:
        # Duplicate instruction; do not add line to output file.
        NTRC.tracef(0, "DCLN", "proc line NOT appended to output file \n"
            "sLineOut|%s|" 
            % (sLineOut))

    NTRC.ntracef(0,"DCLN","datacleanup End.")
    return 0
Developer ID: MIT-Informatics, Project: PreservationSimulation, Lines of code: 83, Source file: datacleanup.py

Example 15: main

# Required import: from NewTraceFac import NTRC [as alias]
# Or: from NewTraceFac.NTRC import tracef [as alias]
def main(mysInstructionsFileName,mysLogFileName):
    (lTemplate,g.dVars) = fnldParseInput(mysInstructionsFileName)
    lLines = list()
    with open(mysLogFileName,"r") as fhLogFile:

        '''\
        get list of tuples: lines that match some lineregex, for which var

        foreach line, 
            if matches any lineregex
                extract value, 
                put varname and value in dictionary

        be careful never to form a list of lines of the input log file, 
         or of anything that is big-O of that.  filter first.
        '''

        # Form list of all lines that match some var.
        nLineNr = 0
        lLinesSelectedRaw = list()
        for sLine in fhLogFile:
            nLineNr += 1                # Need line nr only for debugging.
            for sVarname in g.dVars.keys():
                tResult = fntDoesLineMatchThisVar(sLine, nLineNr, sVarname)
                # If line matches any var, save the line and the varname.
                if tResult[0]: 
                    lLinesSelectedRaw.append(tResult)
        NTRC.tracef(3,"MN2","proc lLinesSelectedRaw len|%s| all|%s|" % (len(lLinesSelectedRaw),lLinesSelectedRaw))

    # Eliminate duplicates.  Should not be any if the lineregexes are 
    #  specific enough.  
    lLinesSelected = list(set(lLinesSelectedRaw))
    NTRC.tracef(5,"MN3","proc lLinesSelected len|%s| all|%s|" % (len(lLinesSelected),lLinesSelected))

    # Extract variable value from each matching line.
    # List of lines selected is actually a list of triples.
    lResults = map( lambda (omatch, sLine, sVarname): 
                fntMatchValue(sLine, g.dVars[sVarname])
                , lLinesSelected )
    # Returned list of (name,val) tuples for vars in lines selected.
    #  Make a dictionary.  
    dValues = dict(lResults)

    # In case we did not find the line for a variable, dummy up a value.
    for sKey in g.dVars: 
        dValues.setdefault(sKey,"nolinefound")

    # And in case we didn't even find a rule for some variable that
    #  will be used in the template, dummy up a value for it, too.  
    sTemplateHeader = "\n".join(lTemplate).replace("{","").replace("}","").replace("\n"," ")
    lTemplateVars = sTemplateHeader.split()
    for sTemplateVar in lTemplateVars: 
        dValues.setdefault(sTemplateVar,"norulefound")

    # Add the synthetic variables to the value dictionary.
    dSyntho = fndGetSyntheticVars()
    dValues.update(dSyntho)

    # Make the seed value, at least, print constant width for legibility.  
    sSeed = dValues["seed"]
    sSeednew = "%09d" % (int(sSeed))
    dValues["seed"] = sSeednew

    # Fill in the template with values and print.  
    # Template is allowed to be multiple lines.
    sTemplate = "\n".join(lTemplate)
    sLineout = makeCmd(sTemplate,dValues)
    if g.bHeader or os.environ.get("header",None):
        # Header is a single line concatenation of all the substitutions
        #  in the template.
        #  If the template is longer than one line, well, you can't read 
        #  the data with a simple header anyway.  Oops.  
        sHeader = sTemplateHeader
        print sHeader
    # Newline already pasted on the end of template; don't add another.
    print sLineout,
Developer ID: MIT-Informatics, Project: PreservationSimulation, Lines of code: 78, Source file: extractvalues2.py


Note: The NewTraceFac.NTRC.tracef method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub/MSDocs. The code snippets were selected from open-source projects contributed by various programmers; copyright in the source code remains with the original authors. Distribution and use are subject to the corresponding project's license; please do not repost without permission.