This article collects typical usage examples of the Python method NewTraceFac.NTRC.ntracef. If you are wondering how exactly to use Python's NTRC.ntracef, how to call it, or what NTRC.ntracef looks like in practice, the curated code examples here may help. You can also look further into usage examples of the class it belongs to, NewTraceFac.NTRC.
The following shows 15 code examples of the NTRC.ntracef method, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
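Before the examples, it helps to see the calling convention they all share: NTRC.ntracef takes a numeric trace detail level, a short facility tag, and a message string, and (judging from these examples) emits the message when tracing is enabled at that level. The sketch below only illustrates that pattern; the level, tag, and message values are invented for demonstration and are not taken from any example.

from NewTraceFac import NTRC

nSize = 1024
sClientID = "C1"
# Level-3 detail message for a "DOC" facility, using the pipe-delimited
# field style that the examples below use throughout.
NTRC.ntracef(3, "DOC", "proc demo client|%s| size|%d|" % (sClientID, nSize))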
Example 1: __init__

# Required import: from NewTraceFac import NTRC [as alias]
# Or: from NewTraceFac.NTRC import ntracef [as alias]
def __init__(self,size,mysClientID,mysCollectionID):
    self.ID = "D" + str(self.getID())
    # BEWARE: if we have more than 10,000 docs, a fixed-length
    # representation will have to change. Bad idea; don't use it.
    # Change the sorting algorithm instead.
    # self.ID = "D" + "%04d"%(self.getID())
    # So, don't use it.
    G.dID2Document[self.ID] = self
    G.nDocLastID = self.ID
    self.nSize = size
    # Who owns this doc
    self.sClientID = mysClientID        # Doc owned by what client
    self.sCollID = mysCollectionID      # Doc lives in what collection
    NTRC.ntracef(3,"DOC","proc init client|%s| created doc|%s| size|%d|" % (self.sClientID,self.ID,self.nSize))
    # Where are copies of this doc stored
    self.lServerIDs = list()            # What servers currently have this doc
    self.lCopyIDs = list()              # What copy IDs are there of this doc
    self.setServerIDsAll = set([])      # What servers have ever had a copy
    # How has the doc fared in the storage wars
    self.bMajorityRepair = False        # True if ever repaired from majority of copies
    self.bMinorityRepair = False        # True if ever repaired from minority of copies
    self.bDocumentLost = False          # True if completely lost, all copies lost
    self.bDocumentOkay = True           # True if never repaired or lost
    self.nRepairsMajority = 0           # Number of repairs of doc from majority copies
    self.nRepairsMinority = 0           # Number of repairs of doc from minority copies
Example 2: mSelectServersForCollection

# Required import: from NewTraceFac import NTRC [as alias]
# Or: from NewTraceFac.NTRC import ntracef [as alias]
def mSelectServersForCollection(self, mynCollValue):
    '''\
    Get list of servers at this quality level.
    Return a random permutation of the list of servers.
    Oops, not any more. Just a list of usable ones.
    '''
    # Get list of all servers at this quality level.
    # Value level translates to quality required and nr copies.
    (nQuality, nCopies) = G.dDistnParams[mynCollValue][0]
    lServersAtLevel = [ll[1] for ll in G.dQual2Servers[nQuality]]
    '''\
    For most questions, all servers are functionally
    identical. Just take the right number of them. We used
    to take a random permutation of the list of servers and
    choose from those, hence the name "Perm", but don't waste
    the effort any more.
    NEW: return only servers that are not already in use and not broken.
    '''
    lPermChosenAlive = [svr for svr in lServersAtLevel
                        if not G.dID2Server[svr].bDead]
    lPermChosenAvail = [svr for svr in lPermChosenAlive
                        if not G.dID2Server[svr].bInUse]
    NTRC.ntracef(3, "CLI", "proc servers chosen level|%s| alive|%s| "
        "full|%s|"
        % (lServersAtLevel, lPermChosenAlive, lPermChosenAvail))
    # Just make sure there are enough of them to meet the client's needs.
    if len(lPermChosenAlive) < nCopies:
        # Not enough servers available; someone will have to create one.
        lPermChosen = []
    else:
        lPermChosen = lPermChosenAvail[0:nCopies]
    return lPermChosen
Example 3: mEvaluateMe

# Required import: from NewTraceFac import NTRC [as alias]
# Or: from NewTraceFac.NTRC import ntracef [as alias]
def mEvaluateMe(self):
    '''\
    Return tuple of four bools stating doc status.
    How many copies do I have left (if any)?
    '''
    nCopiesLeft = len(
        filter(
            (lambda sServerID:
                self.mTestOneServer(sServerID))
            ,self.lServerIDs)
        )
    # Are there any or enough copies left from which to repair the doc?
    nNumberOfServers = len(self.setServerIDsAll)
    nMajorityOfServers = (nNumberOfServers + 1) / 2
    # Include results from previous audits (if any).
    (bOkay, bMajority, bMinority, bLost) = (self.bDocumentOkay, self.bMajorityRepair,self.bMinorityRepair,self.bDocumentLost)
    NTRC.ntracef(3,"DOC","proc mEvaluateMe doc|%s| ncopies|%s| nservers|%s| okay|%s| majority|%s| minority|%s| lost|%s|" % (self.ID,nCopiesLeft,nNumberOfServers,bOkay,bMajority,bMinority,bLost))
    if nCopiesLeft > 0:
        # If there is a majority of copies remaining,
        # then unambiguous repair is possible.
        if nCopiesLeft < nNumberOfServers and nCopiesLeft >= nMajorityOfServers:
            bMajority = True
            bOkay = False
        # Some copies left, but not enough for unambiguous repair.
        # Record that forensics are required for this doc repair.
        elif nCopiesLeft < nMajorityOfServers:
            bMinority = True
            bOkay = False
    # There are no remaining copies of the doc,
    # it cannot be repaired ever, oops. Permanent loss.
    else:
        bLost = True
        bOkay = False
    return (bOkay,bMajority,bMinority,bLost)
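The copies-versus-majority bookkeeping above can be restated as a small standalone function, which may make the thresholds easier to see. This is only an illustrative sketch with hypothetical names, not part of the simulation code:

def classify_copies(nCopiesLeft, nServersEver):
    # Majority threshold: more than half of the servers that ever held a copy.
    nMajority = (nServersEver + 1) // 2
    if nCopiesLeft == 0:
        return "lost"               # no copy anywhere; permanent loss
    if nMajority <= nCopiesLeft < nServersEver:
        return "majority repair"    # unambiguous repair possible
    if nCopiesLeft < nMajority:
        return "minority repair"    # forensics required
    return "okay"                   # all copies still present

# With 5 servers ever holding copies: 5 -> okay, 3 or 4 -> majority repair,
# 1 or 2 -> minority repair, 0 -> lost.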
Example 4: mServerIsDead

# Required import: from NewTraceFac import NTRC [as alias]
# Or: from NewTraceFac.NTRC import ntracef [as alias]
def mServerIsDead(self, mysServerID, mysCollID):
    '''\
    Auditor calls us: a server is dead, no longer
    accepting documents. Remove server from active list,
    find a new server, populate it.
    '''
    NTRC.ntracef(3, "CLI", "proc deadserver1 client|%s| place coll|%s| "
        "to|%d|servers"
        % (self.ID, mysCollID, len(self.lServersToUse)))
    lg.logInfo("CLIENT", "server died cli|%s| removed svr|%s| coll|%s| "
        % (self.ID, mysServerID, mysCollID))
    cColl = G.dID2Collection[mysCollID]
    cColl.lServerIDs.remove(mysServerID)
    nCollValue = cColl.nValue
    lServersForCollection = self.mSelectServersForCollection(nCollValue)
    # The distribution params have already limited the
    # set of servers in the select-for-collection routine.
    # If there are servers available, pick one. Otherwise,
    # create a new server that's just like an old one and use it.
    if lServersForCollection:
        sServerToUse = lServersForCollection.pop(0)
    else:
        sServerToUse = CServer.fnsInventNewServer()
    lg.logInfo("CLIENT", "client|%s| assign new server|%s| to replace|%s|"
        % (self.ID, sServerToUse, mysServerID))
    nDocs = self.mPlaceCollectionOnServer(mysCollID, sServerToUse)
    lg.logInfo("CLIENT", "client|%s| provisioned new server|%s| "
        "collection|%s| ndocs|%s|"
        % (self.ID, sServerToUse, mysCollID, nDocs))
    self.nServerReplacements += 1
    return sServerToUse
Example 5: cmBeforeAudit

# Required import: from NewTraceFac import NTRC [as alias]
# Or: from NewTraceFac.NTRC import ntracef [as alias]
def cmBeforeAudit(self):
    '''
    Before each audit cycle, check to see if any servers
    have exceeded their lifetimes.
    '''
    for (sServerID, cServer) in (util.fnttSortIDDict(G.dID2Server)):
        fCurrentLife = cServer.mfGetMyCurrentLife()
        fFullLife = cServer.mfGetMyFullLife()
        fBirthday = cServer.mfGetMyBirthday()
        bServerAlive = not cServer.mbIsServerDead()
        bServerActive = cServer.bInUse
        # Log that we are examining this server,
        # but note if it's already dead.
        sStatus = "inuse" if bServerActive else ""
        sStatus = sStatus if bServerAlive else "dead"
        lg.logInfo("SHOCK ", "t|%6.0f| audit+end check svr|%s| "
            "life|%.0f|=|%.1f|yr %s"
            % (G.env.now, sServerID, fFullLife, fFullLife/10000,
                sStatus))
        NTRC.ntracef(3, "SHOK", "proc t|%6.0f| check expir? svr|%s| "
            "svrdefaulthalflife|%s| born|%s| currlife|%s|"
            % (G.env.now, sServerID, G.fServerDefaultHalflife,
                fBirthday, fCurrentLife))
        # Check to see if the server's lifetime has expired.
        bDeadAlready = CShock.cmbShouldServerDieNow(sServerID)
    return G.nDeadOldServers
Example 6: fnnProcessAllInstructions

# Required import: from NewTraceFac import NTRC [as alias]
# Or: from NewTraceFac.NTRC import ntracef [as alias]
def fnnProcessAllInstructions(myitInstructionIterator):
    '''
    Get the set of instructions that match the user's criteria for this batch,
    and run them one by one.
    Each instruction (run) is executed once for each random seed value.
    Count the number of runs, and don't exceed the user's limit, if any.
    If the execution reports a serious error, stop the loop.
    '''
    nRunNumber = 0
    maxcount = int(g.nTestLimit)
    # Is this a completely fake test run? Replace templates.
    if g.sTestFib.startswith("Y"):
        g.lTemplates = g.lFibTemplates
    # Process each instruction in turn.
    for dRawInstruction in myitInstructionIterator:
        NTRC.ntracef(3,"MAIN","proc main raw instruction\n|%s|"
            % (dRawInstruction))
        dInstruction = fndMaybeEnhanceInstruction(dRawInstruction)
        NTRC.ntracef(3,"MAIN","proc main enhanced instruction\n|%s|"
            % (dInstruction))
        # Execute each instruction once for each random seed value.
        nRunNumber += 1
        lManyInstr = fnltProcessOneInstructionManyTimes(nRunNumber
            , dInstruction)
        g.lGiantInstr.extend(lManyInstr)
        # If user asked for a short test run today, maybe stop now.
        maxcount -= 1
        if int(g.nTestLimit) > 0 and maxcount <= 0: break
    return nRunNumber
Example 7: msGentlyFormat

# Required import: from NewTraceFac import NTRC [as alias]
# Or: from NewTraceFac.NTRC import ntracef [as alias]
def msGentlyFormat(self, mysCmd, mydVals, myg, myCG):
    '''
    Like string.format() but does not raise exception if the string
    contains a name request for which the dictionary does not have
    a value. Leaves unfulfilled name requests in place.
    Method: construct a dictionary that contains something for every
    name requested in the string. The value is either a supplied
    value from the caller or a placeholder for the name request.
    Then use the now-defanged string.format() method.
    This is way harder than it ought to be, grumble.
    '''
    # Make a dictionary from the names requested in the string
    # that just replaces the request '{foo}' with itself.
    sReNames = '(:?\{([^\}]+)\})+'
    oReNames = re.compile(sReNames)
    lNameTuples = oReNames.findall(mysCmd)
    NTRC.ntracef(3,"FMT","proc gently tuples|%s|" % (lNameTuples))
    lNames = [x[1] for x in lNameTuples]
    dNames = dict(zip(lNames, map(lambda s: "{"+s+"}", lNames)))
    # Pick up any specified values in the global object
    # and from CLI args.
    dNames.update(dict(vars(myCG)))
    dNames.update(dict(vars(myg)))
    # And then add values from the specific instructions.
    dNames.update(mydVals)
    NTRC.ntrace(3,"proc gently dnames|%s|" % (dNames))
    sOut = mysCmd.format(**dNames)
    return sOut
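The partial-formatting trick in msGentlyFormat is independent of this codebase and can be shown in a few lines. The sketch below uses a hypothetical function name and a simplified regex; the idea is the same: pre-map every requested {name} to itself so that str.format() leaves unknown names in place instead of raising KeyError.

import re

def gently_format(template, values):
    # Every {name} request defaults to the literal "{name}" placeholder...
    names = re.findall(r'\{([^}]+)\}', template)
    defaults = dict((n, "{" + n + "}") for n in names)
    # ...then caller-supplied values override the placeholders.
    defaults.update(values)
    return template.format(**defaults)

# e.g. gently_format("run {cmd} seed {seed}", {"cmd": "test"})
#      -> 'run test seed {seed}'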
Example 8: __init__

# Required import: from NewTraceFac import NTRC [as alias]
# Or: from NewTraceFac.NTRC import ntracef [as alias]
def __init__(self, mygl, mynWaitMsec):
    threading.Thread.__init__(self, name="endall")
    self.gl = mygl
    self.nWaitMsec = mynWaitMsec
    self.llsFullOutput = list()
    NTRC.ntracef(2, "END", "exit init gl|%s| wait|%s|"
        % (self.gl, self.nWaitMsec))
Example 9: fntRunEverything

# Required import: from NewTraceFac import NTRC [as alias]
# Or: from NewTraceFac.NTRC import ntracef [as alias]
def fntRunEverything(mygl, qInstr, fnbQEnd, nWaitMsec, nWaitHowMany):
    '''Start an async job for each case. Limit number of concurrent jobs
    to the size of the ltJobs vector.
    When a job completes, ship its output upline and remove it from
    the active lists.
    Two separate threads:
    - Wait for an empty slot; get an instruction, start an async job.
    - Wait for an active job to complete and remove it from lists.
    '''
    # Fill the list of jobs with empties.
    for i in range(mygl.nParallel + 1): mygl.ltJobs.append(None)
    mygl.lockJobList = threading.Lock()
    mygl.lockPrint = threading.Lock()
    # Create and start new threads
    NTRC.ntracef(5, "RUN", "proc make thread instances")
    mygl.thrStart = CStartAllCases(mygl, mygl.nCoreTimer, mygl.nStuckLimit
        , qInstr, fnbQEnd)
    mygl.thrEnd = CEndAllCases(mygl, mygl.nCoreTimer, )
    mygl.llsFullOutput = [["",""]]
    #mygl.thrStart.start()
    #mygl.thrEnd.start()
    # Wait until all jobs have started and finished.
    if (mygl.thrStart.is_alive() and mygl.thrEnd.is_alive()):
        mygl.thrStart.join()    # Runs out of instructions.
        mygl.thrEnd.join()      # Runs out of finished jobs.
    return tWaitStats(ncases=mygl.nCasesDone
        , slot=mygl.nWaitedForSlot
        , done=mygl.nWaitedForDone
        , inst=mygl.nWaitedForInstr)
Example 10: fndFormatQuery

# Required import: from NewTraceFac import NTRC [as alias]
# Or: from NewTraceFac.NTRC import ntracef [as alias]
def fndFormatQuery(self, mydCli, myg):
    '''
    Take all the CLI options that might specify a searchable attribute, and
    construct a MongoDB or searchspace query dictionary.
    This is lots nastier than it first appears to be
    because json is so bloody picky.
    '''
    dOut = dict()
    for sAttrib,sValue in mydCli.items():
        result = None
        if sValue is not None:
            # Is it something valid in json?
            try:
                result = json.loads(sValue)
            except ValueError:
                # Is it a string that should be an integer, ok in json?
                try:
                    result = int(sValue)
                except:
                    # Is it a naked string for some string-valued var
                    # that isn't just Y/N or a mandatory string?
                    # Rule out dict values that are already formatted.
                    if (isinstance(sValue, str)
                            and sAttrib not in myg.lYesNoOptions
                            and sAttrib not in myg.lMandatoryArgs
                            and '{' not in sValue
                            and '}' not in sValue
                            and ':' not in sValue
                            and ',' not in sValue
                            ):
                        result = '{"$eq":' + '"'+sValue+'"' + '}'
                    else:
                        result = sValue
                    NTRC.tracef(3, "FMT", "proc FormatQuery notjson item "
                        "key|%s| val|%s| result|%s|"
                        % (sAttrib, sValue, result))
        NTRC.tracef(3, "FMT", "proc FormatQuery item key|%s| val|%s| result|%s|"
            % (sAttrib, sValue, result))
        # Can't process dicts thru json twice.
        if isinstance(result, dict):
            dOut[sAttrib] = sValue
        else:
            dOut[sAttrib] = result
    # Allow only attribs that appear in the database, else will get
    # no results due to implied AND of all items in query dict.
    dOutSafe = {k:v for k,v in dOut.items() if k in myg.lSearchables}
    dOutNotNone = {k:v for k,v in dOutSafe.items() if v is not None}
    NTRC.ntracef(3,"FMT","proc dict b4|%s| \nsafe|%s|\nclean|%s|"
        % (dOut,dOutSafe,dOutNotNone))
    if "sQuery" in dOutNotNone.keys():
        # If the brave user has supplied a full, standalone query string,
        # add its contents to the query dict so far.
        dTmp = dOutNotNone["sQuery"]
        del dOutNotNone["sQuery"]
        dOutNotNone.update(dTmp)
    return dOutNotNone
Example 11: fndgGetSearchSpace

# Required import: from NewTraceFac import NTRC [as alias]
# Or: from NewTraceFac.NTRC import ntracef [as alias]
def fndgGetSearchSpace(mysDir, mysTyp, mydUserRuleDict):
    '''
    Produce instruction stream from instruction files and user rules.
    '''
    dFullDict = fndReadAllInsFiles(mysDir, mysTyp)
    (dTrimmedDict,dOriginalDict) = fntProcessAllUserRules(mydUserRuleDict,
        dFullDict)
    dFilteredDict = fndFilterResults(dTrimmedDict)
    fnvTestResults(dFilteredDict, dFullDict)
    NTRC.ntracef(3, "SRCH", "proc GetSearchSpace:FilteredDict|%s|"
        % (dFilteredDict))
    return fndgCombineResults(dFilteredDict)
Example 12: fnvGetEnvironmentOverrides

# Required import: from NewTraceFac import NTRC [as alias]
# Or: from NewTraceFac.NTRC import ntracef [as alias]
def fnvGetEnvironmentOverrides():
    # Allow user to override number of cores to use today.
    # Utility routine looks at HW and possible user envir override.
    g.nCores = brokergetcores.fnnGetResolvedCores()
    NTRC.ntracef(0, "MAIN", "proc ncores|%s|" % (g.nCores))
    # Allow user to override the polite interval to use today.
    try:
        g.nPoliteTimer = int(os.getenv("NPOLITE", CG.nPoliteTimer))
        NTRC.ntracef(0, "MAIN", "proc politetimer|%s|msec" % (g.nPoliteTimer))
    except (ValueError, TypeError):
        raise TypeError("Environment variable NPOLITE must be "
            "an integer number of milliseconds.")
Example 13: mMergeEvaluation

# Required import: from NewTraceFac import NTRC [as alias]
# Or: from NewTraceFac.NTRC import ntracef [as alias]
def mMergeEvaluation(self,mybOkay,mybMajority,mybMinority,mybLost):
    '''\
    Carefully combine new doc info with old from audits, if any.
    E.g., finally okay only if was okay and still is okay;
    finally lost if was lost or is now lost.
    '''
    NTRC.ntracef(3,"DOC","proc merge in|%s|%s|%s|%s| with doc|%s|%s|%s|%s|" % (mybOkay,mybMajority,mybMinority,mybLost,self.bDocumentOkay,self.bMajorityRepair,self.bMinorityRepair,self.bDocumentLost))
    self.bDocumentOkay = self.bDocumentOkay and mybOkay
    self.bMajorityRepair = self.bMajorityRepair or mybMajority
    self.bMinorityRepair = self.bMinorityRepair or mybMinority
    self.bDocumentLost = self.bDocumentLost or mybLost
    return (self.bDocumentOkay,self.bMajorityRepair,self.bMinorityRepair,self.bDocumentLost)
Example 14: fnnCalcDocSize

# Required import: from NewTraceFac import NTRC [as alias]
# Or: from NewTraceFac.NTRC import ntracef [as alias]
def fnnCalcDocSize(mynLevel):
    lPercents = G.dDocParams[mynLevel]
    nPctRandom = makeunif(0,100)
    nPctCum = 0
    for lTriple in lPercents:
        (nPercent, nMean, nSdev) = lTriple
        nPctCum += nPercent
        if nPctRandom <= nPctCum:
            nDocSize = int(makennnorm(nMean, nSdev))
            NTRC.ntracef(3,"DOC","proc CalcDocSize rand|%s| cum|%s| pct|%s| "
                "mean|%s| sd|%s| siz|%s|"
                % (nPctRandom,nPctCum,nPercent,nMean,nSdev,nDocSize))
            break
    return nDocSize
Example 15: mDestroyShelf

# Required import: from NewTraceFac import NTRC [as alias]
# Or: from NewTraceFac.NTRC import ntracef [as alias]
def mDestroyShelf(self):
    ''' Nuke all the copies on the shelf.
    Can't delete the CShelf object, however.
    '''
    NTRC.ntracef(3, "SHLF", "proc mDestroyShelf1 shelf|%s| "
        "has ncopies|%s|"
        % (self.ID, len(self.lCopyIDs)))
    lg.logInfo("SHELF ", "t|%6.0f| destroy shelf|%s| "
        "of svr|%s| ncopies|%s|"
        % (G.env.now, self.ID, self.sServerID,
            len(self.lCopyIDs)))
    lAllCopyIDs = self.lCopyIDs[:]  # DANGER: list modified inside loop,
                                    #  requires deepcopy.
    for sCopyID in lAllCopyIDs:
        self.mDestroyCopy(sCopyID)